Merge remote-tracking branch 'alfa-addon/master'

This commit is contained in:
unknown
2017-10-02 09:41:14 -03:00
20 changed files with 663 additions and 3680 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.1.1" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.1.2" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,16 +19,13 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» canalpelis » allcalidad
» cinefox » cineasiaenlinea
» cinetux » divxatope
» maxipelis » pedropolis
» doomtv » animeshd
» hdfull » ultrapelishd
» pelisplus » cinecalidad
» peliculasnu » allpeliculas
¤ arreglos internos
[COLOR green]Gracias a [COLOR yellow]prpeaprendiz[/COLOR] por su colaboración en esta versión[/COLOR]
» cinecalidad » estadepelis
» datoporn » seriesyonkis
» allcalidad » allpeliculas
» cartoonlatino » pasateatorrent
» vidz7 » zonatorrent
» gvideo » okru
» openload ¤ arreglos internos
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -138,8 +138,8 @@ def findvideos(item):
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
filtro=True, action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
extra="library"))
infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle
))
return itemlist

View File

@@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
import string
import urlparse
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
@@ -22,6 +24,7 @@ SERVERS = {"26": "powvideo", "45": "okru", "75": "openload", "12": "netutv", "65
"67": "spruto", "71": "stormo", "73": "idowatch", "48": "okru", "55": "openload",
"20": "nowvideo", "84": "fastplay", "96": "raptu", "94": "tusfiles"}
host = "http://allpeliculas.com/"
def mainlist(item):
logger.info()
@@ -29,32 +32,111 @@ def mainlist(item):
item.text_color = color1
itemlist.append(item.clone(title="Películas", action="lista", fanart="http://i.imgur.com/c3HS8kj.png",
url="http://allpeliculas.co/Movies/fullView/1/0/&ajax=1"))
itemlist.append(item.clone(title="Series", action="lista", fanart="http://i.imgur.com/9loVksV.png", extra="tv",
url="http://allpeliculas.co/Movies/fullView/1/86/?ajax=1&withoutFilter=1", ))
itemlist.append(item.clone(title="Géneros", action="subindice", fanart="http://i.imgur.com/ymazCWq.jpg"))
itemlist.append(item.clone(title="Índices", action="indices", fanart="http://i.imgur.com/c3HS8kj.png"))
url= host + "movies/newmovies?page=1", extra1 = 0))
itemlist.append(item.clone(title="Por genero", action="generos", fanart="http://i.imgur.com/c3HS8kj.png",
url= host + "movies/getGanres"))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(title="Buscar...", action="search"))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
def configuracion(item):
    """Open the channel settings dialog, then refresh the current listing."""
    from platformcode import platformtools

    result = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return result
def generos(item):
    """Build one listing entry per genre returned by the site's JSON endpoint."""
    logger.info()
    raw = httptools.downloadpage(item.url).data
    genres = jsontools.load(raw)
    # Every genre points at the same listing URL; the genre id travels in extra1
    # and is applied as a POST filter by lista().
    return [
        Item(channel=item.channel,
             action="lista",
             title=genre['label'],
             url=host + "movies/newmovies?page=1",
             extra1=genre['id'])
        for genre in genres
    ]
def findvideos(item):
    """List the playable links found on a movie page.

    Scrapes every ``data-link`` attribute (the anchor text is the quality),
    resolves the hosting server with servertools and, when the video library
    is enabled, appends an "add to library" entry.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Each link carries its quality as the element text after the attribute.
    patron = 'data-link="([^"]+).*?'
    patron += '>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, calidad in matches:
        itemlist.append(Item(
            channel = item.channel,
            action = "play",
            title = calidad,
            url = url,
        ))
    itemlist = servertools.get_servers_itemlist(itemlist)
    # Blank separator entry before the library option.
    itemlist.append(Item(channel=item.channel))
    if config.get_videolibrary_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                             filtro=True, action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                             infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle
                             ))
    try:
        # NOTE(review): __modo_grafico__ is not defined anywhere visible in this
        # module; the bare except would silently hide a NameError -- confirm.
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except:
        pass
    return itemlist
def lista(item):
    """List the movies returned by the site's JSON listing endpoint.

    POSTs an optional genre filter (item.extra1, set by generos()), builds one
    entry per movie and appends a "next page" entry unless this call came from
    a search (item.extra == "busqueda").
    """
    logger.info()
    itemlist = []
    item.infoLabels = {}
    item.text_color = color2
    # Optional genre filter, serialised as JSON for the POST body.
    params = '{}'
    if item.extra1 != 0:
        params = jsontools.dump({"genero": [item.extra1]})
    data = httptools.downloadpage(item.url, post=params).data
    dict_data = jsontools.load(data)
    for it in dict_data["items"]:
        title = it["title"]
        plot = it["slogan"]
        year = it["year"]
        url = host + "pelicula/" + it["slug"]
        thumb = urlparse.urljoin(host, it["image"])
        item.infoLabels['year'] = year
        itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb,
                                   plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie"))
    # Pagination: bump the "page" query parameter.
    pagina = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
    if pagina == "":
        pagina = "0"
    pagina = int(pagina) + 1
    # BUGFIX: the old code did item.url.replace(pagina, ""), which removes
    # EVERY occurrence of the digit string anywhere in the URL (not just the
    # page parameter), corrupting the next-page link.  Rewrite only the
    # trailing "page=" value instead.
    head, sep, _ = item.url.rpartition("page=")
    if sep:
        item.url = head + sep + "%s" % pagina
    else:
        item.url = item.url + "%s" % pagina
    if item.extra != "busqueda":
        itemlist.append(Item(channel = item.channel, action="lista", title="Pagina %s" % pagina, url=item.url,
                             extra1 = item.extra1))
    try:
        # Fetch basic metadata for all movies via multithreaded TMDB lookups.
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass
    return itemlist
def search(item, texto):
logger.info()
if texto != "":
texto = texto.replace(" ", "+")
item.url = "http://allpeliculas.co/Search/advancedSearch?searchType=movie&movieName=" + texto + "&ajax=1"
item.url = host + "/movies/search/" + texto
item.extra = "busqueda"
try:
return busqueda(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -68,7 +150,7 @@ def newest(categoria):
item = Item()
try:
if categoria == "peliculas":
item.url = "http://allpeliculas.co/Movies/fullView/1/0/&ajax=1"
item.url = host + "movies/newmovies?page=1"
item.action = "lista"
itemlist = lista(item)
@@ -83,402 +165,3 @@ def newest(categoria):
return []
return itemlist
def busqueda(item):
    """Render advanced-search results.

    Movies route to findvideos, series to temporadas; a "next page" entry is
    appended when the page advertises one.
    """
    logger.info()
    itemlist = []
    item.infoLabels = {}
    item.text_color = color2
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    patron = '<img class="poster" src="([^"]+)".*?<div class="vote-div-count".*?>(.*?)/.*?' \
             '<a class="movie-list-link" href="([^"]+)" title="([^"]+)".*?' \
             'Year:</b> (.*?) </p>.*?Género:</b> (.*?)</p>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for thumbnail, vote, url, title, year, genre in matches:
        url = "http://allpeliculas.co" + url.replace("#", "") + "&ajax=1"
        # Swap the thumbnail path segments for the higher-resolution variants.
        thumbnail = thumbnail.replace("/105/", "/400/").replace("/141/", "/600/").replace(" ", "%20")
        titulo = title + " (" + year + ")"
        item.infoLabels['year'] = year
        item.infoLabels['genre'] = genre
        item.infoLabels['rating'] = vote
        if "Series" not in genre:
            itemlist.append(item.clone(action="findvideos", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail,
                                       context=["buscar_trailer"], contentTitle=title, contentType="movie"))
        else:
            itemlist.append(item.clone(action="temporadas", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail,
                                       context=["buscar_trailer"], contentTitle=title, contentType="tvshow"))
    # Pagination
    next_page = scrapertools.find_single_match(data, 'class="pagination-active".*?href="([^"]+)"')
    if next_page != "":
        url = next_page.replace("#", "") + "&ajax=1"
        itemlist.append(item.clone(action="lista", title=">> Siguiente", url=url, text_color=color3))
    return itemlist
def indices(item):
    """Top-level index menu: each entry drills into a sub-index or a listing."""
    logger.info()
    item.text_color = color1
    rating_url = ("http://allpeliculas.co/Movies/fullView/1/0/rating:imdb|date:1900-3000|"
                  "alphabet:all|?ajax=1&withoutFilter=1")
    return [
        item.clone(title="Alfabético", action="subindice"),
        item.clone(title="Por idioma", action="subindice"),
        item.clone(title="Por valoración", action="lista", url=rating_url),
        item.clone(title="Por año", action="subindice"),
        item.clone(title="Por calidad", action="subindice"),
    ]
def lista(item):
    """Parse a movie-grid page into movie/series entries with pagination.

    Extracts thumb, trailer, rating, year, genre, languages, plot, quality
    and title per card; series entries (when item.extra == "tv") route to
    temporadas, everything else to findvideos.
    """
    logger.info()
    itemlist = []
    item.infoLabels = {}
    item.text_color = color2
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    bloque = scrapertools.find_single_match(data, '<div class="movies-block-main"(.*?)<div class="movies-'
                                                  'long-pagination"')
    patron = '<div class="thumb"><img src="([^"]+)".*?<a href="([^"]+)".*?' \
             '(?:class="n-movie-trailer">([^<]+)<\/span>|<div class="imdb-votes">)' \
             '.*?<div class="imdb"><span>(.*?)</span>.*?<span>Year.*?">(.*?)</a>.*?<span>' \
             '(?:Género|Genre).*?<span>(.*?)</span>.*?<span>Language.*?<span>(.*?)</span>.*?' \
             '<div class="info-full-text".*?>(.*?)<.*?<div class="views">(.*?)<.*?' \
             '<div class="movie-block-title".*?>(.*?)<'
    # Fall back to the full page when the grid wrapper is absent.
    if bloque == "":
        bloque = data[:]
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for thumbnail, url, trailer, vote, year, genre, idioma, sinopsis, calidad, title in matches:
        url = url.replace("#", "") + "&ajax=1"
        # Swap the thumbnail path segments for the higher-resolution variants.
        thumbnail = thumbnail.replace("/157/", "/400/").replace("/236/", "/600/").replace(" ", "%20")
        # Languages arrive comma-separated; sort them for a stable label.
        idioma = idioma.replace(" ", "").split(",")
        idioma.sort()
        titleidioma = "[" + "/".join(idioma) + "]"
        titulo = title + " " + titleidioma + " [" + calidad + "]"
        item.infoLabels['plot'] = sinopsis
        item.infoLabels['year'] = year
        item.infoLabels['genre'] = genre
        item.infoLabels['rating'] = vote
        item.infoLabels['trailer'] = trailer.replace("youtu.be/", "http://www.youtube.com/watch?v=")
        if item.extra != "tv" or "Series" not in genre:
            itemlist.append(item.clone(action="findvideos", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail,
                                       context=["buscar_trailer"], contentTitle=title, contentType="movie"))
        else:
            itemlist.append(item.clone(action="temporadas", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail,
                                       context=["buscar_trailer"], contentTitle=title, show=title,
                                       contentType="tvshow"))
    try:
        from core import tmdb
        # Fetch basic metadata for all movies via multithreaded TMDB lookups.
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass
    # Pagination
    next_page = scrapertools.find_single_match(data, 'class="pagination-active".*?href="([^"]+)"')
    if next_page != "":
        url = next_page.replace("#", "") + "&ajax=1"
        itemlist.append(item.clone(action="lista", title=">> Siguiente", url=url, text_color=color3))
    return itemlist
def subindice(item):
    """Second-level index: expand the chosen index type into concrete listings.

    Dispatches on the menu title of *item* (genre / alphabet / language /
    year / quality) and builds one "lista"/"busqueda" entry per value taken
    from dict_indices().
    """
    logger.info()
    itemlist = []
    # Template URL; the relevant filter segment is patched per index type.
    url_base = "http://allpeliculas.co/Movies/fullView/1/0/date:1900-3000|alphabet:all|?ajax=1&withoutFilter=1"
    indice_genero, indice_alfa, indice_idioma, indice_year, indice_calidad = dict_indices()
    if "Géneros" in item.title:
        for key, value in indice_genero.items():
            url = url_base.replace("/0/", "/" + key + "/")
            itemlist.append(item.clone(action="lista", title=value, url=url))
        # NOTE: the lambda parameter deliberately shadows the outer "item".
        itemlist.sort(key=lambda item: item.title)
    elif "Alfabético" in item.title:
        for i in range(len(indice_alfa)):
            url = url_base.replace(":all", ":" + indice_alfa[i])
            itemlist.append(item.clone(action="lista", title=indice_alfa[i], url=url))
    elif "Por idioma" in item.title:
        for key, value in indice_idioma.items():
            url = url_base.replace("3000|", "3000|language:" + key)
            itemlist.append(item.clone(action="lista", title=value, url=url))
        itemlist.sort(key=lambda item: item.title)
    elif "Por año" in item.title:
        for i in range(len(indice_year)):
            year = indice_year[i]
            url = url_base.replace("1900-3000", year + "-" + year)
            itemlist.append(item.clone(action="lista", title=year, url=url))
    elif "Por calidad" in item.title:
        for key, value in indice_calidad.items():
            # Quality has no listing filter; it goes through the advanced search.
            url = "http://allpeliculas.co/Search/advancedSearch?searchType=movie&movieName=&movieDirector=&movieGenre" \
                  "=&movieActor=&movieYear=&language=&movieTypeId=" + key + "&ajax=1"
            itemlist.append(item.clone(action="busqueda", title=value, url=url))
        itemlist.sort(key=lambda item: item.title)
    return itemlist
def findvideos(item):
    """Collect online and download links for a movie page.

    Titles are built as "%s [language] [quality]" templates; the "%s" is
    filled later with the server name by get_servers_itemlist.
    """
    logger.info()
    itemlist = []
    item.text_color = color3
    # Fill the language and quality lookup dictionaries
    idiomas_videos, calidad_videos = dict_videos()
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    if item.extra != "library":
        try:
            from core import tmdb
            tmdb.set_infoLabels(item, __modo_grafico__)
        except:
            pass
    # Online links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?id_lang=' \
             '"([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, servidor_num, language, url in matches:
        # Server 94 links are wrapped through tusfiles unless they point at stormo.tv.
        if servidor_num == '94' and not 'stormo.tv' in url:
            url = "http://tusfiles.org/?%s" % url
        if 'vimeo' in url:
            url += "|" + item.url
        # filescdn urls sometimes arrive truncated to ".htm"
        if "filescdn" in url and url.endswith("htm"):
            url += "l"
        idioma = IDIOMAS.get(idiomas_videos.get(language))
        titulo = "%s [" + idioma + "] [" + calidad_videos.get(calidad) + "]"
        itemlist.append(item.clone(action="play", title=titulo, url=url, language = idioma, extra=idioma))
    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?id_lang=' \
             '"([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, servidor_num, language, url in matches:
        idioma = IDIOMAS.get(idiomas_videos.get(language))
        titulo = "[%s] [" + idioma + "] [" + calidad_videos.get(calidad) + "]"
        itemlist.append(item.clone(action="play", title=titulo, url=url, language = idioma, extra=idioma))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    itemlist.sort(key=lambda item: (item.extra, item.server))
    if itemlist:
        if not "trailer" in item.infoLabels:
            trailer_url = scrapertools.find_single_match(data, 'class="n-movie-trailer">([^<]+)</span>')
            item.infoLabels['trailer'] = trailer_url.replace("youtu.be/", "http://www.youtube.com/watch?v=")
        itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                                   text_color="magenta", context=""))
        if item.extra != "library":
            if config.get_videolibrary_support():
                itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
                                     action="add_pelicula_to_library", url=item.url, text_color="green",
                                     infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
                                     extra="library"))
    return itemlist
def temporadas(item):
    """List the seasons of a TV show page as "Temporada N" entries."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    try:
        from core import tmdb
        tmdb.set_infoLabels_item(item, __modo_grafico__)
    except:
        pass
    matches = scrapertools.find_multiple_matches(data, '<a class="movie-season" data-id="([^"]+)"')
    # Deduplicate season ids (the page repeats them per episode).
    matches = list(set(matches))
    for season in matches:
        item.infoLabels['season'] = season
        itemlist.append(item.clone(action="episodios", title="Temporada " + season, context=["buscar_trailer"],
                                   contentType="season"))
    itemlist.sort(key=lambda item: item.title)
    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass
    if not "trailer" in item.infoLabels:
        trailer_url = scrapertools.find_single_match(data, 'class="n-movie-trailer">([^<]+)</span>')
        item.infoLabels['trailer'] = trailer_url.replace("youtu.be/", "http://www.youtube.com/watch?v=")
    itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                               text_color="magenta", context=""))
    return itemlist
def episodios(item):
    """List the episodes of the season stored in item.infoLabels['season']."""
    logger.info()
    itemlist = []
    # Fill the language and quality lookup dictionaries
    idiomas_videos, calidad_videos = dict_videos()
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    patron = '<li><a class="movie-episode"[^>]+season="' + str(item.infoLabels['season']) + '"[^>]+>([^<]+)</a></li>'
    matches = scrapertools.find_multiple_matches(data, patron)
    capitulos = []
    for title in matches:
        # Skip duplicated episode anchors.
        if not title in capitulos:
            # Titles look like "Episodio N"; the number is the second word.
            episode = int(title.split(" ")[1])
            capitulos.append(title)
            itemlist.append(
                item.clone(action="findvideostv", title=title, contentEpisodeNumber=episode, contentType="episode"))
    itemlist.sort(key=lambda item: item.contentEpisodeNumber)
    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass
    # NOTE(review): this loop rebinds the "item" parameter while renaming the
    # entries to "SxEE: title" -- intentional but fragile shadowing.
    for item in itemlist:
        if item.infoLabels["episodio_titulo"]:
            item.title = "%dx%02d: %s" % (
                item.contentSeason, item.contentEpisodeNumber, item.infoLabels["episodio_titulo"])
        else:
            item.title = "%dx%02d: %s" % (item.contentSeason, item.contentEpisodeNumber, item.title)
    return itemlist
def findvideostv(item):
    """Return the playable links (online + download) for one TV episode.

    Scrapes the episode page for the <span> entries matching this item's
    season and episode numbers, builds one playable Item per link and
    resolves the real server with servertools.
    """
    logger.info()
    itemlist = []
    # Fill the language and quality lookup dictionaries
    idiomas_videos, calidad_videos = dict_videos()
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    # Online links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="%s' \
             '" season="%s" id_lang="([^"]+)".*?online-link="([^"]+)"' \
             % (str(item.infoLabels['episode']), str(item.infoLabels['season']))
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, language, url in matches:
        # Server 94 links are wrapped through tusfiles unless they point at stormo.tv.
        if servidor_num == '94' and not 'stormo.tv' in url:
            url = "http://tusfiles.org/?%s" % url
        if 'vimeo' in url:
            url += "|" + item.url
        # filescdn urls sometimes arrive truncated to ".htm"
        if "filescdn" in url and url.endswith("htm"):
            url += "l"
        idioma = IDIOMAS.get(idiomas_videos.get(language))
        titulo = "%s [" + idioma + "] (" + calidad_videos.get(quality) + ")"
        itemlist.append(item.clone(action="play", title=titulo, url=url, language=idioma, contentType="episode"))
    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="%s' \
             '" season="%s" id_lang="([^"]+)".*?online-link="([^"]+)"' \
             % (str(item.infoLabels['episode']), str(item.infoLabels['season']))
    matches = scrapertools.find_multiple_matches(data, patron)
    # BUGFIX: the pattern captures 4 groups, but the old loop unpacked 5
    # values ("episode" was never captured -> ValueError) and passed the
    # undefined name "server" to clone() (-> NameError).  Unpack 4 values
    # and build the clone exactly like the online-links loop above.
    for quality, servidor_num, language, url in matches:
        idioma = IDIOMAS.get(idiomas_videos.get(language))
        titulo = "%s [" + idioma + "] (" + calidad_videos.get(quality) + ")"
        itemlist.append(item.clone(action="play", title=titulo, url=url, language=idioma, contentType="episode"))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    itemlist.sort(key=lambda item: (int(item.infoLabels['episode']), item.title))
    try:
        from core import tmdb
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except:
        pass
    return itemlist
def dict_videos():
    """Scrape the advanced-search form and return (language, quality) lookups.

    Both returned dicts map the site's numeric option value to its display
    text.
    """
    idiomas_videos = {}
    calidad_videos = {}
    data = httptools.downloadpage("http://allpeliculas.co/Search/advancedSearch&ajax=1").data
    data = data.replace("\n", "").replace("\t", "")
    bloque_idioma = scrapertools.find_single_match(data,
                                                   '<select name="language".*?<option value="" selected(.*?)</select>')
    matches = scrapertools.find_multiple_matches(bloque_idioma, '<option value="([^"]+)" >(.*?)</option>')
    for key1, key2 in matches:
        # Python 2 only: normalise all-caps labels ("INGLES" -> "Ingles").
        idiomas_videos[key1] = unicode(key2, "utf8").capitalize().encode("utf8")
    bloque_calidad = scrapertools.find_single_match(data, '<select name="movieTypeId".*?<option value="" selected(.*?)'
                                                          '</select>')
    matches = scrapertools.find_multiple_matches(bloque_calidad, '<option value="([^"]+)" >(.*?)</option>')
    for key1, key2 in matches:
        calidad_videos[key1] = key2
    return idiomas_videos, calidad_videos
def dict_indices():
    """Scrape the advanced-search form and return the five index lookups.

    Returns (genres dict, alphabet list, languages dict, years list,
    qualities dict); dict keys are the site's numeric option values.
    """
    indice_genero = {}
    indice_alfa = list(string.ascii_uppercase)
    indice_alfa.append("0-9")
    indice_idioma = {}
    indice_year = []
    indice_calidad = {}
    data = httptools.downloadpage("http://allpeliculas.co/Search/advancedSearch&ajax=1").data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    bloque_genero = scrapertools.find_single_match(data, '<select name="movieGenre".*?<option value="" selected(.*?)'
                                                         '</select>')
    matches = scrapertools.find_multiple_matches(bloque_genero, '<option value="([^"]+)" >(.*?)</option>')
    for key1, key2 in matches:
        if key2 != "Series":
            # Localise the one English genre label the site ships.
            if key2 == "Mystery":
                key2 = "Misterio"
            indice_genero[key1] = key2
    bloque_year = scrapertools.find_single_match(data, '<select name="movieYear".*?<option value="" selected(.*?)'
                                                       '</select>')
    matches = scrapertools.find_multiple_matches(bloque_year, '<option value="([^"]+)"')
    for key1 in matches:
        indice_year.append(key1)
    bloque_idioma = scrapertools.find_single_match(data, '<select name="language".*?<option value="" selected(.*?)'
                                                         '</select>')
    matches = scrapertools.find_multiple_matches(bloque_idioma, '<option value="([^"]+)" >(.*?)</option>')
    for key1, key2 in matches:
        if key2 == "INGLES":
            key2 = "Versión original"
        # Python 2 only: capitalise the raw utf-8 label.
        indice_idioma[key1] = unicode(key2, "utf8").capitalize().encode("utf8")
    bloque_calidad = scrapertools.find_single_match(data, '<select name="movieTypeId".*?<option value="" selected(.*?)'
                                                          '</select>')
    matches = scrapertools.find_multiple_matches(bloque_calidad, '<option value="([^"]+)" >(.*?)</option>')
    for key1, key2 in matches:
        indice_calidad[key1] = key2
    return indice_genero, indice_alfa, indice_idioma, indice_year, indice_calidad

View File

@@ -136,6 +136,10 @@ def start(itemlist, item):
server_list = channel_node.get('servers', [])
quality_list = channel_node.get('quality', [])
# Si no se definen calidades la se asigna default como calidad unica
if len(quality_list) == 0:
quality_list =['default']
# Se guardan los textos de cada servidor y calidad en listas p.e. favorite_servers = ['openload',
# 'streamcloud']
for num in range(1, 4):
@@ -325,6 +329,8 @@ def init(channel, list_servers, list_quality):
change = True
# Se comprueba que no haya calidades ni servidores duplicados
if 'default' not in list_quality:
list_quality.append('default')
list_servers = list(set(list_servers))
list_quality = list(set(list_quality))

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
@@ -176,6 +176,8 @@ def findvideos(item):
data_function = scrapertools.find_single_match(data, '<!\[CDATA\[function (.+?)\]\]')
data_id = scrapertools.find_single_match(data,
"<script>\(adsbygoogle = window\.adsbygoogle \|\| \[\]\)\.push\({}\);<\/script><\/div><br \/>(.+?)<\/ins>")
if data_id == "":
data_id = scrapertools.find_single_match(data, "<p><center><br />.*?</center>")
itemla = scrapertools.find_multiple_matches(data_function, "src='(.+?)'")
serverid = scrapertools.find_multiple_matches(data_id, '<script>([^"]+)\("([^"]+)"\)')
for server, id in serverid:

View File

@@ -51,24 +51,21 @@ def mainlist(item):
host="http://cinecalidad.com/",
thumbnail=thumbmx,
extra="peliculas",
language='latino'
))
itemlist.append(item.clone(title="CineCalidad España",
itemlist.append(item.clone(title="CineCalidad Castellano",
action="submenu",
host="http://cinecalidad.com/espana/",
thumbnail=thumbes,
extra="peliculas",
language='castellano'
))
itemlist.append(
item.clone(title="CineCalidad Brasil",
item.clone(title="CineCalidad Portugues",
action="submenu",
host="http://cinemaqualidade.com/",
thumbnail=thumbbr,
extra="filmes",
language='portugues'
))
autoplay.show_option(item.channel, itemlist)
@@ -91,7 +88,6 @@ def submenu(item):
url=host,
thumbnail='https://s8.postimg.org/6wqwy2c2t/peliculas.png',
fanart='https://s8.postimg.org/6wqwy2c2t/peliculas.png',
language=item.language
))
itemlist.append(Item(channel=item.channel,
title="Destacadas",
@@ -99,7 +95,6 @@ def submenu(item):
url=host + "/genero-" + idioma + "/" + idioma2 + "/",
thumbnail='https://s30.postimg.org/humqxklsx/destacadas.png',
fanart='https://s30.postimg.org/humqxklsx/destacadas.png',
language=item.language
))
itemlist.append(Item(channel=item.channel,
title="Generos",
@@ -107,7 +102,6 @@ def submenu(item):
url=host + "/genero-" + idioma,
thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.org/5s9jg2wtf/generos.png',
language=item.language
))
itemlist.append(Item(channel=item.channel,
title="Por Año",
@@ -115,7 +109,6 @@ def submenu(item):
url=host + "/" + idioma + "-por-ano",
thumbnail='https://s8.postimg.org/7eoedwfg5/pora_o.png',
fanart='https://s8.postimg.org/7eoedwfg5/pora_o.png',
language=item.language
))
itemlist.append(Item(channel=item.channel,
title="Buscar",
@@ -124,7 +117,6 @@ def submenu(item):
url=host + '/apiseries/seriebyword/',
fanart='https://s30.postimg.org/pei7txpa9/buscar.png',
host=item.host,
language=item.language
))
return itemlist
@@ -199,6 +191,12 @@ def generos(item):
def peliculas(item):
logger.info()
itemlist = []
if 'espana' in host:
item.language = 'castellano'
elif 'cinecalidad' in host:
item.language = 'latino'
else:
item.language = 'portugues'
data = httptools.downloadpage(item.url).data
patron = '<div class="home_post_cont.*? post_box">.*?<a href="(.*?)".*?'
patron += 'src="(.*?)".*?title="(.*?) \((.*?)\).*?".*?p&gt;(.*?)&lt'

View File

@@ -40,7 +40,7 @@ def lista(item):
server="datoporn", fanart=scrapedthumbnail.replace("_t.jpg", ".jpg")))
# Extrae la marca de siguiente página
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">Next')
next_page = scrapertools.find_single_match(data, '<a href=["|\']([^["|\']+)["|\']>Next')
if next_page and itemlist:
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))

View File

@@ -18,7 +18,7 @@ headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/2
IDIOMAS = {'Latino': 'Latino', 'Sub Español': 'VOS'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['yourupload', 'openload', 'sendvid', '']
list_servers = ['yourupload', 'openload', 'sendvid']
vars = {
'ef5ca18f089cf01316bbc967fa10f72950790c39ef5ca18f089cf01316bbc967fa10f72950790c39': 'http://www.estadepelis.com/',

View File

@@ -6,23 +6,12 @@
"language": ["cast"],
"thumbnail": "http://imgur.com/iLeISt0.png",
"banner": "pasateatorrent.png",
"fanart": "http://imgur.com/uexmGEg.png",
"version": 1,
"changes": [
{
"date": "06/12/2016",
"description": "Release"
},
{
"date": "13/01/2017",
"description": "Arreglo sagas en peliculas que no se mostraban.Mejoras en series/info-capitulos"
},
{
"date": "04/04/2017",
"description": "Migración httptools.Adaptación proxy según Kodi sea igual o menor v.17.Pequeñas mejoras código"
},
{
"date": "28/06/2017",
"description": "Correciones código y algunas mejoras"
"date": "25/08/2017",
"description": "Revamp"
}
],
"categories": [
@@ -38,6 +27,14 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra (TMDB)",
"default": true,
"enabled": true,
"visible": true
}
]
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

View File

@@ -1,38 +0,0 @@
{
"id": "verseriesonlinetv",
"name": "Veriesonlinetv",
"active": true,
"adult": false,
"language": ["lat"],
"banner": "verseriesonlinetv.png",
"thumbnail": "http://s6.postimg.org/gl0ok4t01/verserieslogo.png",
"version": 1,
"changes": [
{
"date": "17/12/2016",
"description": "Mejora código y adaptación Infoplus"
},
{
"date": "04/04/2017",
"description": "Migración a Httptools"
},
{
"date": "28/06/2017",
"description": "Corrección código y algunas mejoras"
}
],
"categories": [
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

File diff suppressed because it is too large Load Diff

View File

@@ -3,7 +3,9 @@
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
@@ -38,7 +40,7 @@ def search(item, texto):
def categorias(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
patron = '<li><a href="([^"]+)">(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -52,7 +54,7 @@ def lista(item):
logger.info()
# Descarga la página
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
# Extrae las entradas de la pagina seleccionada
@@ -83,10 +85,8 @@ def play(item):
logger.info()
itemlist = []
# Descarga la página
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = scrapertools.unescape(data)
logger.info(data)
from core import servertools
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.thumbnail = item.thumbnail

View File

@@ -0,0 +1,32 @@
{
"id": "zonatorrent",
"name": "ZonaTorrent",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"banner": "",
"thumbnail": "https://zonatorrent.org/wp-content/uploads/2017/04/zonatorrent-New-Logo.png",
"version": 1,
"categories": [
"torrent",
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,159 @@
# -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import logger
__channel__ = "zonatorrent"
HOST = 'https://zonatorrent.org'
# BUGFIX: "config" was never imported in this module, so the original lookup
# always raised NameError and silently fell back to True.  Import it inside
# the try so the user's 'modo_grafico' setting is actually honoured.
try:
    from platformcode import config
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
    __modo_grafico__ = True
def mainlist(item):
    """Root menu of the zonatorrent channel."""
    logger.info()
    menu = [
        Item(channel=item.channel, title="Últimas Películas", action="listado", url=HOST, page=False),
        Item(channel=item.channel, title="Alfabético", action="alfabetico"),
        Item(channel=item.channel, title="Géneros", action="generos", url=HOST),
        Item(channel=item.channel, title="Más vistas", action="listado", url=HOST + "/peliculas-mas-vistas/"),
        Item(channel=item.channel, title="Más votadas", action="listado", url=HOST + "/peliculas-mas-votadas/"),
        Item(channel=item.channel, title="Castellano", action="listado", url=HOST + "/?s=spanish", page=True),
        Item(channel=item.channel, title="Latino", action="listado", url=HOST + "/?s=latino", page=True),
        Item(channel=item.channel, title="Subtitulado", action="listado", url=HOST + "/?s=Subtitulado", page=True),
        Item(channel=item.channel, title="Con Torrent", action="listado", url=HOST + "/?s=torrent", page=True),
        Item(channel=item.channel, title="Buscar", action="search", url=HOST + "/?s=", page=False),
    ]
    return menu
def alfabetico(item):
    """Build one listing entry per initial letter ('#' covers titles 0-9)."""
    logger.info()
    entries = []
    for letter in "#ABCDEFGHIJKLMNOPQRSTUVWXYZ":
        bucket = letter.replace("#", "0-9")
        entries.append(Item(channel=item.channel, action="listado", title=letter, page=True,
                            url=HOST + "/letters/%s/" % bucket))
    return entries
def generos(item):
    """List the genre links found in the page's "Generos" drop-down menu."""
    logger.info()
    itemlist = []
    # Strip newlines/tabs/double-space runs and HTML comments before matching.
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    # NOTE(review): the pattern matches '<ulclass=' with no space before
    # "class"; this only works if the cleanup above collapses that whitespace
    # -- confirm against the live site markup.
    data = scrapertools.find_single_match(data, '<a href="#">Generos</a><ulclass="sub-menu">(.*?)</ul>')
    matches = scrapertools.find_multiple_matches(data, '<a href="([^"]+)">(.*?)</a>')
    for url, title in matches:
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, page=True))
    return itemlist
def search(item, texto):
    """Run a site search for *texto*; on any scraping error log it and return []."""
    logger.info()
    # The search endpoint takes the query appended to ?s= with '+' separators.
    item.url = item.url + texto.replace(" ", "+")
    try:
        return listado(item)
    except:
        # Log exception type, value and traceback, then fail soft with an
        # empty result so the UI doesn't crash on a scraping error.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def listado(item):
    """Scrape a listing page into movie Items.

    Extracts url/thumbnail/title/duration/year/quality/plot from each movie
    card, enriches the resulting Items via TMDB and, when item.page is set,
    appends a ">> Página siguiente" pagination entry.
    """
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    pattern = '<a href="(?P<url>[^"]+)"><div[^>]+><figure[^>]+><img[^>]+src="(?P<thumb>[^"]+)"[^>]+></figure></div>' \
              '<h2 class="Title">(?P<title>.*?)</h2>.*?<span class="Time[^>]+>(?P<duration>.*?)</span><span ' \
              'class="Date[^>]+>(?P<year>.*?)</span><span class="Qlty">(?P<quality>.*?)</span></p><div ' \
              'class="Description"><p>.*?\:\s*(?P<plot>.*?)</p>'
    matches = re.compile(pattern, re.DOTALL).findall(data)
    for url, thumb, title, duration, year, quality, plot in matches:
        # Remove language/format markers and the year from the raw title.
        title = re.sub('Online|Spanish|Latino|Torrent|\d{4}', '', title).strip()
        infoLabels = {"year": year}
        # Duration comes as e.g. "1h 52m"; convert to seconds.
        # BUGFIX: find_single_match returns "" when the field doesn't match,
        # so indexing aux[0] unconditionally raised IndexError and aborted the
        # whole listing on one malformed card — only convert when it matched.
        aux = scrapertools.find_single_match(duration, "(\d+)h\s*(\d+)m")
        if aux:
            infoLabels["duration"] = "%s" % ((int(aux[0]) * 3600) + (int(aux[1]) * 60))
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                             contentTitle=title, plot=plot, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    if item.page:
        pattern = "<span class='page-numbers current'>[^<]+</span><a class='page-numbers' href='([^']+)'"
        url = scrapertools.find_single_match(data, pattern)
        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url, page=True,
                             thumbnail=get_thumb("next.png")))
    return itemlist
def findvideos(item):
    """Resolve playable sources for a movie: embedded player options and torrent rows."""
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    # The page HTML-escapes the embedded player markup; undo the entities we need.
    data = data.replace("&quot;", '"').replace("&lt;", '<')
    option_names = re.compile('data-TPlayerNv="Opt\d+">.*? <span>(.*?)</span></li>', re.DOTALL).findall(data)
    option_urls = re.compile('id="Opt\d+"><iframe[^>]+src="([^"]+)"', re.DOTALL).findall(data)
    # Only trust the pairing when tab labels and iframes line up one-to-one.
    if len(option_names) == len(option_urls):
        for idx, (name, link) in enumerate(zip(option_names, option_urls)):
            label = option_names[0] if idx == 0 else "Online %s " % name.strip()
            if "goo.gl" in link:
                # Shortened link: follow the redirect header without fetching the body.
                link = httptools.downloadpage(link, follow_redirects=False, only_headers=True)\
                    .headers.get("location", "")
            resolved = servertools.findvideos(link)
            if resolved:
                itemlist.append(Item(channel=item.channel, action="play", title=label, url=resolved[0][1],
                                     server=resolved[0][0], thumbnail=resolved[0][3], fulltitle=item.title))
    # Torrent table rows: link, description, language flag, quality.
    pattern = '<a[^>]+href="([^"]+)"[^<]+</a></td><td><span><img[^>]+>(.*?)</span></td><td><span><img[^>]+>(.*?)' \
              '</span></td><td><span>(.*?)</span>'
    for t_url, t_text, t_lang, t_quality in re.compile(pattern, re.DOTALL).findall(data):
        label = "%s %s - %s" % (t_text, t_lang, t_quality)
        itemlist.append(Item(channel=item.channel, action="play", title=label, url=t_url, server="torrent",
                             fulltitle=item.title, thumbnail=get_thumb("channels_torrent.png")))
    return itemlist

View File

@@ -446,9 +446,7 @@ def mkdir(path):
except:
logger.error("ERROR al crear el directorio: %s" % path)
logger.error(traceback.format_exc())
# platformtools.dialog_notification("Error al crear el directorio", path)
platformtools.dialog_ok("Alfa", "[COLOR red][B]IMPORTANTE[/B][/COLOR] - Instale y Ejecute el script 'Fix version 1.7.0', que se encuentra en el repositorio de Alfa y "
"vuelva a entrar en el addon, [B]si no lo hace tendrá problemas[/B]")
platformtools.dialog_notification("Error al crear el directorio", path)
return False
else:
return True

View File

@@ -14,7 +14,7 @@
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
"pattern": "(?s)https://drive.google.com/file/d/([^/]+)/preview",
"pattern": "(?s)https://(?:docs|drive).google.com/file/d/([^/]+)/preview",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{

View File

@@ -22,7 +22,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "//(?:www.)?ok.../(?:videoembed|video)/(\\d+)",
"pattern": "(?:www.)?ok.../(?:videoembed|video)/(\\d+)",
"url": "http://ok.ru/videoembed/\\1"
}
]
@@ -63,4 +63,4 @@
],
"thumbnail": "server_okru.png",
"version": 1
}
}

View File

@@ -18,7 +18,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:openload|oload).../(?:embed|f)/([0-9a-zA-Z-_]+)",
"pattern": "(?:openload|oload).*?/(?:embed|f)/([0-9a-zA-Z-_]+)",
"url": "https://openload.co/embed/\\1/"
}
]
@@ -57,4 +57,4 @@
],
"thumbnail": "server_openload.png",
"version": 1
}
}