Merge remote-tracking branch 'alfa-addon/master'

This commit is contained in:
Unknown
2017-10-02 09:43:03 -03:00
32 changed files with 728 additions and 3748 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.1.0" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.1.2" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,11 +19,13 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» canalpelis » hdfull
» xdvideos » playmax
» cinetux » gnula
» flashx » rapidvideo
¤ arreglos internos
» cinecalidad » estadepelis
» datoporn » seriesyonkis
» allcalidad » allpeliculas
» cartoonlatino » pasateatorrent
» vidz7 » zonatorrent
» gvideo » okru
» openload ¤ arreglos internos
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -91,7 +91,8 @@ def peliculas(item):
thumbnail = thumbnail,
url = url,
contentTitle = titulo,
contentType="movie"
contentType="movie",
language = idioma
)
if year:
new_item.infoLabels['year'] = int(year)
@@ -137,8 +138,8 @@ def findvideos(item):
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
filtro=True, action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
extra="library"))
infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle
))
return itemlist

View File

@@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
import string
import urlparse
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
@@ -22,6 +24,7 @@ SERVERS = {"26": "powvideo", "45": "okru", "75": "openload", "12": "netutv", "65
"67": "spruto", "71": "stormo", "73": "idowatch", "48": "okru", "55": "openload",
"20": "nowvideo", "84": "fastplay", "96": "raptu", "94": "tusfiles"}
host = "http://allpeliculas.com/"
def mainlist(item):
logger.info()
@@ -29,32 +32,111 @@ def mainlist(item):
item.text_color = color1
itemlist.append(item.clone(title="Películas", action="lista", fanart="http://i.imgur.com/c3HS8kj.png",
url="http://allpeliculas.co/Movies/fullView/1/0/&ajax=1"))
itemlist.append(item.clone(title="Series", action="lista", fanart="http://i.imgur.com/9loVksV.png", extra="tv",
url="http://allpeliculas.co/Movies/fullView/1/86/?ajax=1&withoutFilter=1", ))
itemlist.append(item.clone(title="Géneros", action="subindice", fanart="http://i.imgur.com/ymazCWq.jpg"))
itemlist.append(item.clone(title="Índices", action="indices", fanart="http://i.imgur.com/c3HS8kj.png"))
url= host + "movies/newmovies?page=1", extra1 = 0))
itemlist.append(item.clone(title="Por genero", action="generos", fanart="http://i.imgur.com/c3HS8kj.png",
url= host + "movies/getGanres"))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(title="Buscar...", action="search"))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
def configuracion(item):
    """Open the channel settings dialog, then refresh the current listing."""
    from platformcode import platformtools
    result = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return result
def generos(item):
    """Build one listing entry per genre from the site's JSON genre endpoint."""
    logger.info()
    payload = httptools.downloadpage(item.url).data
    genres = jsontools.load(payload)
    # extra1 carries the genre id so lista() can filter by it later.
    return [Item(channel=item.channel,
                 action="lista",
                 title=genre['label'],
                 url=host + "movies/newmovies?page=1",
                 extra1=genre['id'])
            for genre in genres]
def findvideos(item):
    """List the playable links found on a movie detail page."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    pattern = 'data-link="([^"]+).*?>([^<]+)'
    found = scrapertools.find_multiple_matches(page, pattern)
    itemlist = [Item(channel=item.channel, action="play", title=quality, url=link)
                for link, quality in found]
    itemlist = servertools.get_servers_itemlist(itemlist)
    # Blank entry kept for presentation parity with the original listing.
    itemlist.append(Item(channel=item.channel))
    if config.get_videolibrary_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                             filtro=True, action="add_pelicula_to_library", url=item.url,
                             thumbnail=item.thumbnail,
                             infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle))
    try:
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except:
        pass
    return itemlist
def lista(item):
    """Render one page of the movie catalogue.

    POSTs the optional genre filter (item.extra1, set by generos()) to the
    JSON endpoint, emits one entry per movie and, unless this listing came
    from a search, appends a link to the next page.
    """
    logger.info()
    itemlist = []
    item.infoLabels = {}
    item.text_color = color2
    params = '{}'
    if item.extra1 != 0:
        # extra1 is the genre id selected in generos()
        params = jsontools.dump({"genero": [item.extra1]})
    data = httptools.downloadpage(item.url, post=params).data
    dict_data = jsontools.load(data)
    for it in dict_data["items"]:
        title = it["title"]
        url = host + "pelicula/" + it["slug"]
        thumb = urlparse.urljoin(host, it["image"])
        item.infoLabels['year'] = it["year"]
        itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url,
                                   thumbnail=thumb, plot=it["slogan"], context=["buscar_trailer"],
                                   contentTitle=title, contentType="movie"))
    pagina = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
    if pagina == "":
        pagina = "0"
    next_page = int(pagina) + 1
    # FIX: only rewrite the "page=N" token. The previous code did
    # item.url.replace(pagina, ""), which removed EVERY occurrence of the
    # page-number substring anywhere in the URL.
    item.url = item.url.replace("page=" + pagina, "page=%s" % next_page)
    if item.extra != "busqueda":
        itemlist.append(Item(channel=item.channel, action="lista", title="Pagina %s" % next_page,
                             url=item.url, extra1=item.extra1))
    try:
        # Fetch basic metadata for every movie using multiple threads.
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass
    return itemlist
def search(item, texto):
logger.info()
if texto != "":
texto = texto.replace(" ", "+")
item.url = "http://allpeliculas.co/Search/advancedSearch?searchType=movie&movieName=" + texto + "&ajax=1"
item.url = host + "/movies/search/" + texto
item.extra = "busqueda"
try:
return busqueda(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -68,7 +150,7 @@ def newest(categoria):
item = Item()
try:
if categoria == "peliculas":
item.url = "http://allpeliculas.co/Movies/fullView/1/0/&ajax=1"
item.url = host + "movies/newmovies?page=1"
item.action = "lista"
itemlist = lista(item)
@@ -83,402 +165,3 @@ def newest(categoria):
return []
return itemlist
def busqueda(item):
    """Parse the advanced-search results page into movie/show entries."""
    logger.info()
    itemlist = []
    item.infoLabels = {}
    item.text_color = color2
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    # Captures, in order: poster thumbnail, vote, detail URL, title, year, genre.
    patron = '<img class="poster" src="([^"]+)".*?<div class="vote-div-count".*?>(.*?)/.*?' \
             '<a class="movie-list-link" href="([^"]+)" title="([^"]+)".*?' \
             'Year:</b> (.*?) </p>.*?Género:</b> (.*?)</p>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for thumbnail, vote, url, title, year, genre in matches:
        url = "http://allpeliculas.co" + url.replace("#", "") + "&ajax=1"
        # Request larger poster sizes and URL-encode spaces.
        thumbnail = thumbnail.replace("/105/", "/400/").replace("/141/", "/600/").replace(" ", "%20")
        titulo = title + " (" + year + ")"
        item.infoLabels['year'] = year
        item.infoLabels['genre'] = genre
        item.infoLabels['rating'] = vote
        # Results whose genre contains "Series" are routed to the seasons view.
        if "Series" not in genre:
            itemlist.append(item.clone(action="findvideos", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail,
                                       context=["buscar_trailer"], contentTitle=title, contentType="movie"))
        else:
            itemlist.append(item.clone(action="temporadas", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail,
                                       context=["buscar_trailer"], contentTitle=title, contentType="tvshow"))
    # Pagination
    next_page = scrapertools.find_single_match(data, 'class="pagination-active".*?href="([^"]+)"')
    if next_page != "":
        url = next_page.replace("#", "") + "&ajax=1"
        itemlist.append(item.clone(action="lista", title=">> Siguiente", url=url, text_color=color3))
    return itemlist
def indices(item):
    """Top-level index menu: alphabetical, language, rating, year and quality."""
    logger.info()
    item.text_color = color1
    rating_url = ("http://allpeliculas.co/Movies/fullView/1/0/rating:imdb|date:1900-3000|"
                  "alphabet:all|?ajax=1&withoutFilter=1")
    entries = [
        item.clone(title="Alfabético", action="subindice"),
        item.clone(title="Por idioma", action="subindice"),
        item.clone(title="Por valoración", action="lista", url=rating_url),
        item.clone(title="Por año", action="subindice"),
        item.clone(title="Por calidad", action="subindice"),
    ]
    return entries
def lista(item):
    """Parse a fullView catalogue page into movie/show entries (legacy HTML scraper)."""
    logger.info()
    itemlist = []
    item.infoLabels = {}
    item.text_color = color2
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    bloque = scrapertools.find_single_match(data, '<div class="movies-block-main"(.*?)<div class="movies-'
                                                  'long-pagination"')
    # Captures, in order: thumbnail, url, trailer, vote, year, genre, language,
    # synopsis, quality, title.
    patron = '<div class="thumb"><img src="([^"]+)".*?<a href="([^"]+)".*?' \
             '(?:class="n-movie-trailer">([^<]+)<\/span>|<div class="imdb-votes">)' \
             '.*?<div class="imdb"><span>(.*?)</span>.*?<span>Year.*?">(.*?)</a>.*?<span>' \
             '(?:Género|Genre).*?<span>(.*?)</span>.*?<span>Language.*?<span>(.*?)</span>.*?' \
             '<div class="info-full-text".*?>(.*?)<.*?<div class="views">(.*?)<.*?' \
             '<div class="movie-block-title".*?>(.*?)<'
    if bloque == "":
        # Fall back to scanning the whole page when the listing wrapper is absent.
        bloque = data[:]
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for thumbnail, url, trailer, vote, year, genre, idioma, sinopsis, calidad, title in matches:
        url = url.replace("#", "") + "&ajax=1"
        # Request larger poster sizes and URL-encode spaces.
        thumbnail = thumbnail.replace("/157/", "/400/").replace("/236/", "/600/").replace(" ", "%20")
        # Normalize the comma-separated language list into a sorted "[a/b]" tag.
        idioma = idioma.replace(" ", "").split(",")
        idioma.sort()
        titleidioma = "[" + "/".join(idioma) + "]"
        titulo = title + " " + titleidioma + " [" + calidad + "]"
        item.infoLabels['plot'] = sinopsis
        item.infoLabels['year'] = year
        item.infoLabels['genre'] = genre
        item.infoLabels['rating'] = vote
        item.infoLabels['trailer'] = trailer.replace("youtu.be/", "http://www.youtube.com/watch?v=")
        if item.extra != "tv" or "Series" not in genre:
            itemlist.append(item.clone(action="findvideos", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail,
                                       context=["buscar_trailer"], contentTitle=title, contentType="movie"))
        else:
            itemlist.append(item.clone(action="temporadas", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail,
                                       context=["buscar_trailer"], contentTitle=title, show=title,
                                       contentType="tvshow"))
    try:
        from core import tmdb
        # Fetch basic metadata for every movie using multiple threads.
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass
    # Pagination
    next_page = scrapertools.find_single_match(data, 'class="pagination-active".*?href="([^"]+)"')
    if next_page != "":
        url = next_page.replace("#", "") + "&ajax=1"
        itemlist.append(item.clone(action="lista", title=">> Siguiente", url=url, text_color=color3))
    return itemlist
def subindice(item):
    """Second-level index listings, dispatched on the menu title in item.title."""
    logger.info()
    itemlist = []
    url_base = "http://allpeliculas.co/Movies/fullView/1/0/date:1900-3000|alphabet:all|?ajax=1&withoutFilter=1"
    indice_genero, indice_alfa, indice_idioma, indice_year, indice_calidad = dict_indices()
    if "Géneros" in item.title:
        for key, value in indice_genero.items():
            itemlist.append(item.clone(action="lista", title=value,
                                       url=url_base.replace("/0/", "/" + key + "/")))
        itemlist.sort(key=lambda it: it.title)
    elif "Alfabético" in item.title:
        for letter in indice_alfa:
            itemlist.append(item.clone(action="lista", title=letter,
                                       url=url_base.replace(":all", ":" + letter)))
    elif "Por idioma" in item.title:
        for key, value in indice_idioma.items():
            itemlist.append(item.clone(action="lista", title=value,
                                       url=url_base.replace("3000|", "3000|language:" + key)))
        itemlist.sort(key=lambda it: it.title)
    elif "Por año" in item.title:
        for year in indice_year:
            itemlist.append(item.clone(action="lista", title=year,
                                       url=url_base.replace("1900-3000", year + "-" + year)))
    elif "Por calidad" in item.title:
        for key, value in indice_calidad.items():
            url = "http://allpeliculas.co/Search/advancedSearch?searchType=movie&movieName=&movieDirector=&movieGenre" \
                  "=&movieActor=&movieYear=&language=&movieTypeId=" + key + "&ajax=1"
            itemlist.append(item.clone(action="busqueda", title=value, url=url))
        itemlist.sort(key=lambda it: it.title)
    return itemlist
def findvideos(item):
    """List the online and download links of a movie detail page."""
    logger.info()
    itemlist = []
    item.text_color = color3
    # Build the language and quality lookup tables.
    idiomas_videos, calidad_videos = dict_videos()
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    if item.extra != "library":
        try:
            from core import tmdb
            tmdb.set_infoLabels(item, __modo_grafico__)
        except:
            pass
    # Online links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?id_lang=' \
             '"([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, servidor_num, language, url in matches:
        # Server id 94 links are tusfiles unless the URL already points to stormo.tv.
        if servidor_num == '94' and not 'stormo.tv' in url:
            url = "http://tusfiles.org/?%s" % url
        if 'vimeo' in url:
            url += "|" + item.url
        if "filescdn" in url and url.endswith("htm"):
            url += "l"
        idioma = IDIOMAS.get(idiomas_videos.get(language))
        # The "%s" placeholder is later filled with the server name by get_servers_itemlist.
        titulo = "%s [" + idioma + "] [" + calidad_videos.get(calidad) + "]"
        itemlist.append(item.clone(action="play", title=titulo, url=url, language = idioma, extra=idioma))
    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?id_lang=' \
             '"([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, servidor_num, language, url in matches:
        idioma = IDIOMAS.get(idiomas_videos.get(language))
        titulo = "[%s] [" + idioma + "] [" + calidad_videos.get(calidad) + "]"
        itemlist.append(item.clone(action="play", title=titulo, url=url, language = idioma, extra=idioma))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    itemlist.sort(key=lambda item: (item.extra, item.server))
    if itemlist:
        if not "trailer" in item.infoLabels:
            trailer_url = scrapertools.find_single_match(data, 'class="n-movie-trailer">([^<]+)</span>')
            item.infoLabels['trailer'] = trailer_url.replace("youtu.be/", "http://www.youtube.com/watch?v=")
        itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                                   text_color="magenta", context=""))
        # NOTE(review): nesting of this block under "if itemlist:" reconstructed
        # from context — the diff view stripped the original indentation; confirm.
        if item.extra != "library":
            if config.get_videolibrary_support():
                itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
                                     action="add_pelicula_to_library", url=item.url, text_color="green",
                                     infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
                                     extra="library"))
    return itemlist
def temporadas(item):
    """List the seasons of a TV-show detail page."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    try:
        from core import tmdb
        tmdb.set_infoLabels_item(item, __modo_grafico__)
    except:
        pass
    matches = scrapertools.find_multiple_matches(data, '<a class="movie-season" data-id="([^"]+)"')
    # Deduplicate season ids before building the listing.
    matches = list(set(matches))
    for season in matches:
        item.infoLabels['season'] = season
        itemlist.append(item.clone(action="episodios", title="Temporada " + season, context=["buscar_trailer"],
                                   contentType="season"))
    itemlist.sort(key=lambda item: item.title)
    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass
    if not "trailer" in item.infoLabels:
        trailer_url = scrapertools.find_single_match(data, 'class="n-movie-trailer">([^<]+)</span>')
        item.infoLabels['trailer'] = trailer_url.replace("youtu.be/", "http://www.youtube.com/watch?v=")
    itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                               text_color="magenta", context=""))
    return itemlist
def episodios(item):
    """List the episodes of the season stored in item.infoLabels['season']."""
    logger.info()
    itemlist = []
    # Build the language and quality lookup tables.
    idiomas_videos, calidad_videos = dict_videos()
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    patron = '<li><a class="movie-episode"[^>]+season="' + str(item.infoLabels['season']) + '"[^>]+>([^<]+)</a></li>'
    matches = scrapertools.find_multiple_matches(data, patron)
    capitulos = []
    for title in matches:
        if not title in capitulos:
            # Assumes the title's second whitespace-separated token is the
            # episode number — TODO confirm against the site's markup.
            episode = int(title.split(" ")[1])
            capitulos.append(title)
            itemlist.append(
                item.clone(action="findvideostv", title=title, contentEpisodeNumber=episode, contentType="episode"))
    itemlist.sort(key=lambda item: item.contentEpisodeNumber)
    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass
    # NOTE: this loop deliberately rebinds the name `item` to each list entry.
    for item in itemlist:
        if item.infoLabels["episodio_titulo"]:
            item.title = "%dx%02d: %s" % (
                item.contentSeason, item.contentEpisodeNumber, item.infoLabels["episodio_titulo"])
        else:
            item.title = "%dx%02d: %s" % (item.contentSeason, item.contentEpisodeNumber, item.title)
    return itemlist
def findvideostv(item):
    """List the online and download links for one episode.

    Fixes two defects in the download-link loop of the previous revision:
    it unpacked five variables from a four-group regex (ValueError on the
    first match) and passed an undefined name ``server`` to item.clone()
    (NameError).
    """
    logger.info()
    itemlist = []
    # Build the language and quality lookup tables.
    idiomas_videos, calidad_videos = dict_videos()
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    # Online links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="%s' \
             '" season="%s" id_lang="([^"]+)".*?online-link="([^"]+)"' \
             % (str(item.infoLabels['episode']), str(item.infoLabels['season']))
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, language, url in matches:
        # Server id 94 links are tusfiles unless the URL already points to stormo.tv.
        if servidor_num == '94' and not 'stormo.tv' in url:
            url = "http://tusfiles.org/?%s" % url
        if 'vimeo' in url:
            url += "|" + item.url
        if "filescdn" in url and url.endswith("htm"):
            url += "l"
        idioma = IDIOMAS.get(idiomas_videos.get(language))
        # The "%s" placeholder is later filled with the server name by get_servers_itemlist.
        titulo = "%s [" + idioma + "] (" + calidad_videos.get(quality) + ")"
        itemlist.append(item.clone(action="play", title=titulo, url=url, language=idioma, contentType="episode"))
    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="%s' \
             '" season="%s" id_lang="([^"]+)".*?online-link="([^"]+)"' \
             % (str(item.infoLabels['episode']), str(item.infoLabels['season']))
    matches = scrapertools.find_multiple_matches(data, patron)
    # FIX: the pattern has four capture groups, so unpack four values; the old
    # code expected five and also passed an undefined `server` kwarg.
    for quality, servidor_num, language, url in matches:
        idioma = IDIOMAS.get(idiomas_videos.get(language))
        titulo = "%s [" + idioma + "] (" + calidad_videos.get(quality) + ")"
        itemlist.append(item.clone(action="play", title=titulo, url=url, language=idioma, contentType="episode"))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    itemlist.sort(key=lambda it: (int(it.infoLabels['episode']), it.title))
    try:
        from core import tmdb
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except:
        pass
    return itemlist
def dict_videos():
    """Scrape the advanced-search form for the language and quality tables.

    Returns two dicts mapping the form's option values to display labels.
    """
    data = httptools.downloadpage("http://allpeliculas.co/Search/advancedSearch&ajax=1").data
    data = data.replace("\n", "").replace("\t", "")
    option_re = '<option value="([^"]+)" >(.*?)</option>'
    language_block = scrapertools.find_single_match(
        data, '<select name="language".*?<option value="" selected(.*?)</select>')
    idiomas_videos = {}
    for value, label in scrapertools.find_multiple_matches(language_block, option_re):
        # Labels arrive upper-cased; capitalize for display (Python 2 bytes round-trip).
        idiomas_videos[value] = unicode(label, "utf8").capitalize().encode("utf8")
    quality_block = scrapertools.find_single_match(
        data, '<select name="movieTypeId".*?<option value="" selected(.*?)</select>')
    calidad_videos = dict(scrapertools.find_multiple_matches(quality_block, option_re))
    return idiomas_videos, calidad_videos
def dict_indices():
    """Scrape the advanced-search form and return the index lookup tables.

    Returns a 5-tuple: (genre dict, alphabet list, language dict, year list,
    quality dict); dicts map the form's option values to display labels.
    """
    indice_genero = {}
    # A-Z plus a final "0-9" bucket for titles starting with a digit.
    indice_alfa = list(string.ascii_uppercase)
    indice_alfa.append("0-9")
    indice_idioma = {}
    indice_year = []
    indice_calidad = {}
    data = httptools.downloadpage("http://allpeliculas.co/Search/advancedSearch&ajax=1").data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    bloque_genero = scrapertools.find_single_match(data, '<select name="movieGenre".*?<option value="" selected(.*?)'
                                                         '</select>')
    matches = scrapertools.find_multiple_matches(bloque_genero, '<option value="([^"]+)" >(.*?)</option>')
    for key1, key2 in matches:
        if key2 != "Series":
            if key2 == "Mystery":
                # Localize the one genre label the site leaves in English.
                key2 = "Misterio"
            indice_genero[key1] = key2
    bloque_year = scrapertools.find_single_match(data, '<select name="movieYear".*?<option value="" selected(.*?)'
                                                       '</select>')
    matches = scrapertools.find_multiple_matches(bloque_year, '<option value="([^"]+)"')
    for key1 in matches:
        indice_year.append(key1)
    bloque_idioma = scrapertools.find_single_match(data, '<select name="language".*?<option value="" selected(.*?)'
                                                         '</select>')
    matches = scrapertools.find_multiple_matches(bloque_idioma, '<option value="([^"]+)" >(.*?)</option>')
    for key1, key2 in matches:
        if key2 == "INGLES":
            key2 = "Versión original"
        # Capitalize for display (Python 2 bytes round-trip through unicode).
        indice_idioma[key1] = unicode(key2, "utf8").capitalize().encode("utf8")
    bloque_calidad = scrapertools.find_single_match(data, '<select name="movieTypeId".*?<option value="" selected(.*?)'
                                                          '</select>')
    matches = scrapertools.find_multiple_matches(bloque_calidad, '<option value="([^"]+)" >(.*?)</option>')
    for key1, key2 in matches:
        indice_calidad[key1] = key2
    return indice_genero, indice_alfa, indice_idioma, indice_year, indice_calidad

View File

@@ -6,8 +6,9 @@ import urllib
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import logger
from platformcode import logger, config
tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
"Drama": "https://s16.postimg.org/94sia332d/drama.png",
@@ -110,7 +111,7 @@ def lista(item):
url=next_page_url,
thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'
))
tmdb.set_infoLabels(itemlist)
return itemlist
@@ -157,29 +158,16 @@ def episodios(item):
itemlist = []
data = get_source(item.url)
patron = '<li id=epi-.*? class=list-group-item ><a href=(.*?) class=badge.*?width=25 title=(.*?)> <\/span>(.*?)<\/li>'
patron = '<li id=epi-.*? class=list-group-item ><a href=(.*?) class=badge.*?width=25 title=(.*?)> <\/span>(.*?) (\d+)<\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedlang, scrapedtitle in matches:
for scrapedurl, scrapedlang, scrapedtitle, episode in matches:
language = scrapedlang
title = scrapedtitle
title = scrapedtitle + " " + "1x" + episode
url = scrapedurl
itemlist.append(item.clone(title=title, url=url, action='findvideos', language=language))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
title = item.title
videoitem.channel = item.channel
videoitem.title = title
videoitem.action = 'play'
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir serie a la biblioteca", url=item.url, action="add_serie_to_library", extra="episodios", fanart=item.thumbnail, thumbnail=item.thumbnail, contentTitle=item.show, show=item.show))
return itemlist

View File

@@ -136,6 +136,10 @@ def start(itemlist, item):
server_list = channel_node.get('servers', [])
quality_list = channel_node.get('quality', [])
# Si no se definen calidades la se asigna default como calidad unica
if len(quality_list) == 0:
quality_list =['default']
# Se guardan los textos de cada servidor y calidad en listas p.e. favorite_servers = ['openload',
# 'streamcloud']
for num in range(1, 4):
@@ -325,6 +329,8 @@ def init(channel, list_servers, list_quality):
change = True
# Se comprueba que no haya calidades ni servidores duplicados
if 'default' not in list_quality:
list_quality.append('default')
list_servers = list(set(list_servers))
list_quality = list(set(list_quality))

View File

@@ -137,18 +137,18 @@ def peliculas(item):
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, rating, calidad, scrapedurl, year in matches[item.page:item.page + 20]:
if 'Próximamente' not in calidad and '-XXX.jpg' not in scrapedthumbnail:
for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches[item.page:item.page + 20]:
if 'Próximamente' not in quality and '-XXX.jpg' not in scrapedthumbnail:
scrapedtitle = scrapedtitle.replace('Ver ', '').strip()
contentTitle = scrapedtitle.partition(':')[0].partition(',')[0]
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
scrapedtitle, year, calidad)
scrapedtitle, year, quality)
itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year, 'rating': rating},
contentTitle=contentTitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer"))
title=title, context="buscar_trailer", quality = quality))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
@@ -367,7 +367,7 @@ def findvideos(item):
server = servertools.get_server_from_url(url)
title = "%s [COLOR yellow](%s) (%s)[/COLOR]" % (item.contentTitle, server.title(), lang)
itemlist.append(item.clone(action='play', url=url, title=title, extra1=title,
server=server, text_color=color3))
server=server, language = lang, text_color=color3))
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
@@ -176,6 +176,8 @@ def findvideos(item):
data_function = scrapertools.find_single_match(data, '<!\[CDATA\[function (.+?)\]\]')
data_id = scrapertools.find_single_match(data,
"<script>\(adsbygoogle = window\.adsbygoogle \|\| \[\]\)\.push\({}\);<\/script><\/div><br \/>(.+?)<\/ins>")
if data_id == "":
data_id = scrapertools.find_single_match(data, "<p><center><br />.*?</center>")
itemla = scrapertools.find_multiple_matches(data_function, "src='(.+?)'")
serverid = scrapertools.find_multiple_matches(data_id, '<script>([^"]+)\("([^"]+)"\)')
for server, id in serverid:

View File

@@ -108,9 +108,9 @@ def peliculas(item):
infolab = {'year': year}
itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, infoLabels=infolab,
contentTitle=title, contentType="movie"))
contentTitle=title, contentType="movie", quality=calidad))
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" href="([^"]+)"')
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)"')
if next_page:
itemlist.append(item.clone(title=">> Página Siguiente", url=next_page))

View File

@@ -51,24 +51,21 @@ def mainlist(item):
host="http://cinecalidad.com/",
thumbnail=thumbmx,
extra="peliculas",
language='latino'
))
itemlist.append(item.clone(title="CineCalidad España",
itemlist.append(item.clone(title="CineCalidad Castellano",
action="submenu",
host="http://cinecalidad.com/espana/",
thumbnail=thumbes,
extra="peliculas",
language='castellano'
))
itemlist.append(
item.clone(title="CineCalidad Brasil",
item.clone(title="CineCalidad Portugues",
action="submenu",
host="http://cinemaqualidade.com/",
thumbnail=thumbbr,
extra="filmes",
language='portugues'
))
autoplay.show_option(item.channel, itemlist)
@@ -91,7 +88,6 @@ def submenu(item):
url=host,
thumbnail='https://s8.postimg.org/6wqwy2c2t/peliculas.png',
fanart='https://s8.postimg.org/6wqwy2c2t/peliculas.png',
language=item.language
))
itemlist.append(Item(channel=item.channel,
title="Destacadas",
@@ -99,7 +95,6 @@ def submenu(item):
url=host + "/genero-" + idioma + "/" + idioma2 + "/",
thumbnail='https://s30.postimg.org/humqxklsx/destacadas.png',
fanart='https://s30.postimg.org/humqxklsx/destacadas.png',
language=item.language
))
itemlist.append(Item(channel=item.channel,
title="Generos",
@@ -107,7 +102,6 @@ def submenu(item):
url=host + "/genero-" + idioma,
thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.org/5s9jg2wtf/generos.png',
language=item.language
))
itemlist.append(Item(channel=item.channel,
title="Por Año",
@@ -115,7 +109,6 @@ def submenu(item):
url=host + "/" + idioma + "-por-ano",
thumbnail='https://s8.postimg.org/7eoedwfg5/pora_o.png',
fanart='https://s8.postimg.org/7eoedwfg5/pora_o.png',
language=item.language
))
itemlist.append(Item(channel=item.channel,
title="Buscar",
@@ -124,7 +117,6 @@ def submenu(item):
url=host + '/apiseries/seriebyword/',
fanart='https://s30.postimg.org/pei7txpa9/buscar.png',
host=item.host,
language=item.language
))
return itemlist
@@ -199,6 +191,12 @@ def generos(item):
def peliculas(item):
logger.info()
itemlist = []
if 'espana' in host:
item.language = 'castellano'
elif 'cinecalidad' in host:
item.language = 'latino'
else:
item.language = 'portugues'
data = httptools.downloadpage(item.url).data
patron = '<div class="home_post_cont.*? post_box">.*?<a href="(.*?)".*?'
patron += 'src="(.*?)".*?title="(.*?) \((.*?)\).*?".*?p&gt;(.*?)&lt'
@@ -298,7 +296,7 @@ def findvideos(item):
if server_id in server_url:
server = server_id.lower()
thumbnail = item.contentThumbnail
thumbnail = item.thumbnail
if server_id == 'TVM':
server = 'thevideo.me'
url = server_url[server_id] + video_id + '.html'
@@ -367,7 +365,7 @@ def play(item):
for videoitem in itemlist:
videoitem.title = item.fulltitle
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.contentThumbnail
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
else:
itemlist.append(item)

View File

@@ -364,7 +364,7 @@ def peliculas(item):
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, extra="media",
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle,
text_color=color2, contentType="movie"))
text_color=color2, contentType="movie", quality=calidad, language=audios))
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
if next_page != "" and item.title != "":
@@ -676,7 +676,7 @@ def get_enlaces(item, url, type):
titulo = " [%s/%s]" % (language, scrapedcalidad.strip())
itemlist.append(
item.clone(action="play", url=google_url, title=" Ver en Gvideo" + titulo, text_color=color2,
extra="", server="gvideo"))
extra="", server="gvideo", language=language, quality=scrapedcalidad.strip()))
patron = '<div class="available-source".*?data-url="([^"]+)".*?class="language.*?title="([^"]+)"' \
'.*?class="source-name.*?>\s*([^<]+)<.*?<span class="quality-text">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)

View File

@@ -133,8 +133,8 @@ def peliculas(item):
patron += 'href="([^"]+)"'
patron += '.*?(?:<span>|<span class="year">)([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, calidad, scrapedurl, scrapedyear in matches:
calidad = scrapertools.find_single_match(calidad, '.*?quality">([^<]+)')
for scrapedthumbnail, scrapedtitle, quality, scrapedurl, scrapedyear in matches:
quality = scrapertools.find_single_match(quality, '.*?quality">([^<]+)')
try:
fulltitle = scrapedtitle
year = scrapedyear.replace("&nbsp;", "")
@@ -143,11 +143,11 @@ def peliculas(item):
scrapedtitle = "%s (%s)" % (fulltitle, year)
except:
fulltitle = scrapedtitle
if calidad:
scrapedtitle += " [%s]" % calidad
if quality:
scrapedtitle += " [%s]" % quality
new_item = item.clone(action="findvideos", title=scrapedtitle, fulltitle=fulltitle,
url=scrapedurl, thumbnail=scrapedthumbnail,
contentTitle=fulltitle, contentType="movie")
contentTitle=fulltitle, contentType="movie", quality=quality)
if year:
new_item.infoLabels['year'] = int(year)
itemlist.append(new_item)
@@ -330,12 +330,14 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
if filtro_idioma == 3 or item.filtro:
lista_enlaces.append(item.clone(title=title, action="play", text_color=color2,
url=scrapedurl, server=scrapedserver, idioma=scrapedlanguage,
extra=item.url, contentThumbnail = item.thumbnail))
extra=item.url, contentThumbnail = item.thumbnail,
language=scrapedlanguage))
else:
idioma = dict_idiomas[language]
if idioma == filtro_idioma:
lista_enlaces.append(item.clone(title=title, text_color=color2, action="play", url=scrapedurl,
extra=item.url, contentThumbnail = item.thumbnail))
extra=item.url, contentThumbnail = item.thumbnail,
language=scrapedlanguage))
else:
if language not in filtrados:
filtrados.append(language)

View File

@@ -40,7 +40,7 @@ def lista(item):
server="datoporn", fanart=scrapedthumbnail.replace("_t.jpg", ".jpg")))
# Extrae la marca de siguiente página
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">Next')
next_page = scrapertools.find_single_match(data, '<a href=["|\']([^["|\']+)["|\']>Next')
if next_page and itemlist:
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))

View File

@@ -302,7 +302,7 @@ def findvideos(item):
if "partes" in title:
action = "extract_url"
new_item = Item(channel=item.channel, action=action, title=title, fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, parentContent=item, server = servername)
thumbnail=thumbnail, plot=plot, parentContent=item, server = servername, quality=calidad)
if comentarios.startswith("Ver en"):
itemlist_ver.append(new_item)
else:

View File

@@ -18,7 +18,7 @@ headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/2
IDIOMAS = {'Latino': 'Latino', 'Sub Español': 'VOS'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['yourupload', 'openload', 'sendvid', '']
list_servers = ['yourupload', 'openload', 'sendvid']
vars = {
'ef5ca18f089cf01316bbc967fa10f72950790c39ef5ca18f089cf01316bbc967fa10f72950790c39': 'http://www.estadepelis.com/',

View File

@@ -319,7 +319,7 @@ def fichas(item):
contentTitle = scrapedtitle.strip()
if scrapedlangs != ">":
textoidiomas = extrae_idiomas(scrapedlangs)
textoidiomas, language = extrae_idiomas(scrapedlangs)
#Todo Quitar el idioma
title += bbcode_kodi2html(" ( [COLOR teal][B]" + textoidiomas + "[/B][/COLOR])")
@@ -351,7 +351,7 @@ def fichas(item):
itemlist.append(
Item(channel=item.channel, action=action, title=title, url=url, fulltitle=title, thumbnail=thumbnail,
show=show, folder=True, contentType=contentType, contentTitle=contentTitle,
language =textoidiomas, infoLabels=infoLabels))
language =language, infoLabels=infoLabels))
## Paginación
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)">.raquo;</a>')
@@ -797,16 +797,17 @@ def agrupa_datos(data):
def extrae_idiomas(bloqueidiomas):
logger.info("idiomas=" + bloqueidiomas)
# Todo cambiar por lista
#textoidiomas=[]
language=[]
textoidiomas = ''
patronidiomas = '([a-z0-9]+).png"'
idiomas = re.compile(patronidiomas, re.DOTALL).findall(bloqueidiomas)
for idioma in idiomas:
# TODO quitar esto
textoidiomas = textoidiomas + idioma +" "
#textoidiomas.append(idioma.upper())
# TODO y dejar esto
language.append(idioma)
return textoidiomas
return textoidiomas, language
def bbcode_kodi2html(text):

View File

@@ -127,9 +127,8 @@ def findvideos(item):
matches = scrapertools.find_multiple_matches(data, patron)
for url, server, calidad, idioma in matches:
title = item.contentTitle
server = servertools.get_server_from_url(url)
title = '%s [%s] [%s] [%s]' % (item.contentTitle, server, calidad, idioma)
itemlist.append(item.clone(action="play", title=title, fulltitle = item.title, url=url, language = idioma,
contentTitle = item.contentTitle, quality = calidad, server = server))

View File

@@ -6,23 +6,12 @@
"language": ["cast"],
"thumbnail": "http://imgur.com/iLeISt0.png",
"banner": "pasateatorrent.png",
"fanart": "http://imgur.com/uexmGEg.png",
"version": 1,
"changes": [
{
"date": "06/12/2016",
"description": "Release"
},
{
"date": "13/01/2017",
"description": "Arreglo sagas en peliculas que no se mostraban.Mejoras en series/info-capitulos"
},
{
"date": "04/04/2017",
"description": "Migración httptools.Adaptación proxy según Kodi sea igual o menor v.17.Pequeñas mejoras código"
},
{
"date": "28/06/2017",
"description": "Correciones código y algunas mejoras"
"date": "25/08/2017",
"description": "Revamp"
}
],
"categories": [
@@ -38,6 +27,14 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra (TMDB)",
"default": true,
"enabled": true,
"visible": true
}
]
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -99,10 +99,10 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?' # img, title
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?' # img, title
patron += '<div class="rating"><span class="[^"]+"></span>([^<]+).*?' # rating
patron += '<span class="quality">([^<]+)</span><a href="([^"]+)">.*?' # calidad, url
patron += '<span>([^<]+)</span>' # year
patron += '<span>([^<]+)</span>' # year
matches = scrapertools.find_multiple_matches(data, patron)
@@ -271,7 +271,7 @@ def series(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">'
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">' # img, title, url
matches = scrapertools.find_multiple_matches(data, patron)
@@ -323,8 +323,8 @@ def temporadas(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<span class="title">([^<]+)<i>.*?' # numeros de temporadas
patron += '<img src="([^"]+)"></a></div>' # capítulos
patron = '<span class="title">([^<]+)<i>.*?' # season
patron += '<img src="([^"]+)"></a></div>' # img
matches = scrapertools.find_multiple_matches(data, patron)
if len(matches) > 1:
@@ -365,9 +365,9 @@ def episodios(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<div class="imagen"><a href="([^"]+)">.*?' # url cap, img
patron += '<div class="numerando">(.*?)</div>.*?' # numerando cap
patron += '<a href="[^"]+">([^<]+)</a>' # title de episodios
patron = '<div class="imagen"><a href="([^"]+)">.*?' # url
patron += '<div class="numerando">(.*?)</div>.*?' # numerando cap
patron += '<a href="[^"]+">([^<]+)</a>' # title de episodios
matches = scrapertools.find_multiple_matches(data, patron)
@@ -390,7 +390,6 @@ def episodios(item):
new_item.infoLabels['episode'] = episode.zfill(2)
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# TODO no hacer esto si estamos añadiendo a la videoteca
if not item.extra:
@@ -407,6 +406,8 @@ def episodios(item):
itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
reverse=config.get_setting('orden_episodios', __channel__))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# Opción "Añadir esta serie a la videoteca"
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
@@ -427,12 +428,13 @@ def findvideos(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for option, url in matches:
lang = scrapertools.find_single_match(data, '<li><a class="options" href="#option-%s">.*?<img '
'src="http://pedropolis.com/wp-content/themes/dooplay/assets/img'
'/flags/(\w+)' % option)
idioma = {'mx': '[COLOR cornflowerblue](LAT)[/COLOR]', 'pe': '[COLOR cornflowerblue](LAT)[/COLOR]',
'co': '[COLOR cornflowerblue](LAT)[/COLOR]', 'es': '[COLOR green](CAST)[/COLOR]',
'en': '[COLOR red](VOS)[/COLOR]', 'jp': '[COLOR green](VOS)[/COLOR]'}
lang = scrapertools.find_single_match(data, '<li><a class="options" href="#option-%s">.*?</b>-->(\w+)' % option)
lang = lang.lower()
idioma = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
'drive': '[COLOR cornflowerblue](LAT)[/COLOR]',
'castellano': '[COLOR green](CAST)[/COLOR]',
'subtitulado': '[COLOR red](VOS)[/COLOR]',
'ingles': '[COLOR red](VOS)[/COLOR]'}
if lang in idioma:
lang = idioma[lang]
@@ -455,10 +457,8 @@ def findvideos(item):
x.server.title(), x.quality, x.language)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'serie':
itemlist.append(Item(channel=__channel__,
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library",
thumbnail=get_thumb("videolibrary_movie.png"),
extra="findvideos", contentTitle=item.contentTitle))
thumbnail=get_thumb("videolibrary_movie.png"), contentTitle=item.contentTitle))
return itemlist

View File

@@ -213,9 +213,10 @@ def findvideos(item):
if 'openload' in url:
url = url + '|' + item.url
extra_info = title.split(' - ')
title = "%s - %s" % ('%s', title)
itemlist.append(Item(channel=item.channel, action="play", url=url, title=title, text_color=color3))
itemlist.append(Item(channel=item.channel, action="play", url=url, title=title, language=extra_info[0],
quality=extra_info[1],text_color=color3))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

View File

@@ -470,7 +470,7 @@ def findvideos(item):
videoitem.quality = 'default'
videoitem.language = 'Latino'
if videoitem.server != '':
videoitem.thumbnail = item.contentThumbnail
videoitem.thumbnail = item.thumbnail
else:
videoitem.thumbnail = item.thumbnail
videoitem.server = 'directo'

File diff suppressed because one or more lines are too long

View File

@@ -217,7 +217,7 @@ def findvideos(item):
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.action = 'play'
videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
videoitem.thumbnail = item.thumbnail
videoitem.infoLabels = item.infoLabels
videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
if 'youtube' in videoitem.url:

View File

@@ -1,38 +0,0 @@
{
"id": "verseriesonlinetv",
"name": "Veriesonlinetv",
"active": true,
"adult": false,
"language": ["lat"],
"banner": "verseriesonlinetv.png",
"thumbnail": "http://s6.postimg.org/gl0ok4t01/verserieslogo.png",
"version": 1,
"changes": [
{
"date": "17/12/2016",
"description": "Mejora código y adaptación Infoplus"
},
{
"date": "04/04/2017",
"description": "Migración a Httptools"
},
{
"date": "28/06/2017",
"description": "Corrección código y algunas mejoras"
}
],
"categories": [
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

File diff suppressed because it is too large Load Diff

View File

@@ -3,7 +3,9 @@
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
@@ -38,7 +40,7 @@ def search(item, texto):
def categorias(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
patron = '<li><a href="([^"]+)">(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -52,7 +54,7 @@ def lista(item):
logger.info()
# Descarga la página
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
# Extrae las entradas de la pagina seleccionada
@@ -83,10 +85,8 @@ def play(item):
logger.info()
itemlist = []
# Descarga la página
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = scrapertools.unescape(data)
logger.info(data)
from core import servertools
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.thumbnail = item.thumbnail

View File

@@ -0,0 +1,32 @@
{
"id": "zonatorrent",
"name": "ZonaTorrent",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"banner": "",
"thumbnail": "https://zonatorrent.org/wp-content/uploads/2017/04/zonatorrent-New-Logo.png",
"version": 1,
"categories": [
"torrent",
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,159 @@
# -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import logger
__channel__ = "zonatorrent"
HOST = 'https://zonatorrent.org'
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
def mainlist(item):
    """Build the channel's main menu: general listings, language filters and search."""
    logger.info()
    entradas = []
    add = entradas.append
    add(Item(channel=item.channel, title="Últimas Películas", action="listado", url=HOST, page=False))
    add(Item(channel=item.channel, title="Alfabético", action="alfabetico"))
    add(Item(channel=item.channel, title="Géneros", action="generos", url=HOST))
    add(Item(channel=item.channel, title="Más vistas", action="listado", url=HOST + "/peliculas-mas-vistas/"))
    add(Item(channel=item.channel, title="Más votadas", action="listado", url=HOST + "/peliculas-mas-votadas/"))
    # Language / source filters piggyback on the site's text search.
    add(Item(channel=item.channel, title="Castellano", action="listado", url=HOST + "/?s=spanish", page=True))
    add(Item(channel=item.channel, title="Latino", action="listado", url=HOST + "/?s=latino", page=True))
    add(Item(channel=item.channel, title="Subtitulado", action="listado", url=HOST + "/?s=Subtitulado", page=True))
    add(Item(channel=item.channel, title="Con Torrent", action="listado", url=HOST + "/?s=torrent", page=True))
    add(Item(channel=item.channel, title="Buscar", action="search", url=HOST + "/?s=", page=False))
    return entradas
def alfabetico(item):
    """Return one listing entry per initial letter; '#' maps to the site's '0-9' index."""
    logger.info()
    return [Item(channel=item.channel, action="listado", title=letra, page=True,
                 url=HOST + "/letters/%s/" % letra.replace("#", "0-9"))
            for letra in "#ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
def generos(item):
    """Scrape the genre links from the home page's "Generos" dropdown."""
    logger.info()
    raw = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    # Narrow to the submenu first, then pull every link out of it.
    # NOTE(review): '<ulclass=' relies on the whitespace stripping above
    # collapsing '<ul  class=' — confirm against the live markup.
    bloque = scrapertools.find_single_match(raw, '<a href="#">Generos</a><ulclass="sub-menu">(.*?)</ul>')
    enlaces = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)">(.*?)</a>')
    resultado = []
    for enlace, nombre in enlaces:
        resultado.append(Item(channel=item.channel, action="listado", title=nombre, url=enlace, page=True))
    return resultado
def search(item, texto):
    """Global-search entry point.

    Appends the query to the base search URL and delegates to listado().
    Returns an empty list on any scraping error so a single failing channel
    does not interrupt the global search aggregator.
    """
    logger.info()
    item.url = item.url + texto.replace(" ", "+")
    try:
        return listado(item)
    except Exception:
        # Log the full traceback instead of iterating sys.exc_info(); the
        # original bare 'except:' also swallowed SystemExit/KeyboardInterrupt.
        import traceback
        logger.error(traceback.format_exc())
        return []
def listado(item):
    """Parse a grid page of movies and return one Item per entry.

    Looks up TMDB artwork/metadata for the whole list and, when item.page is
    set, appends a "next page" entry taken from the paginator.
    """
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    pattern = '<a href="(?P<url>[^"]+)"><div[^>]+><figure[^>]+><img[^>]+src="(?P<thumb>[^"]+)"[^>]+></figure></div>' \
              '<h2 class="Title">(?P<title>.*?)</h2>.*?<span class="Time[^>]+>(?P<duration>.*?)</span><span ' \
              'class="Date[^>]+>(?P<year>.*?)</span><span class="Qlty">(?P<quality>.*?)</span></p><div ' \
              'class="Description"><p>.*?\:\s*(?P<plot>.*?)</p>'
    matches = re.compile(pattern, re.DOTALL).findall(data)
    for url, thumb, title, duration, year, quality, plot in matches:
        # Strip SEO noise (language tags, "Online"/"Torrent", stray years) from the title.
        title = re.sub('Online|Spanish|Latino|Torrent|\d{4}', '', title)
        infoLabels = {"year": year}
        # Duration is scraped as e.g. "1h 45m". Guard against cards without it:
        # the original code did int('') on a failed match and aborted the listing.
        aux = scrapertools.find_single_match(duration, "(\d+)h\s*(\d+)m")
        if aux:
            infoLabels["duration"] = "%s" % ((int(aux[0]) * 3600) + (int(aux[1]) * 60))
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                             contentTitle=title, plot=plot, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    if item.page:
        pattern = "<span class='page-numbers current'>[^<]+</span><a class='page-numbers' href='([^']+)'"
        url = scrapertools.find_single_match(data, pattern)
        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url, page=True,
                             thumbnail=get_thumb("next.png")))
    return itemlist
def findvideos(item):
    """Collect playable links (online mirrors and torrents) from a movie page.

    Online players live in "Opt<N>" tabs: titles come from the tab headers and
    iframe URLs from the matching tab bodies; both lists must align by index.
    Torrent rows are scraped from a separate table at the bottom of the page.
    """
    logger.info()
    itemlist = []
    # Flatten whitespace/comments, then undo the HTML escaping the page applies
    # to the embedded player markup so the regexes below can match it.
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = re.sub(r"&quot;", '"', data)
    data = re.sub(r"&lt;", '<', data)
    titles = re.compile('data-TPlayerNv="Opt\d+">.*? <span>(.*?)</span></li>', re.DOTALL).findall(data)
    urls = re.compile('id="Opt\d+"><iframe[^>]+src="([^"]+)"', re.DOTALL).findall(data)
    # Only pair titles with URLs when the two lists line up one-to-one;
    # a mismatch means the page layout changed and pairing would be wrong.
    if len(titles) == len(urls):
        for i in range(0, len(titles)):
            if i > 0:
                title = "Online %s " % titles[i].strip()
            else:
                title = titles[0]
            # goo.gl entries are shorteners: resolve the redirect target
            # without following it (headers only) to get the real player URL.
            if "goo.gl" in urls[i]:
                urls[i] = httptools.downloadpage(urls[i], follow_redirects=False, only_headers=True)\
                    .headers.get("location", "")
            # servertools.findvideos returns tuples; presumably
            # (server, url, ..., thumbnail) given the indexing below — TODO confirm.
            videourl = servertools.findvideos(urls[i])
            if len(videourl) > 0:
                itemlist.append(Item(channel=item.channel, action="play", title=title, url=videourl[0][1],
                                     server=videourl[0][0], thumbnail=videourl[0][3], fulltitle=item.title))
    # Torrent table: link, description text, language flag and quality per row.
    pattern = '<a[^>]+href="([^"]+)"[^<]+</a></td><td><span><img[^>]+>(.*?)</span></td><td><span><img[^>]+>(.*?)' \
              '</span></td><td><span>(.*?)</span>'
    torrents = re.compile(pattern, re.DOTALL).findall(data)
    if len(torrents) > 0:
        for url, text, lang, quality in torrents:
            title = "%s %s - %s" % (text, lang, quality)
            itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, server="torrent",
                                 fulltitle=item.title, thumbnail=get_thumb("channels_torrent.png")))
    return itemlist

View File

@@ -446,9 +446,7 @@ def mkdir(path):
except:
logger.error("ERROR al crear el directorio: %s" % path)
logger.error(traceback.format_exc())
# platformtools.dialog_notification("Error al crear el directorio", path)
platformtools.dialog_ok("Alfa", "[COLOR red][B]IMPORTANTE[/B][/COLOR] - Instale y Ejecute el script 'Fix version 1.7.0', que se encuentra en el repositorio de Alfa y "
"vuelva a entrar en el addon, [B]si no lo hace tendrá problemas[/B]")
platformtools.dialog_notification("Error al crear el directorio", path)
return False
else:
return True

View File

@@ -14,7 +14,7 @@
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
"pattern": "(?s)https://drive.google.com/file/d/([^/]+)/preview",
"pattern": "(?s)https://(?:docs|drive).google.com/file/d/([^/]+)/preview",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{

View File

@@ -22,7 +22,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "//(?:www.)?ok.../(?:videoembed|video)/(\\d+)",
"pattern": "(?:www.)?ok.../(?:videoembed|video)/(\\d+)",
"url": "http://ok.ru/videoembed/\\1"
}
]
@@ -63,4 +63,4 @@
],
"thumbnail": "server_okru.png",
"version": 1
}
}

View File

@@ -18,7 +18,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:openload|oload).../(?:embed|f)/([0-9a-zA-Z-_]+)",
"pattern": "(?:openload|oload).*?/(?:embed|f)/([0-9a-zA-Z-_]+)",
"url": "https://openload.co/embed/\\1/"
}
]
@@ -57,4 +57,4 @@
],
"thumbnail": "server_openload.png",
"version": 1
}
}