Merge pull request #180 from Intel11/ultimo

Updated
This commit is contained in:
Alfa
2017-11-24 05:04:37 +01:00
committed by GitHub
7 changed files with 53 additions and 108 deletions

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
import re
@@ -25,29 +25,15 @@ list_quality = ['default']
def mainlist(item):
    logger.info()
    thumb_series = get_thumb("channels_tvshow.png")
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host,
                         thumbnail=thumb_series))
    autoplay.show_option(item.channel, itemlist)
    return itemlist
"""
def search(item, texto):
logger.info()
texto = texto.replace(" ","+")
item.url = item.url+texto
if texto!='':
return lista(item)
"""
def lista_gen(item):
    logger.info()
@@ -179,11 +165,10 @@ def findvideos(item):
    for link in itemla:
        if server in link:
            url = link.replace('" + ID' + server + ' + "', str(id))
            if "drive" in server:
                server1 = 'Gvideo'
            else:
                server1 = server
            itemlist.append(item.clone(url=url, action="play", server=server1,
                                       title="Enlace encontrado en %s " % (server1.capitalize())))
            itemlist.append(item.clone(url=url, action="play",
                                       title="Enlace encontrado en %s "
                                       ))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    scrapertools.printMatches(itemlist)
    autoplay.start(itemlist, item)
    return itemlist
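The change above moves server naming out of the channel: instead of mapping "drive" to 'Gvideo' by hand, each title keeps a %s placeholder and servertools.get_servers_itemlist fills it in after detecting the server from the URL. A minimal sketch of that pattern, with detect_server() as a hypothetical stand-in for the real detection:

# Hypothetical stand-in for servertools' URL-based server detection.
def detect_server(url):
    return 'gvideo' if 'drive' in url or 'googleusercontent' in url else 'powvideo'

urls = ['https://drive.google.com/file/d/abc', 'http://powvideo.net/xyz']
items = [{'url': u, 'title': 'Enlace encontrado en %s'} for u in urls]
for i in items:
    i['server'] = detect_server(i['url'])               # what get_servers_itemlist does per item
    i['title'] = i['title'] % i['server'].capitalize()  # 'Enlace encontrado en Gvideo', ...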

View File

@@ -2,11 +2,15 @@
import re
from core import filetools
from core import jsontools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core import videolibrarytools
from core.item import Item
from platformcode import config, logger
from platformcode import config, platformtools, logger
host = "http://www.clasicofilm.com/"
# Channel configuration
@@ -47,7 +51,6 @@ def mainlist(item):
def configuracion(item):
    from platformcode import platformtools
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret
@@ -55,13 +58,9 @@ def configuracion(item):
def search(item, texto):
    logger.info()
    data = httptools.downloadpage(host).data
    cx = scrapertools.find_single_match(data, "var cx = '([^']+)'")
    texto = texto.replace(" ", "%20")
    item.url = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz=filtered_cse&num=20&hl=es&sig=0c3990ce7a056ed50667fe0c3873c9b6&cx=%s&q=%s&sort=&googlehost=www.google.com&start=0" % (
        cx, texto)
    item.url = host + "search?q=%s" % texto
    try:
        return busqueda(item)
    # Catch the exception so the global search is not interrupted when one channel fails
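This search change drops the Google Custom Search endpoint (and the cx scraping it required) in favour of the site's own search page. A sketch of the new request, using urllib for escaping where the channel itself does a plain replace(" ", "%20"):

import urllib

host = "http://www.clasicofilm.com/"
texto = urllib.quote("cine clasico")     # Python 2 escaping, matching the addon's runtime
item_url = host + "search?q=%s" % texto  # '%' binds tighter than '+', so the format applies first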
@@ -104,7 +103,6 @@ def peliculas(item):
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, 'finddatepost\((\{.*?\]\}\})\);')
    from core import jsontools
    data = jsontools.load(data)["feed"]
    for entry in data["entry"]:
@@ -133,7 +131,6 @@ def peliculas(item):
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except:
        pass
    actualpage = int(scrapertools.find_single_match(item.url, 'start-index=(\d+)'))
    totalresults = int(data["openSearch$totalResults"]["$t"])
    if actualpage + 20 < totalresults:
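Paging through the Blogger feed works in windows of 20 via start-index, and the next page is only offered while the window stays below openSearch$totalResults. A worked example with assumed values:

actualpage = 21    # parsed from 'start-index=21' in the current URL
totalresults = 95  # from data["openSearch$totalResults"]["$t"]
if actualpage + 20 < totalresults:
    # same technique the code uses for the next-page URL: bump the index in place
    url = "http://example.feed/?start-index=21&max-results=20"  # hypothetical feed URL
    url_next = url.replace("start-index=%d" % actualpage, "start-index=%d" % (actualpage + 20))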
@@ -146,48 +143,22 @@ def peliculas(item):
def busqueda(item):
    logger.info()
    itemlist = []
    item.text_color = color2
    # Download the page
    data = httptools.downloadpage(item.url).data
    from core import jsontools
    data = jsontools.load(data)
    for entry in data["results"]:
        try:
            title = entry["richSnippet"]["metatags"]["ogTitle"]
            url = entry["richSnippet"]["metatags"]["ogUrl"]
            thumbnail = entry["richSnippet"]["metatags"]["ogImage"]
        except:
            continue
        try:
            title_split = re.split(r"\s*\((\d)", title, 1)
            year = title_split[1] + scrapertools.find_single_match(title_split[2], '(\d{3})\)')
            fulltitle = title_split[0]
        except:
            fulltitle = title
            year = ""
        if "DVD" not in title and "HDTV" not in title and "HD-" not in title:
            continue
        infolabels = {'year': year}
        new_item = item.clone(action="findvideos", title=title, fulltitle=fulltitle,
                              url=url, thumbnail=thumbnail, infoLabels=infolabels,
                              contentTitle=fulltitle, contentType="movie")
        itemlist.append(new_item)
    try:
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except:
        pass
    actualpage = int(scrapertools.find_single_match(item.url, 'start=(\d+)'))
    totalresults = int(data["cursor"]["resultCount"])
    if actualpage + 20 <= totalresults:
        url_next = item.url.replace("start=" + str(actualpage), "start=" + str(actualpage + 20))
        itemlist.append(Item(channel=item.channel, action="busqueda", title=">> Página Siguiente", url=url_next))
    patron = """post-title entry-titl.*?href='([^']+)'"""
    patron += """>([^<]+).*?"""
    patron += """src="([^"]+)"""
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        year = scrapertools.find_single_match(scrapedtitle, "\(([0-9]{4})\)")
        ctitle = scrapedtitle.split("(")[0].strip()
        itemlist.append(item.clone(action="findvideos",
                                   contentTitle=ctitle,
                                   infoLabels={"year": year},
                                   thumbnail=scrapedthumbnail,
                                   title=scrapedtitle,
                                   url=scrapedurl
                                   ))
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    return itemlist
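busqueda() now scrapes the results page directly with one three-capture regex (link, title, thumbnail) instead of parsing the CSE JSON. A self-contained check against a made-up snippet shaped like the markup the pattern expects:

import re

html = ("<h3 class='post-title entry-title'>"
        "<a href='http://example.com/casablanca-1942.html'>Casablanca (1942)</a></h3>"
        '<img src="http://example.com/thumb.jpg"/>')
patron = """post-title entry-titl.*?href='([^']+)'"""
patron += """>([^<]+).*?"""
patron += """src="([^"]+)"""
for url, title, thumb in re.findall(patron, html, re.DOTALL):
    m = re.search(r"\(([0-9]{4})\)", title)
    year = m.group(1) if m else ""
    ctitle = title.split("(")[0].strip()  # 'Casablanca'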
@@ -197,9 +168,10 @@ def generos(item):
    # Download the page
    data = httptools.downloadpage(item.url).data
    patron = '<b>([^<]+)</b><br/>\s*<script src="([^"]+)"'
    patron = '<b>([^<]+)</b><br\s*/>\s*<script src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedtitle, scrapedurl in matches:
        scrapedurl = scrapedurl.replace("&amp;", "&")
        scrapedurl = scrapedurl.replace("max-results=500", "start-index=1&max-results=20") \
            .replace("recentpostslist", "finddatepost")
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
@@ -210,13 +182,13 @@ def generos(item):
def findvideos(item):
    from core import servertools
    if item.infoLabels["tmdb_id"]:
        tmdb.set_infoLabels_item(item, __modo_grafico__)
    data = httptools.downloadpage(item.url).data
    iframe = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
    data = data.replace("googleusercontent", "malo")  # so it does not pick up bogus gvideo links
    if "goo.gl/" in iframe:
        data += httptools.downloadpage(iframe, follow_redirects=False, only_headers=True).headers.get("location", "")
    itemlist = servertools.find_video_items(item, data)
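For goo.gl short links the code deliberately does not follow the redirect: it requests only the headers and appends the Location target to the page data, so the server detector can match the resolved URL. Roughly equivalent behaviour with requests standing in for httptools (the short link is made up):

import requests

data = "<html>...page body...</html>"  # placeholder for the downloaded page
resp = requests.head("https://goo.gl/example", allow_redirects=False)  # hypothetical short link
data += resp.headers.get("location", "")  # append the target so find_video_items can match it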
@@ -226,13 +198,11 @@ def findvideos(item):
title = "Añadir película a la videoteca"
if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
try:
from core import filetools
movie_path = filetools.join(config.get_videolibrary_path(), 'CINE')
files = filetools.walk(movie_path)
for dirpath, dirname, filename in files:
for f in filename:
if item.infoLabels["imdb_id"] in f and f.endswith(".nfo"):
from core import videolibrarytools
head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, dirname, f))
canales = it.library_urls.keys()
canales.sort()
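The videolibrary lookup above walks the 'CINE' folder and matches .nfo files by IMDb id; filetools.walk mirrors os.walk. The same idea with the standard library and made-up paths:

import os

movie_path = "/storage/videolibrary/CINE"  # hypothetical library path
imdb_id = "tt0034583"
for dirpath, dirnames, filenames in os.walk(movie_path):
    for f in filenames:
        if imdb_id in f and f.endswith(".nfo"):
            nfo_path = os.path.join(dirpath, f)  # the movie's .nfo, ready for read_nfo()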

View File

@@ -696,7 +696,6 @@ def findvideos(item):
            item.clone(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                       plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels,
                       contentTitle=item.show, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
    scrapertools.printMatches(it2)
    it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
    it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
    for item in it2:

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# -*- Channel TVSeriesdk -*-
# -*- Channel Ver-peliculas -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
@@ -18,7 +18,7 @@ from core import tmdb
__channel__ = "ver-peliculas"
host = "http://ver-peliculas.org/"
host = "http://ver-peliculas.io/"
try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
@@ -122,10 +122,8 @@ def listado(item):
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    # logger.debug(data)
    pattern = '<a href="([^"]+)"[^>]+><img (?:src)?(?:data-original)?="([^"]+)".*?alt="([^"]+)"'
    matches = re.compile(pattern, re.DOTALL).findall(data)
    matches = scrapertools.find_multiple_matches(data, pattern)
    for url, thumb, title in matches:
        year = scrapertools.find_single_match(url, '-(\d+)-online')
        title = title.replace("Película", "", 1).partition(" /")[0].partition(":")[0]
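The title clean-up chain is easiest to see on an example: drop one leading "Película" tag, then cut everything after " /" and after ":". The values below are made up:

title = "Película Blade Runner / Ver online: HD"
title = title.replace("Película", "", 1).partition(" /")[0].partition(":")[0]
# -> ' Blade Runner'; the .strip() added to contentTitle below then yields 'Blade Runner'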
@@ -135,10 +133,9 @@ def listado(item):
infoLabels={"year": year},
url=url,
thumbnail=thumb,
contentTitle=title
contentTitle=title.strip()
))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
pagination = scrapertools.find_single_match(data, '<ul class="pagination">(.*?)</ul>')
if pagination:
next_page = scrapertools.find_single_match(pagination, '<a href="#">\d+</a>.*?<a href="([^"]+)">')
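The pagination regex keys off the current page, which the site renders as <a href="#">, and captures the first real link after it. A quick check on a trimmed pagination block:

import re

pagination = '<li><a href="#">2</a></li><li><a href="/peliculas/page/3">3</a></li>'
m = re.search(r'<a href="#">\d+</a>.*?<a href="([^"]+)">', pagination)
next_page = m.group(1) if m else ""  # '/peliculas/page/3'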
@@ -172,8 +169,7 @@ def findvideos(item):
    duplicated = []
    data = get_source(item.url)
    logger.debug(data)
    video_info = scrapertools.find_single_match(data, "load_player\('(.*?)','(.*?)'\);")
    video_info = scrapertools.find_single_match(data, "load_player\('([^']+).*?([^']+)")
    movie_info = scrapertools.find_single_match(item.url,
                                                'http:\/\/ver-peliculas\.(io|org)\/peliculas\/(\d+)-(.*?)-\d{4}-online\.')
    movie_host = movie_info[0]
@@ -186,7 +182,7 @@ def findvideos(item):
    video_list = json_data['lista']
    itemlist = []
    for videoitem in video_list:
        video_base_url = 'http://ver-peliculas.org/core/videofinal.php'
        video_base_url = host + 'core/videofinal.php'  # host already ends with '/'
        if video_list[videoitem] is not None:
            video_lang = video_list[videoitem]
            languages = ['latino', 'spanish', 'subtitulos']
@@ -200,28 +196,22 @@ def findvideos(item):
    playlist = jsontools.load(data)
    sources = playlist['playlist']
    server = playlist['server']
    for video_link in sources:
        url = video_link['sources']
        # if 'onevideo' in url:
        #     data = get_source(url)
        #     g_urls = servertools.findvideos(data=data)
        #     url = g_urls[0][1]
        #     server = g_urls[0][0]
        if url not in duplicated and server != 'drive':
            lang = lang.capitalize()
            if lang == 'Spanish':
                lang = 'Español'
            title = '(%s) %s (%s)' % (server, item.title, lang)
            title = 'Ver en %s [' + lang + ']'
            thumbnail = servertools.guess_server_thumbnail(server)
            itemlist.append(item.clone(title=title,
                                       url=url,
                                       server=server,
                                       thumbnail=thumbnail,
                                       action='play'
                                       ))
            duplicated.append(url)
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
@@ -235,6 +225,11 @@ def findvideos(item):
    return itemlist

def play(item):
    item.thumbnail = item.contentThumbnail
    return [item]

def newest(category):
    logger.info()
    item = Item()

View File

@@ -99,14 +99,12 @@ def peliculas(item):
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = 'class="post-item-image btn-play-item".*?'
    patron = '(?s)class="post-item-image btn-play-item".*?'
    patron += 'href="([^"]+)">.*?'
    patron += '<img data-original="([^"]+)".*?'
    patron += 'glyphicon-calendar"></i>([^<]+).*?'
    patron += 'post-item-flags"> (.*?)</div.*?'
    patron += 'post(.*?)</div.*?'
    patron += 'text-muted f-14">(.*?)</h3'
    matches = scrapertools.find_multiple_matches(data, patron)
    patron_next_page = 'href="([^"]+)"> &raquo;'
@@ -125,11 +123,11 @@ def peliculas(item):
            idiomas_disponibles.append(idiomas1[lang])
        if idiomas_disponibles:
            idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"
        contentTitle = scrapertoolsV2.htmlclean(scrapedtitle.strip())
        title = "%s %s" % (contentTitle, idiomas_disponibles)
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
                             thumbnail=scrapedthumbnail, contentTitle=contentTitle,
                             infoLabels={"year": year}, text_color=color1))
    # Fetch the basic data for all movies using multiple threads
    tmdb.set_infoLabels(itemlist)
@@ -188,7 +186,7 @@ def findvideos(item):
                               language=idioma.strip(),
                               title="Ver en %s %s" % (server, calidad)
                               ))
    for k in ["Español", "Latino", "Subtitulado", "Ingles"]:
    for k in ["Español", "Latino", "Ingles - Sub Español", "Ingles"]:
        lista_idioma = filter(lambda i: i.language == k, sublist)
        if lista_idioma:
            itemlist.append(item.clone(title=k, folder=False,
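The loop above emits links grouped by language in a fixed priority order; the update swaps "Subtitulado" for "Ingles - Sub Español". A compact sketch of that grouping over dummy items, with a list comprehension standing in for the Python 2 filter call:

sublist = [{'language': 'Ingles - Sub Español'}, {'language': 'Latino'}]
ordered = []
for k in ["Español", "Latino", "Ingles - Sub Español", "Ingles"]:
    ordered.extend(i for i in sublist if i['language'] == k)
# -> Latino first, then Ingles - Sub Español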

View File

@@ -16,8 +16,6 @@ def test_video_exists(page_url):
    referer = page_url.replace('iframe', 'preview')
    data = httptools.downloadpage(page_url, headers={'referer': referer}).data
    if "File was deleted" in data:
        return False, "[powvideo] El archivo no existe o ha sido borrado"
    if "el archivo ha sido borrado por no respetar" in data.lower():
        return False, "[powvideo] El archivo no existe o ha sido borrado por no respetar los Terminos de uso"

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:thevideo.me|tvad.me)/(?:embed-|)([A-z0-9]+)",
"pattern": "(?:thevideo.me|tvad.me|thevid.net)/(?:embed-|)([A-z0-9]+)",
"url": "http://thevideo.me/embed-\\1.html"
}
]
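A quick check that the extended pattern now also resolves thevid.net links to the same thevideo.me embed URL. The sample links are made up; note that [A-z0-9] is a wider character class than [A-Za-z0-9]:

import re

pattern = r"(?:thevideo.me|tvad.me|thevid.net)/(?:embed-|)([A-z0-9]+)"
for link in ["http://thevideo.me/embed-abc123.html", "http://thevid.net/xyz789"]:
    m = re.search(pattern, link)
    if m:
        embed = "http://thevideo.me/embed-%s.html" % m.group(1)  # same target for both hosts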