Merge pull request #448 from Intel11/master

Actualizados
This commit is contained in:
Alfa
2018-09-27 08:36:04 -05:00
committed by GitHub
12 changed files with 69 additions and 235 deletions

View File

@@ -2,13 +2,11 @@
import re
from core import filetools
from core import jsontools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core import videolibrarytools
from core.item import Item
from platformcode import config, platformtools, logger
from channelselector import get_thumb
@@ -32,19 +30,16 @@ else:
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Películas", text_color=color2, action="", text_bold=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades",
url="http://www.clasicofilm.com/feeds/posts/summary?start-index=1&max-results=20&alt=json-in-script&callback=finddatepost",
url = host + "feeds/posts/summary?start-index=1&max-results=20&alt=json-in-script&callback=finddatepost",
thumbnail=get_thumb('newest', auto=True), text_color=color1))
itemlist.append(item.clone(action="generos", title=" Por géneros", url=host,
thumbnail=get_thumb('genres', auto=True), text_color=color1))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3,
thumbnail=get_thumb('search', auto=True)))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
@@ -56,7 +51,6 @@ def configuracion(item):
def search(item, texto):
logger.info()
data = httptools.downloadpage(host).data
texto = texto.replace(" ", "%20")
item.url = host + "search?q=%s" % texto
try:
@@ -75,20 +69,17 @@ def newest(categoria):
item = Item()
try:
if categoria == 'peliculas':
item.url = "http://www.clasicofilm.com/feeds/posts/summary?start-index=1&max-results=20&alt=json-in-script&callback=finddatepost"
item.url = host + "feeds/posts/summary?start-index=1&max-results=20&alt=json-in-script&callback=finddatepost"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
@@ -96,13 +87,10 @@ def peliculas(item):
logger.info()
itemlist = []
item.text_color = color2
# Descarga la página
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, 'finddatepost\((\{.*?\]\}\})\);')
data = jsontools.load(data)["feed"]
for entry in data["entry"]:
for link in entry["link"]:
if link["rel"] == "alternate":
@@ -124,17 +112,12 @@ def peliculas(item):
url=url, thumbnail=thumbnail, infoLabels=infolabels,
contentTitle=fulltitle, contentType="movie")
itemlist.append(new_item)
try:
tmdb.set_infoLabels(itemlist, __modo_grafico__)
except:
pass
tmdb.set_infoLabels(itemlist, __modo_grafico__)
actualpage = int(scrapertools.find_single_match(item.url, 'start-index=(\d+)'))
totalresults = int(data["openSearch$totalResults"]["$t"])
if actualpage + 20 < totalresults:
url_next = item.url.replace("start-index=" + str(actualpage), "start-index=" + str(actualpage + 20))
itemlist.append(Item(channel=item.channel, action=item.action, title=">> Página Siguiente", url=url_next))
return itemlist
@@ -163,7 +146,6 @@ def busqueda(item):
def generos(item):
logger.info()
itemlist = []
# Descarga la página
data = httptools.downloadpage(item.url).data
patron = '<b>([^<]+)</b><br\s*/>\s*<script src="([^"]+)"'
@@ -174,50 +156,35 @@ def generos(item):
.replace("recentpostslist", "finddatepost")
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=item.thumbnail, text_color=color3))
itemlist.sort(key=lambda x: x.title)
return itemlist
def findvideos(item):
def decodifica_id(txt):
res = ''
for i in range(0, len(txt), 3):
res += '\\u0' + txt[i:i+3]
return res.decode('unicode-escape') #Ej: {"v":"9KD2iEmiYLsF"}
def findvideos(item):
logger.info()
itemlist = []
if item.infoLabels["tmdb_id"]:
tmdb.set_infoLabels_item(item, __modo_grafico__)
data = httptools.downloadpage(item.url).data
iframe = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
data = data.replace("googleusercontent","malo") # para que no busque enlaces erroneos de gvideo
if "goo.gl/" in iframe:
data += httptools.downloadpage(iframe, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist = servertools.find_video_items(item, data)
library_path = config.get_videolibrary_path()
if "data:text/javascript;base64" in data:
div_id = scrapertools.find_single_match(data, '<div id="([0-9a-fA-F]+)"')
# ~ logger.info(div_id)
vid_id = scrapertools.find_single_match(decodifica_id(div_id), ':"([^"]+)"')
# ~ logger.info(vid_id)
itemlist.append(item.clone(url='http://netu.tv/watch_video.php?v='+vid_id, server='netutv', action='play'))
else:
iframe = scrapertools.find_single_match(data, '<iframe width="720".*?src="([^"]+)"')
data = data.replace("googleusercontent","malo") # para que no busque enlaces erroneos de gvideo
if "goo.gl/" in iframe:
data += httptools.downloadpage(iframe, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist = servertools.find_video_items(item, data)
if config.get_videolibrary_support():
title = "Añadir película a la videoteca"
if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
try:
movie_path = filetools.join(config.get_videolibrary_path(), 'CINE')
files = filetools.walk(movie_path)
for dirpath, dirname, filename in files:
for f in filename:
if item.infoLabels["imdb_id"] in f and f.endswith(".nfo"):
head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, dirname, f))
canales = it.library_urls.keys()
canales.sort()
if "clasicofilm" in canales:
canales.pop(canales.index("clasicofilm"))
canales.insert(0, "[COLOR red]clasicofilm[/COLOR]")
title = "Película ya en tu videoteca. [%s] ¿Añadir?" % ",".join(canales)
break
except:
import traceback
logger.error(traceback.format_exc())
itemlist.append(item.clone(action="add_pelicula_to_library", title=title))
token_auth = config.get_setting("token_trakt", "tvmoviedb")
if token_auth and item.infoLabels["tmdb_id"]:
itemlist.append(item.clone(channel="tvmoviedb", title="[Trakt] Gestionar con tu cuenta", action="menu_trakt",
extra="movie"))
itemlist.append(item.clone(action="add_pelicula_to_library", title="Añadir película a la videoteca"))
return itemlist

View File

@@ -133,14 +133,15 @@ def episodios(item):
itemlist = []
infoLabels = {}
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data_lista = scrapertools.find_single_match(data,
'<ul class="episodios">(.+?)<\/ul><\/div><\/div><\/div>')
show = item.title
patron_caps = '<img alt=".+?" src="([^"]+)"><\/a><\/div><div class=".+?">([^"]+)<\/div>.+?'
patron_caps += '<a .+? href="([^"]+)">([^"]+)<\/a>'
patron = '(?s)<ul class="episodios">(.+?)<\/ul>'
data_lista = scrapertools.find_single_match(data,patron)
contentSerieName = item.title
patron_caps = 'href="([^"]+)".*?'
patron_caps += 'src="([^"]+)".*?'
patron_caps += 'numerando">([^<]+).*?'
patron_caps += 'link_go">.*?>([^<]+)'
matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
for scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle in matches:
for scrapedurl, scrapedthumbnail, scrapedtempepi, scrapedtitle in matches:
tempepi=scrapedtempepi.split(" - ")
if tempepi[0]=='Pel':
tempepi[0]=0
@@ -150,8 +151,8 @@ def episodios(item):
itemlist.append(item.clone(thumbnail=scrapedthumbnail,
action="findvideos", title=title, url=scrapedurl))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + contentSerieName + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=contentSerieName))
return itemlist
@@ -159,16 +160,17 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data1 = scrapertools.find_single_match(data,
'<div id="playex" .+?>(.+?)<\/nav>?\s<\/div><\/div>')
patron = '<div id="playex" .+?>(.+?)<\/nav>'
data1 = scrapertools.find_single_match(data, patron)
patron = "changeLink\('([^']+)'\)"
matches = re.compile(patron, re.DOTALL).findall(data1)
matches = scrapertools.find_multiple_matches(data1, patron)
for url64 in matches:
url1 =base64.b64decode(url64)
if 'danimados' in url1:
new_data = httptools.downloadpage('https:'+url1.replace('stream', 'stream_iframe')).data
logger.info("Intel33 %s" %new_data)
url = 'https:'+url1.replace('stream/', 'stream_iframe/')
id = scrapertools.find_single_match(url, 'iframe/(.*)')
url = url.replace(id, base64.b64encode(id))
new_data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(new_data, "sources: \[\{file:'([^']+)")
if "zkstream" in url:
url1 = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")

View File

@@ -1,12 +0,0 @@
{
"id": "pelispekes",
"name": "PelisPekes",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "pelispekes.png",
"banner": "pelispekes.png",
"categories": [
"movie"
]
}

View File

@@ -1,63 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
def mainlist(item):
    """Build the channel's front page: one entry per movie poster.

    Scrapes the PelisPekes home page (or *item.url* when the caller set
    one) and appends a ">> Página siguiente" entry when pagination is
    detected.
    """
    logger.info()
    itemlist = []
    if item.url == "":
        item.url = "http://www.pelispekes.com/"
    data = scrapertools.cachePage(item.url)
    # One <div class="poster-media-card"> per movie; capture url, title, thumb.
    patron = ('<div class="poster-media-card"[^<]+'
              '<a href="([^"]+)" title="([^"]+)"[^<]+'
              '<div class="poster"[^<]+'
              '<div class="title"[^<]+'
              '<span[^<]+</span[^<]+'
              '</div[^<]+'
              '<span class="rating"[^<]+'
              '<i[^<]+</i><span[^<]+</span[^<]+'
              '</span[^<]+'
              '<div class="poster-image-container"[^<]+'
              '<img width="\d+" height="\d+" src="([^"]+)"')
    for url, title, thumbnail in re.compile(patron, re.DOTALL).findall(data):
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, url=url,
                 thumbnail=thumbnail, fanart=thumbnail, plot="",
                 contentTitle=title, contentThumbnail=thumbnail))
    # Pagination: the chevron-right link points at the next results page.
    next_page_url = scrapertools.find_single_match(
        data, '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right')
    if next_page_url != "":
        itemlist.append(Item(channel=item.channel, action="mainlist",
                             title=">> Página siguiente", url=next_page_url,
                             viewmode="movie"))
    return itemlist
def findvideos(item):
    """Resolve playable links from a movie page via servertools.

    Also fills *item.plot* / *item.contentPlot* with the cleaned synopsis
    scraped from the page.
    """
    logger.info("item=" + item.tostring())
    data = scrapertools.cachePage(item.url)
    # The site proxies netu.tv through its own player URL; rewrite it back
    # so servertools recognizes the server.
    data = data.replace("www.pelispekes.com/player/tune.php?nt=", "netu.tv/watch_video.php?v=")
    plot = scrapertools.find_single_match(data, '<h2>Sinopsis</h2>(.*?)<div')
    plot = scrapertools.htmlclean(plot).strip()
    item.plot = plot
    item.contentPlot = plot
    logger.info("plot=" + plot)
    return servertools.find_video_items(item=item, data=data)

View File

@@ -1,6 +1,6 @@
{
"id": "seriesblanco",
"name": "SeriesBlanco",
"name": "SeriesBlanco.org",
"active": true,
"adult": false,
"language": ["cast", "lat"],

View File

@@ -3245,6 +3245,10 @@ msgctxt "#70135"
msgid "Custom Colours"
msgstr "Colores Personalizados"
msgctxt "#70136"
msgid "Tv Show"
msgstr "Serie"
msgctxt "#70137"
msgid "Movies"
msgstr "Películas"
@@ -4000,7 +4004,7 @@ msgid "Error in the user and/or password. Check your credentials"
msgstr "Error en el usuario y/o contraseña. Comprueba tus credenciales"
msgctxt "#70331"
msgid "Error during login. Check your credentials
msgid "Error during login. Check your credentials"
msgstr "Error durante el login. Comprueba tus credenciales"
msgctxt "#70332"
@@ -4052,7 +4056,7 @@ msgid "[Trakt] Remove %s from your watchlist"
msgstr "[Trakt] Eliminar %s de tu watchlist"
msgctxt "#70344"
msgid "Add to %s your watchlist""
msgid "Add to %s your watchlist"
msgstr "[Trakt] Añadir %s a tu watchlist"
msgctxt "#70345"
@@ -4060,7 +4064,7 @@ msgid "[Trakt] Remove %s from your collection"
msgstr "[Trakt] Eliminar %s de tu colección"
msgctxt "#70346"
msgid "[Trakt] Add %s to your collection
msgid "[Trakt] Add %s to your collection"
msgstr "[Trakt] Añadir %s a tu colección"
msgctxt "#70347"
@@ -4684,8 +4688,8 @@ msgid "Genre: "
msgstr "Género: "
msgctxt "#70500"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")"
msgstr "Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgstr "Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgctxt "#70501"
msgid "Search did not match (%s)"
@@ -4928,7 +4932,7 @@ msgid "Movies by Genre"
msgstr "Por generos"
msgctxt "#70561"
msgid "Search Similar
msgid "Search Similar"
msgstr "Buscar Similares"

View File

@@ -3245,6 +3245,10 @@ msgctxt "#70135"
msgid "Custom Colours"
msgstr "Colores Personalizados"
msgctxt "#70136"
msgid "Tv Show"
msgstr "Serie"
msgctxt "#70137"
msgid "Movies"
msgstr "Películas"
@@ -4000,7 +4004,7 @@ msgid "Error in the user and/or password. Check your credentials"
msgstr "Error en el usuario y/o contraseña. Comprueba tus credenciales"
msgctxt "#70331"
msgid "Error during login. Check your credentials
msgid "Error during login. Check your credentials"
msgstr "Error durante el login. Comprueba tus credenciales"
msgctxt "#70332"
@@ -4052,7 +4056,7 @@ msgid "[Trakt] Remove %s from your watchlist"
msgstr "[Trakt] Eliminar %s de tu watchlist"
msgctxt "#70344"
msgid "Add to %s your watchlist""
msgid "Add to %s your watchlist"
msgstr "[Trakt] Añadir %s a tu watchlist"
msgctxt "#70345"
@@ -4060,7 +4064,7 @@ msgid "[Trakt] Remove %s from your collection"
msgstr "[Trakt] Eliminar %s de tu colección"
msgctxt "#70346"
msgid "[Trakt] Add %s to your collection
msgid "[Trakt] Add %s to your collection"
msgstr "[Trakt] Añadir %s a tu colección"
msgctxt "#70347"
@@ -4684,8 +4688,8 @@ msgid "Genre: "
msgstr "Género: "
msgctxt "#70500"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")"
msgstr "Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgstr "Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgctxt "#70501"
msgid "Search did not match (%s)"
@@ -4928,7 +4932,7 @@ msgid "Movies by Genre"
msgstr "Por generos"
msgctxt "#70561"
msgid "Search Similar
msgid "Search Similar"
msgstr "Buscar Similares"

View File

@@ -22,7 +22,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
video_urls = []
videos = scrapertools.find_multiple_matches(unpacked, 'file:"([^"]+).*?label:"([^"]+)')
for video, label in videos:
video_urls.append([label + " [clipwatching]", video])
logger.info("Url: %s" % videos)
if ".jpg" not in video:
video_urls.append([label + " [clipwatching]", video])
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
return video_urls

View File

@@ -8,6 +8,8 @@ from platformcode import logger
def test_video_exists(page_url):
    """Check whether a Dailymotion video is still available.

    Returns (True, "") when the page looks playable, otherwise
    (False, reason) when the content was rejected or the page is a 404.
    """
    logger.info("(page_url='%s')" % page_url)
    response = httptools.downloadpage(page_url)
    gone = ("Contenido rechazado" in response.data) or (response.code == 404)
    if gone:
        return False, "[Dailymotion] El archivo no existe o ha sido borrado"
    return True, ""

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "streamix.cloud/(?:embed-|)([A-z0-9]+)",
"url": "http://streamix.cloud/embed-\\1.html"
}
]
},
"free": true,
"id": "streamixcloud",
"name": "streamixcloud",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/NuD85Py.png?1"
}

View File

@@ -1,30 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
    """Check whether a streamixcloud video is available.

    Returns (True, "") when playable, otherwise (False, reason) for
    deleted files or videos still being processed.
    """
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if any(marker in data for marker in ("Not Found", "File was deleted")):
        return False, "[streamixcloud] El archivo no existe o ha sido borrado"
    if "Video is processing" in data:
        return False, "[streamixcloud] El video se está procesando, inténtelo mas tarde"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Extract direct media URLs from a streamixcloud page.

    Downloads the page, unpacks the packed (p,a,c,k,e,d) player script and
    collects every ``file:"..."`` source found in it.

    Returns a list of ``[label, url]`` pairs; empty when no player script
    or no media sources are found (previously this path raised IndexError).
    """
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    video_urls = []
    patron = "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script"
    packed = scrapertools.find_single_match(data, patron)
    if not packed:
        # Page layout changed or video unavailable: nothing to unpack.
        return video_urls
    data = jsunpack.unpack(packed)
    media_url = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",')
    if not media_url:
        # Bug fix: original indexed media_url[0] and crashed on no matches.
        return video_urls
    # Label every source with the extension of the first file found.
    ext = scrapertools.get_filename_from_url(media_url[0])[-4:]
    for url in media_url:
        video_urls.append(["%s [streamixcloud]" % ext, url])
    return video_urls

View File

@@ -11,6 +11,8 @@ def test_video_exists(page_url):
data = httptools.downloadpage(page_url).data
if "Video not found..." in data:
return False, config.get_localized_string(70292) % "Thevid"
if "Video removed for inactivity..." in data:
return False, "[Thevid] El video ha sido removido por inactividad"
return True, ""