Merge pull request #100 from Intel11/patch-1

Actualizados
This commit is contained in:
Alfa
2017-09-26 22:58:17 +02:00
committed by GitHub
6 changed files with 83 additions and 83 deletions

View File

@@ -1,7 +1,5 @@
# -*- coding: utf-8 -*-
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
@@ -48,13 +46,11 @@ def mainlist(item):
"/0/Genre.png",
text_color=color1))
url = urlparse.urljoin(CHANNEL_HOST, "genero/documental/")
itemlist.append(item.clone(title="Documentales", text_bold=True, text_color=color2, action=""))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url=url, text_color=color1,
itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "genero/documental/", text_color=color1,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
"/0/Documentaries.png"))
url = urlparse.urljoin(CHANNEL_HOST, "genero/documental/?orderby=title&order=asc&gdsr_order=asc")
itemlist.append(item.clone(action="peliculas", title=" Por orden alfabético", text_color=color1, url=url,
itemlist.append(item.clone(action="peliculas", title=" Por orden alfabético", text_color=color1, url=CHANNEL_HOST + "genero/documental/?orderby=title&order=asc&gdsr_order=asc",
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
"/0/A-Z.png"))
itemlist.append(item.clone(title="", action=""))
@@ -100,7 +96,7 @@ def newest(categoria):
itemlist.pop()
elif categoria == 'documentales':
item.url = urlparse.urljoin(CHANNEL_HOST, "genero/documental/")
item.url = CHANNEL_HOST + "genero/documental/"
item.action = "peliculas"
itemlist = peliculas(item)
@@ -108,7 +104,7 @@ def newest(categoria):
itemlist.pop()
elif categoria == 'infantiles':
item.url = urlparse.urljoin(CHANNEL_HOST, "genero/infantil/")
item.url = CHANNEL_HOST + "genero/infantil/"
item.action = "peliculas"
itemlist = peliculas(item)
@@ -130,7 +126,6 @@ def peliculas(item):
itemlist = []
item.text_color = color2
# Descarga la página
data = httptools.downloadpage(item.url).data
patron = '(?s)class="(?:result-item|item movies)">.*?<img src="([^"]+)'
patron += '.*?alt="([^"]+)"'
@@ -156,11 +151,6 @@ def peliculas(item):
if year:
new_item.infoLabels['year'] = int(year)
itemlist.append(new_item)
try:
# tmdb.set_infoLabels(itemlist, __modo_grafico__)
a = 1
except:
pass
# Extrae el paginador
next_page_link = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)')
@@ -218,7 +208,6 @@ def generos(item):
scrapedtitle = unicode(scrapedtitle, "utf8").capitalize().encode("utf8")
if scrapedtitle == "Erotico" and config.get_setting("adult_mode") == 0:
continue
itemlist.append(item.clone(action="peliculas", title=scrapedtitle, url=scrapedurl))
return itemlist
@@ -228,9 +217,9 @@ def idioma(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="peliculas", title="Español", url="http://www.cinetux.net/idioma/espanol/"))
itemlist.append(item.clone(action="peliculas", title="Latino", url="http://www.cinetux.net/idioma/latino/"))
itemlist.append(item.clone(action="peliculas", title="VOSE", url="http://www.cinetux.net/idioma/subtitulado/"))
itemlist.append(item.clone(action="peliculas", title="Español", url= CHANNEL_HOST + "idioma/espanol/"))
itemlist.append(item.clone(action="peliculas", title="Latino", url= CHANNEL_HOST + "idioma/latino/"))
itemlist.append(item.clone(action="peliculas", title="VOSE", url= CHANNEL_HOST + "idioma/subtitulado/"))
return itemlist
@@ -290,7 +279,6 @@ def findvideos(item):
else:
itemlist.append(item.clone(title="No hay enlaces disponibles", action="", text_color=color3))
return itemlist
@@ -342,12 +330,12 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
if filtro_idioma == 3 or item.filtro:
lista_enlaces.append(item.clone(title=title, action="play", text_color=color2,
url=scrapedurl, server=scrapedserver, idioma=scrapedlanguage,
extra=item.url))
extra=item.url, contentThumbnail = item.thumbnail))
else:
idioma = dict_idiomas[language]
if idioma == filtro_idioma:
lista_enlaces.append(item.clone(title=title, text_color=color2, action="play", url=scrapedurl,
extra=item.url))
extra=item.url, contentThumbnail = item.thumbnail))
else:
if language not in filtrados:
filtrados.append(language)
@@ -363,7 +351,6 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
def play(item):
logger.info()
itemlist = []
video_urls = []
if "api.cinetux" in item.url:
data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
@@ -379,15 +366,6 @@ def play(item):
scrapedurl = httptools.downloadpage(scrapedurl, follow_redirects=False, only_headers=True).headers.get(
"location", "")
item.url = scrapedurl
else:
return [item]
itemlist.append(
Item(channel = item.channel,
action = "play",
fulltitle = item.fulltitle,
thumbnail = item.thumbnail,
server = "",
url = item.url
))
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
item.thumbnail = item.contentThumbnail
item.server = servertools.get_server_from_url(item.url)
return [item]

View File

@@ -1,24 +1,24 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
host = "http://gnula.nu/"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Estrenos", action="peliculas",
url="http://gnula.nu/peliculas-online/lista-de-peliculas-online-parte-1/", viewmode="movie"))
url= host +"peliculas-online/lista-de-peliculas-online-parte-1/", viewmode="movie"))
itemlist.append(
Item(channel=item.channel, title="Generos", action="generos", url="http://gnula.nu/generos/lista-de-generos/"))
Item(channel=item.channel, title="Generos", action="generos", url= host + "generos/lista-de-generos/"))
itemlist.append(Item(channel=item.channel, title="Recomendadas", action="peliculas",
url="http://gnula.nu/peliculas-online/lista-de-peliculas-recomendadas/", viewmode="movie"))
# itemlist.append( Item(channel=item.channel, title="Portada" , action="portada" , url="http://gnula.nu/"))
url= host + "peliculas-online/lista-de-peliculas-recomendadas/", viewmode="movie"))
return itemlist
@@ -26,23 +26,23 @@ def generos(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
# <span style="font-weight: bold;">Lista de géneros</span><br/>
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<spa[^>]+>Lista de g(.*?)/table')
# <strong>Historia antigua</strong> [<a href="http://gnula.nu/generos/lista-de-peliculas-del-genero-historia-antigua/"
patron = '<strong>([^<]+)</strong> .<a href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for genero, scrapedurl in matches:
title = scrapertools.htmlclean(genero)
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
url = item.url + scrapedurl
thumbnail = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action='peliculas', title=title, url=url, thumbnail=thumbnail, plot=plot,
extra=title, viewmode="movie"))
itemlist.append(Item(channel = item.channel,
action = 'peliculas',
title = title,
url = url,
thumbnail = thumbnail,
plot = plot,
viewmode = "movie"))
itemlist = sorted(itemlist, key=lambda item: item.title)
@@ -52,17 +52,9 @@ def generos(item):
def peliculas(item):
logger.info()
'''
<a class="Ntooltip" href="http://gnula.nu/comedia-romantica/ver-with-this-ring-2015-online/">With This Ring<span><br/>
<img src="http://gnula.nu/wp-content/uploads/2015/06/With_This_Ring2.gif"></span></a> [<span style="color: #33ccff;">18/07/15</span> <span style="color: #33ff33;">(VS)</span><span style="color: red;">(VC)</span><span style="color: #cc66cc;">(VL)</span>] [<span style="color: #ffcc99;">HD-R</span>]&#8212;&#8211;<strong>Comedia, Romántica</strong><br/>
'''
'''
<a class="Ntooltip" href="http://gnula.nu/aventuras/ver-las-aventuras-de-tintin-el-secreto-del-unicornio-2011-online/">The Adventures of Tintin<span><br />
<img src="http://gnula.nu/wp-content/uploads/2015/07/The_Adventures_of_Tintin_Secret_of_the_Unicorn2.gif"></span></a> (2011) [<span style="color: #33ccff;">10/07/15</span> <span style="color: #33ff33;">(VS)</span><span style="color: red;">(VC)</span><span style="color: #cc66cc;">(VL)</span>] [<span style="color: #ffcc99;">DVD-R</span>]&#8212;&#8211;<strong>Animación, Infantil, Aventuras</strong><br />
'''
# Descarga la página
data = scrapertools.cachePage(item.url)
patron = '<a class="Ntooltip" href="([^"]+)">([^<]+)<span><br[^<]+'
data = httptools.downloadpage(item.url).data
patron = '<a class="Ntooltip" href="([^"]+)">([^<]+)<span><br[^<]+'
patron += '<img src="([^"]+)"></span></a>(.*?)<br'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -70,34 +62,57 @@ def peliculas(item):
for scrapedurl, scrapedtitle, scrapedthumbnail, resto in matches:
plot = scrapertools.htmlclean(resto).strip()
title = scrapedtitle + " " + plot
fulltitle = title
contentTitle = scrapedtitle
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(Item(channel=item.channel, action='findvideos', title=title, fulltitle=fulltitle, url=url,
thumbnail=thumbnail, plot=plot, extra=title, hasContentDetails=True,
contentTitle=contentTitle, contentThumbnail=thumbnail,
contentType="movie", context=["buscar_trailer"]))
url = item.url + scrapedurl
itemlist.append(Item(channel = item.channel,
action = 'findvideos',
title = title,
url = url,
thumbnail = scrapedthumbnail,
plot = plot,
hasContentDetails = True,
contentTitle = contentTitle,
contentType = "movie",
context = ["buscar_trailer"]
))
return itemlist
def findvideos(item):
logger.info("item=" + item.tostring())
itemlist = []
# Descarga la página para obtener el argumento
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
item.plot = scrapertools.find_single_match(data, '<div class="entry">(.*?)<div class="iframes">')
item.plot = scrapertools.htmlclean(item.plot).strip()
item.contentPlot = item.plot
patron = 'Ver película online.*?>.*?>([^<]+)'
scrapedopcion = scrapertools.find_single_match(data, patron)
titulo_opcional = scrapertools.find_single_match(scrapedopcion, ".*?, (.*)").upper()
bloque = scrapertools.find_multiple_matches(data, 'contenedor_tab.*?/table')
cuenta = 0
for datos in bloque:
cuenta = cuenta + 1
patron = '<em>(opción %s.*?)</em>' %cuenta
scrapedopcion = scrapertools.find_single_match(data, patron)
titulo_opcion = "(" + scrapertools.find_single_match(scrapedopcion, "op.*?, (.*)").upper() + ")"
if "TRAILER" in titulo_opcion or titulo_opcion == "()":
titulo_opcion = "(" + titulo_opcional + ")"
urls = scrapertools.find_multiple_matches(datos, '(?:src|href)="([^"]+)')
titulo = "Ver en %s " + titulo_opcion
for url in urls:
itemlist.append(Item(channel = item.channel,
action = "play",
contentThumbnail = item.thumbnail,
fulltitle = item.contentTitle,
title = titulo,
url = url
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return itemlist
newthumbnail = scrapertools.find_single_match(data,
'<div class="entry"[^<]+<p align="center"><img alt="[^"]+" src="([^"]+)"')
if newthumbnail != "":
item.thumbnail = newthumbnail
item.contentThumbnail = newthumbnail
logger.info("plot=" + item.plot)
return servertools.find_video_items(item=item, data=data)
def play(item):
    """Prepare the item for playback.

    Restores the display thumbnail from ``contentThumbnail`` (which earlier
    steps in this channel stash on the item — see the findvideos hunk above)
    and returns the item wrapped in a single-element list, as the platform's
    play contract expects.
    """
    # NOTE(review): assumes item.contentThumbnail was populated upstream;
    # if it is empty the thumbnail is simply cleared — confirm intended.
    item.thumbnail = item.contentThumbnail
    return [item]

View File

@@ -306,7 +306,6 @@ def fichas(item):
'<div class="c_fichas_data".*?marked="([^"]*)".*?serie="([^"]*)".*?' \
'<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, marca, serie, episodio, scrapedtitle in matches:
tipo = "movie"
scrapedurl = host + scrapedurl.rsplit("-dc=")[0]

View File

@@ -19,7 +19,7 @@
"patterns": [
{
"pattern": "flashx.(?:tv|pw)/(?:embed.php\\?c=|embed-|playvid-|)([A-z0-9]+)",
"url": "https://www.flashx.tv/playvid-\\1.html"
"url": "https://www.flashx.tv/\\1.html"
}
]
},

View File

@@ -27,8 +27,6 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
page_url = page_url.replace("playvid-", "")
headers = {'Host': 'www.flashx.tv',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
@@ -60,8 +58,18 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
headers['Content-Type'] = 'application/x-www-form-urlencoded'
data = httptools.downloadpage('https://www.flashx.tv/dl?playnow', post, headers, replace_headers=True).data
# Si salta aviso, se carga la pagina de comprobacion y luego la inicial
# LICENSE GPL3, de alfa-addon: https://github.com/alfa-addon/ ES OBLIGATORIO AÑADIR ESTAS LÍNEAS
if "You try to access this video with Kodi" in data:
url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"')
try:
data = httptools.downloadpage(url_reload, cookies=False).data
data = httptools.downloadpage('https://www.flashx.tv/dl?playnow', post, headers, replace_headers=True).data
# LICENSE GPL3, de alfa-addon: https://github.com/alfa-addon/ ES OBLIGATORIO AÑADIR ESTAS LÍNEAS
except:
pass
matches = scrapertools.find_multiple_matches(data, "(eval\(function\(p,a,c,k.*?)\s+</script>")
video_urls = []
for match in matches:
try:

View File

@@ -18,7 +18,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "rapidvideo.(?:org|com)/(?:\\?v=|e/|embed/)([A-z0-9]+)",
"pattern": "rapidvideo.(?:org|com)/(?:\\?v=|e/|embed/|v/)([A-z0-9]+)",
"url": "https://www.rapidvideo.com/e/\\1"
}
]