Merge pull request #70 from Alfa-beto/channels

Ajustes y reparación de canales
This commit is contained in:
Alfa
2017-09-10 03:37:03 +02:00
committed by GitHub
14 changed files with 91 additions and 101 deletions

View File

@@ -172,11 +172,11 @@ def findvideos(item):
itemlist = []
itemtemp = []
for scrapedurl, nombre_servidor, idioma, calidad in matches:
idioma = idioma.strip()
calidad = calidad.strip()
if "youapihd" in nombre_servidor.lower():
nombre_servidor = "gvideo"
for scrapedurl, server_name, language, quality in matches:
language = language.strip()
quality = quality.strip()
if "youapihd" in server_name.lower():
server_name = "gvideo"
if "pelismundo" in scrapedurl:
data = httptools.downloadpage(scrapedurl, add_referer = True).data
patron = 'sources.*?}],'
@@ -193,8 +193,10 @@ def findvideos(item):
fulltitle = item.title,
server = "directo",
thumbnail = item.thumbnail,
title = "Ver en " + nombre_servidor + " (" + idioma + ") (Calidad " + videoitem[0] + ")",
url = videoitem[1]
title = server_name + " (" + language + ") (Calidad " + videoitem[0] + ")",
url = videoitem[1],
language = language,
quality = videoitem[0]
))
else:
itemlist.append(Item(channel=item.channel,
@@ -202,10 +204,12 @@ def findvideos(item):
extra = "",
fulltitle = item.title,
server = "",
title = "Ver en " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad + ")",
title = server_name + " (" + language + ") (Calidad " + quality + ")",
thumbnail = item.thumbnail,
url = scrapedurl,
folder = False
folder = False,
language = language,
quality = quality
))
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist

View File

@@ -150,7 +150,7 @@ def peliculas(item):
title = "%s (%s)" % (contentTitle, quality)
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
contentQuality=quality, contentTitle=contentTitle, infoLabels = {'year':year}))
quality=quality, contentTitle=contentTitle, infoLabels = {'year':year}))
if item.title != 'Versión original':
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

View File

@@ -77,25 +77,31 @@ def agregadas(item):
itemlist = []
data = scrapertools.cache_page(item.url)
logger.info("data=" + data)
data = re.sub(r'\n|\r|\t|\s{2}| |"', "", data)
patron = 'class=\'reflectMe\' src="([^"]+).*?class="infor".*?href="([^"]+).*?<h2>(.*?)<.*?class="sinopsis">(.*?)<' # url
patron = scrapertools.find_multiple_matches (data,'<divclass=col-mt-5 postsh>.*?Duración')
matches = re.compile(patron, re.DOTALL).findall(data)
for element in patron:
info = scrapertools.find_single_match(element,
"calidad>(.*?)<.*?ahref=(.*?)>.*?'reflectMe' src=(.*?)\/>.*?<h2>(.*?)"
"<\/h2>.*?sinopsis>(.*?)<\/div>.*?Año:<\/span>(.*?)<\/li>")
quality = info[0]
url = info[1]
thumbnail = info[2]
title = info[3]
plot = info[4]
year = info[5].strip()
for thumbnail, url, title, sinopsis in matches:
url = urlparse.urljoin(item.url, url)
thumbnail = urlparse.urljoin(url, thumbnail)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title + " ", fulltitle=title, url=url,
thumbnail=thumbnail, show=title, plot=sinopsis))
itemlist.append(Item(channel=item.channel, title=title, url=url, action='findvideos',thumbnail=thumbnail,
plot=plot,
quality=quality, infoLabels={'year':year}))
# Paginación
try:
patron = 'tima">.*?href="([^"]+)" ><i'
next_page = scrapertools.find_single_match(data,'tima>.*?href=(.*?) ><i')
next_page = re.compile(patron, re.DOTALL).findall(data)
itemlist.append(Item(channel=item.channel, action="agregadas", title="Página siguiente >>", url=next_page[0],
itemlist.append(Item(channel=item.channel, action="agregadas", title='Pagina Siguiente >>',
url=next_page.strip(),
viewmode="movie_with_plot"))
except:
pass
@@ -135,12 +141,16 @@ def findvideos(item):
for scrapedidioma, scrapedcalidad, scrapedurl in matches:
idioma = ""
scrapedserver = re.findall("http[s*]?://(.*?)/", scrapedurl)
title = item.title + " [" + scrapedcalidad + "][" + scrapedidioma + "][" + scrapedserver[0] + "]"
title = item.title + " [" + scrapedcalidad + "][" + scrapedidioma +"]"
quality = scrapedcalidad
language = scrapedidioma
if not ("omina.farlante1" in scrapedurl or "404" in scrapedurl):
itemlist.append(
Item(channel=item.channel, action="play", title=title, fulltitle=title, url=scrapedurl, thumbnail="",
plot=plot, show=item.show))
Item(channel=item.channel, action="play", title=title, fulltitle=title, url=scrapedurl,
thumbnail="", plot=plot, show=item.show, quality= quality, language=language))
itemlist=servertools.get_servers_itemlist(itemlist)
return itemlist

View File

@@ -154,35 +154,34 @@ def scraper(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if item.contentType == "movie":
patron = scrapertools.find_multiple_matches(data,
'<div class="poster"><a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)".*?/flags/(.*?).png.*?<span>(.*?)</span>')
'<div class="poster">.*?src="(.*?)" alt=.*?href="(.*?)">.*?'
'<h4>(.*?)<\/h4>.*?img\/flags\/(.*?)\.png.*?imdb.*?<span>(.*?)>')
for url, thumb, title, idioma, year in patron:
for thumb, url, title, language, year in patron:
titulo = title
title = re.sub(r"!|¡", "", title)
title = title.replace("Autosia", "Autopsia")
title = re.sub(r"&#8217;|PRE-Estreno", "'", title)
new_item = item.clone(action="findvideos", title="[COLOR aqua]" + titulo + "[/COLOR]", url=url,
fulltitle=title, contentTitle=title, contentType="movie", extra=year, library=True)
new_item.infoLabels['year'] = year
fulltitle=title, contentTitle=title, contentType="movie", extra=year, library=True,
language= language, infoLabels={'year':year})
itemlist.append(new_item)
else:
patron = scrapertools.find_multiple_matches(data,
'<div class="poster"><a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)".*?<span>(.*?)</span>')
'<div class="poster">.*?src="(.*?)" alt=.*?href="(.*?)">.*?'
'<h4>(.*?)<\/h4>.*?<span>(.*?)<')
for url, thumb, title, year in patron:
for thumb, url, title, year in patron:
titulo = title.strip()
title = re.sub(r"\d+x.*", "", title)
new_item = item.clone(action="findtemporadas", title="[COLOR aqua]" + titulo + "[/COLOR]", url=url,
thumbnail=thumb, fulltitle=title, contentTitle=title, show=title,
contentType="tvshow", library=True)
new_item.infoLabels['year'] = year
contentType="tvshow", library=True, infoLabels={'year':year})
itemlist.append(new_item)
## Paginación
@@ -282,7 +281,6 @@ def findtemporadas(item):
check_temp = "yes"
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if len(item.extra.split("|")):
if len(item.extra.split("|")) >= 4:
fanart = item.extra.split("|")[2]
@@ -423,6 +421,7 @@ def findvideos(item):
new_item.infoLabels['episode'] = item.epi
new_item.infoLabels['season'] = item.temp
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist)
else:
title = "[COLOR darkcyan][B]Ver capítulo [/B][/COLOR]" + "[COLOR red][B]" + capitulo + "[/B][/COLOR]" + " " + "[COLOR darkred]" + server + " ( " + idioma + " )" + "[/COLOR]"
itemlist.append(Item(channel=item.channel, title=title, url=url, action="play", fanart=fanart,

View File

@@ -6,6 +6,7 @@ from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core import jsontools
from core.item import Item
from platformcode import config, logger
@@ -217,56 +218,29 @@ def findvideos(item):
duplicados = []
data = get_source(item.url)
data = data.replace('amp;', '')
data_page = data
patron = 'class=TPlayerTb id=(.*?)>&lt;iframe width=&quot;560&quot; height=&quot;315&quot; src=&quot;(.*?)&quot;'
patron = '<div class=TPlayerTbCurrent id=(.*?)><iframe.*?src=(.*?) frameborder'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, video_page in matches:
language = scrapertools.find_single_match(data_page, 'TPlayerNv=%s><span>.*?<center>(.*?)<\/center>' % option)
if language == 'Castellano':
language = 'Español'
if language in audio:
id_audio = audio[language]
else:
id_audio = language
if 'redirect' in video_page or 'yourplayer' in video_page:
data = get_source('http:%s' % video_page)
patron = 'label:(.*?),.*?file:(.*?)&app.*?}'
matches = re.compile(patron, re.DOTALL).findall(data)
for video_url in matches:
url = video_url[1]
url = url.replace('\/', '/')
title = item.contentTitle + ' [%s][%s]' % (video_url[0], id_audio)
server = 'directo'
if url not in duplicados:
itemlist.append(item.clone(action='play',
title=title,
url=url,
server=server
))
duplicados.append(url)
else:
if video_page not in duplicados:
itemlist.extend(servertools.find_video_items(data=video_page))
duplicados.append(video_page)
for video_item in itemlist:
if video_item.server != 'directo':
video_item.channel = item.channel
video_item.quality = item.quality
video_item.title = item.contentTitle + ' [%s][%s]' % (video_item.server, id_audio)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(item.clone(title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
for opt, urls_page in matches:
language = scrapertools.find_single_match (data,'data-TPlayerNv=%s><span>Opción <strong>.*?'
'<\/strong><\/span>.*?<span>(.*?)<\/span'%opt)
data = httptools.downloadpage(urls_page).data
servers = scrapertools.find_multiple_matches(data,'<button id="(.*?)"')
for server in servers:
info_urls = urls_page.replace('embed','get')
video_info=httptools.downloadpage(info_urls+'/'+server).data
video_info = jsontools.load(video_info)
video_id = video_info['extid']
video_server = video_info['server']
video_status = video_info['status']
if video_status in ['finished', 'propio']:
if video_status == 'finished':
url = 'https://'+video_server+'/embed/'+video_id
else:
url = 'https://'+video_server+'/e/'+video_id
title = item.title
itemlist.append(item.clone(title=title, url=url, action='play', language=language))
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist

View File

@@ -14,7 +14,8 @@
"thumbnail": "http://i.imgur.com/FzLmGKK.png",
"categories": [
"movie",
"tvshow"
"tvshow",
"direct"
],
"settings": [
{

View File

@@ -6,6 +6,7 @@ import unicodedata
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
@@ -152,20 +153,20 @@ def entradas(item):
thumbnail = host % "movie/%s/poster_167x250.jpg" % child["id"]
if child['height'] < 720:
quality = "[B] [SD][/B]"
quality = "SD"
elif child['height'] < 1080:
quality = "[B] [720p][/B]"
quality = "720p"
elif child['height'] >= 1080:
quality = "[B] [1080p][/B]"
quality = "1080p"
fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \
.decode("utf-8")
if child['name'] == "":
title = child['id'].rsplit(".", 1)[0]
else:
title = child['name']
if child['year']:
title += " (" + child['year'] + ")"
title += quality
#if child['year']:
# title += " (" + child['year'] + ")"
#title += quality
video_urls = []
for k, v in child.get("video", {}).items():
@@ -175,7 +176,7 @@ def entradas(item):
itemlist.append(Item(channel=item.channel, action="findvideos", server="", title=title, url=url,
thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels,
contentTitle=fulltitle, video_urls=video_urls, text_color=color3))
contentTitle=fulltitle, video_urls=video_urls, text_color=color3, quality=quality))
return itemlist
@@ -517,12 +518,12 @@ def findvideos(item):
import base64
item.video_urls.sort(key=lambda it: (it[1], random.random()), reverse=True)
i = 0
calidad_actual = ""
for vid, calidad in item.video_urls:
title = "Ver vídeo en %sp" % calidad
if calidad != calidad_actual:
actual_quality = ""
for vid, quality in item.video_urls:
title = "Ver vídeo en %sp" % quality
if quality != actual_quality:
i = 0
calidad_actual = calidad
actual_quality = quality
if i % 2 == 0:
title += " [COLOR purple]Mirror %s[/COLOR] - %s" % (str(i + 1), item.fulltitle)
@@ -530,14 +531,15 @@ def findvideos(item):
title += " [COLOR green]Mirror %s[/COLOR] - %s" % (str(i + 1), item.fulltitle)
url = vid % "%s" % base64.b64decode("dHQ9MTQ4MDE5MDQ1MSZtbT1NRzZkclhFand6QmVzbmxSMHNZYXhBJmJiPUUwb1dVVVgx"
"WTBCQTdhWENpeU9paUE=")
itemlist.append(item.clone(title=title, action="play", url=url, server="directo", video_urls=""))
itemlist.append(item.clone(title=title, action="play", url=url, video_urls=""))
i += 1
if itemlist and item.extra == "" and config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir enlaces a la videoteca", text_color=color5,
contentTitle=item.fulltitle, url=item.url, action="add_pelicula_to_library",
infoLabels={'title': item.fulltitle}, extra="findvideos", fulltitle=item.fulltitle))
infoLabels={'title': item.fulltitle}, extra="findvideos", fulltitle=item.fulltitle
))
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist

View File

Before

Width:  |  Height:  |  Size: 1.1 KiB

After

Width:  |  Height:  |  Size: 1.1 KiB

View File

Before

Width:  |  Height:  |  Size: 874 B

After

Width:  |  Height:  |  Size: 874 B

View File

Before

Width:  |  Height:  |  Size: 3.7 KiB

After

Width:  |  Height:  |  Size: 3.7 KiB

View File

Before

Width:  |  Height:  |  Size: 1.8 KiB

After

Width:  |  Height:  |  Size: 1.8 KiB

View File

Before

Width:  |  Height:  |  Size: 838 B

After

Width:  |  Height:  |  Size: 838 B

View File

Before

Width:  |  Height:  |  Size: 167 B

After

Width:  |  Height:  |  Size: 167 B

View File

Before

Width:  |  Height:  |  Size: 177 B

After

Width:  |  Height:  |  Size: 177 B