Merge pull request #552 from danielr460/master

Ajustes en canales
This commit is contained in:
Alfa
2019-02-13 14:26:31 -05:00
committed by GitHub
3 changed files with 49 additions and 44 deletions

View File

@@ -10,7 +10,7 @@ from channelselector import get_thumb
from core import tmdb
from core.item import Item
from platformcode import logger, config
from channels import autoplay
from channels import autoplay, renumbertools
from channels import filtertools
tgenero = {"Comedia": "https://s7.postimg.cc/ne9g9zgwb/comedia.png",
@@ -83,7 +83,7 @@ def mainlist(item):
))
itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
@@ -113,7 +113,9 @@ def lista(item):
patron = 'class="anime"><a href="([^"]+)">'
patron +='<div class="cover" style="background-image: url\((.*?)\)">.*?<h2>([^<]+)<\/h2>'
matches = re.compile(patron, re.DOTALL).findall(data)
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
thumbnail = host + scrapedthumbnail
@@ -191,10 +193,11 @@ def episodios(item):
infoLabels = item.infoLabels
for scrapedurl, scrapedlang, scrapedtitle, episode in matches:
language = scrapedlang
title = scrapedtitle + " " + "1x" + episode
season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, int(episode))
title = scrapedtitle + " " + str(season) +"x" + str(episode)
url = scrapedurl
infoLabels['season'] ='1'
infoLabels['episode'] = episode
infoLabels['season'] = str(season)
infoLabels['episode'] = str(episode)
itemlist.append(Item(channel=item.channel, title=title, contentSerieName=item.contentSerieName, url=url,
action='findvideos', language=IDIOMAS[language], infoLabels=infoLabels))
@@ -211,7 +214,7 @@ def episodios(item):
def findvideos(item):
logger.info()
from channels.pelisplus import add_vip
itemlist = []
data = get_source(item.url)
@@ -220,22 +223,12 @@ def findvideos(item):
for scrapedurl, language in matches:
vip = False
title = '%s [%s]'
if not config.get_setting('unify'):
title = ' [%s]' % IDIOMAS[language]
else:
title = ''
if 'pelisplus.net' in scrapedurl:
referer = scrapedurl
post = {'r':item.url, 'd': 'www.pelisplus.net'}
post = urllib.urlencode(post)
scrapedurl = scrapedurl.replace('/v/', '/api/source/')
url_data = httptools.downloadpage(scrapedurl, post=post, headers={'Referer':referer}).data
patron = '"file":"([^"]+)","label":"([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(url_data)
for url, quality in matches:
url = 'https://www.pelisplus.net' + url.replace('\/', '/')
itemlist.append(
Item(channel=item.channel, title=title, url=url, action='play', language=IDIOMAS[language],
quality=quality, infoLabels=item.infoLabels, server='directo'))
itemlist += add_vip(item, scrapedurl, IDIOMAS[language])
vip = True
elif 'server' in scrapedurl:
new_data = get_source(scrapedurl)
@@ -243,10 +236,10 @@ def findvideos(item):
if not vip:
itemlist.append(item.clone(title=title, url=scrapedurl.strip(), action='play',
itemlist.append(item.clone(title='%s'+title, url=scrapedurl.strip(), action='play',
language=IDIOMAS[language]))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

View File

@@ -167,7 +167,8 @@ def findvideos(item):
headers = {"X-Requested-With":"XMLHttpRequest"}
for scrapedserver, scrapeduser in matches:
data1 = httptools.downloadpage("https://space.danimados.space/gilberto.php?id=%s&sv=mp4" %scrapeduser).data
url = base64.b64decode(scrapertools.find_single_match(data1, 'hashUser = "([^"]+)'))
data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
url = base64.b64decode(scrapertools.find_single_match(data1, '<iframe data-source="([^"]+)"'))
url1 = devuelve_enlace(url)
if "drive.google" in url1:
url1 = url1.replace("view","preview")

View File

@@ -216,6 +216,28 @@ def section(item):
return itemlist
def add_vip(item, video_url, language=None):
    """Resolve direct stream links from a pelisplus.net player URL.

    POSTs to the site's ``/api/source/`` endpoint (derived from the ``/v/``
    player URL) and scrapes the response for ``"file"/"label"`` pairs,
    returning one playable Item per available quality.

    item      -- channel Item; its ``.url`` is sent as the 'r' form field
    video_url -- pelisplus.net '/v/...' player URL to resolve
    language  -- language tag shown in the title suffix (omitted when the
                 'unify' setting is enabled)

    Returns a list of Item objects with action='play'.
    """
    logger.info()
    itemlist = []
    referer = video_url
    # The source endpoint expects the originating page URL and domain as form data.
    post = {'r': item.url, 'd': 'www.pelisplus.net'}
    post = urllib.urlencode(post)
    # Player URLs use /v/<id>; the stream-list endpoint lives at /api/source/<id>.
    video_url = video_url.replace('/v/', '/api/source/')
    url_data = httptools.downloadpage(video_url, post=post, headers={'Referer': referer}).data
    # Each stream variant appears in the response as "file":"<url>","label":"<quality>".
    patron = '"file":"([^"]+)","label":"([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(url_data)
    if not config.get_setting('unify'):
        title = ' [%s]' % language
    else:
        title = ''
    for url, quality in matches:
        # Unescape JSON-escaped forward slashes in the stream URL.
        url = url.replace('\/', '/')
        # The leading '%s' placeholder is filled in later by
        # servertools.get_servers_itemlist (title % server name) — see the
        # callers' lambda i: i.title % i.server.capitalize().
        itemlist.append(
            Item(channel=item.channel, title='%s'+title, url=url, action='play', language=language,
                 quality=quality, infoLabels=item.infoLabels))
    return itemlist
def findvideos(item):
logger.info()
import urllib
@@ -227,34 +249,22 @@ def findvideos(item):
for video_url in matches:
language = 'latino'
url = ''
if not config.get_setting('unify'):
title = ' [%s]' % language.capitalize()
title = ' [%s]' % IDIOMAS[language]
else:
title = ''
if 'pelisplus.net' in video_url:
referer = video_url
post = {'r':item.url, 'd': 'www.pelisplus.net'}
post = urllib.urlencode(post)
video_url = video_url.replace('/v/', '/api/source/')
url_data = httptools.downloadpage(video_url, post=post, headers={'Referer':referer}).data
patron = '"file":"([^"]+)","label":"([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(url_data)
for url, quality in matches:
url = url.replace('\/', '/')
itemlist.append(
Item(channel=item.channel, title='%s' + title, url=url, action='play', language=IDIOMAS[language],
quality=quality, infoLabels=item.infoLabels))
itemlist += add_vip(item, video_url, IDIOMAS[language])
else:
if not 'vidoza' in video_url and not 'pelishd' in video_url:
url_data = get_source(video_url)
if 'vidoza' not in video_url and not 'pelishd' in video_url:
elif not 'vidoza' in video_url and not 'pelishd' in video_url:
url_data = get_source(video_url)
url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
else:
url = video_url
if not 'server' in url:
url = url
@@ -276,6 +286,7 @@ def findvideos(item):
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=IDIOMAS[language],
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para FilterTools