Add files via upload
Fixes:
- vi2: fix for sections other than streaming or torrent.
- CanalPelis: fix for a change in the site structure.
- DoramasMP4: pagination fix, plus improvements.
- UltraPeliculasHD: fix for link detection.
- PeliculasHD: fix for link detection.
- PeliculonHD: fix for link detection.
@@ -3,7 +3,7 @@
"name": "CanalPelis",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"language": ["cast", "lat", "vose"],
"fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/canalpelisbg.jpg",
"thumbnail": "http://www.canalpelis.com/wp-content/uploads/2016/11/logo_web.gif",
"banner": "",
@@ -44,6 +44,38 @@
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}
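The four new include_in_* flags above follow the convention the newest() function added further down relies on: an aggregator checks the per-channel setting before calling into the channel. A minimal consumer-side sketch, assuming the usual config.get_setting(name, channel) signature; collect_newest() and the channel-import scheme are illustrative assumptions, not code from this commit:

```python
# Hedged sketch: how an aggregator might honor the include_in_newest_* flags
# declared above. Names and import scheme are assumptions for illustration.
from platformcode import config, logger

def collect_newest(categoria, channel_names):
    results = []
    for name in channel_names:
        # Channels whose JSON flag disables this category are skipped
        if not config.get_setting("include_in_newest_%s" % categoria, name):
            continue
        channel = __import__("channels." + name, fromlist=[name])
        try:
            results.extend(channel.newest(categoria))
        except Exception:
            # A broken channel must not take down the whole "Novedades" view
            logger.error("newest() failed for %s" % name)
    return results
```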
@@ -52,11 +52,11 @@ def mainlist(item):
logger.info()
itemlist = []

itemlist.append(item.clone(title="Peliculas", action="peliculas",thumbnail=get_thumb('movies', auto=True),
itemlist.append(item.clone(title="Peliculas", action="peliculas", thumbnail=get_thumb('movies', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'movies/', viewmode="movie_with_plot"))

itemlist.append(item.clone(title="Géneros", action="generos",thumbnail=get_thumb('genres', auto=True),
itemlist.append(item.clone(title="Géneros", action="generos", thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'genre/', viewmode="movie_with_plot"))

@@ -64,7 +64,7 @@ def mainlist(item):
text_blod=True, page=0, viewcontent='movies', url=host + 'release/',
viewmode="movie_with_plot"))

itemlist.append(item.clone(title="Buscar", action="search",thumbnail=get_thumb('search', auto=True),
itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0))

itemlist.append(item.clone(title="Series", action="series", extra='serie', url=host + 'tvshows/',
@@ -122,6 +122,34 @@ def sub_search(item):
return itemlist


def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host + 'movies/'
elif categoria == 'infantiles':
item.url = host + "genre/cine-animacion/"
elif categoria == 'terror':
item.url = host + "genre/cine-terror/"
else:
return []

itemlist = peliculas(item)
if itemlist[-1].title == "» Siguiente »":
itemlist.pop()

# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []

return itemlist


def peliculas(item):
logger.info()
itemlist = []
@@ -137,7 +165,7 @@ def peliculas(item):

matches = scrapertools.find_multiple_matches(data, patron)

for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches[item.page:item.page + 20]:
for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches[item.page:item.page + 30]:
if 'Próximamente' not in quality and '-XXX.jpg' not in scrapedthumbnail:

scrapedtitle = scrapedtitle.replace('Ver ', '').strip()
@@ -148,12 +176,12 @@ def peliculas(item):
itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year},
contentTitle=contentTitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer", quality = quality))
title=title, context="buscar_trailer", quality=quality))

tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

if item.page + 20 < len(matches):
itemlist.append(item.clone(page=item.page + 20,
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(
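The page-size change above (20 to 30) is the slice-based pagination pattern used throughout the channel: render a window of the already-scraped matches and only fetch the site's own rel="next" page once that window runs out. A condensed sketch of the pattern, assuming Alfa's scrapertools API; PAGE_SIZE and paginate() are illustrative names, not code from this commit:

```python
# Hedged sketch of the slice pagination used in peliculas()/series() above.
from core import scrapertools

PAGE_SIZE = 30  # illustrative constant; the channel hardcodes 30 inline

def paginate(item, itemlist, matches, data):
    if item.page + PAGE_SIZE < len(matches):
        # More results already in memory: just advance the window
        itemlist.append(item.clone(page=item.page + PAGE_SIZE,
                                   title="» Siguiente »"))
    else:
        # Window exhausted: fall back to the site's <link rel="next"> URL
        next_page = scrapertools.find_single_match(
            data, '<link rel="next" href="([^"]+)" />')
        if next_page:
            itemlist.append(item.clone(url=next_page, page=0,
                                       title="» Siguiente »"))
    return itemlist
```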
@@ -223,13 +251,12 @@ def series(item):

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data)

patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">.*?'
patron += '<div class="texto">([^<]+)</div>'

matches = scrapertools.find_multiple_matches(data, patron)

for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches:
for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches[item.page:item.page + 30]:
if plot == '':
plot = scrapertools.find_single_match(data, '<div class="texto">([^<]+)</div>')
scrapedtitle = scrapedtitle.replace('Ver ', '').replace(
@@ -238,13 +265,20 @@ def series(item):
contentSerieName=scrapedtitle, show=scrapedtitle, plot=plot,
thumbnail=scrapedthumbnail, contentType='tvshow'))

url_next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
# url_next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')

tmdb.set_infoLabels(itemlist, __modo_grafico__)

if url_next_page:
itemlist.append(Item(channel=__channel__, action="series",
title="» Siguiente »", url=url_next_page))
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(
data, '<link rel="next" href="([^"]+)" />')

if next_page:
itemlist.append(item.clone(url=next_page, page=0,
title="» Siguiente »", text_color=color3))

return itemlist
@@ -348,27 +382,32 @@ def episodios(item):

return itemlist


def findvideos(item):
logger.info()
from lib import generictools
import urllib
import base64
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)

patron = 'data-post="(\d+)" data-nume="(\d+)".*?img src=\'([^\']+)\''
patron = "data-post='(\d+)' data-nume='(\d+)'.*?img src='([^']+)'>"
matches = re.compile(patron, re.DOTALL).findall(data)
for id, option, lang in matches:
lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
if lang == 'en':
lang = 'VOSE'
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type':'movie'}
lang = lang.lower().strip()
idioma = {'mx': '[COLOR cornflowerblue](LAT)[/COLOR]',
'es': '[COLOR green](CAST)[/COLOR]',
'en': '[COLOR red](VOSE)[/COLOR]',
'gb': '[COLOR red](VOSE)[/COLOR]'}
if lang in idioma:
lang = idioma[lang]
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type': 'movie'}
post = urllib.urlencode(post)
test_url = '%swp-admin/admin-ajax.php' % host
new_data = httptools.downloadpage(test_url, post=post, headers={'Referer':item.url}).data
new_data = httptools.downloadpage(test_url, post=post, headers={'Referer': item.url}).data
hidden_url = scrapertools.find_single_match(new_data, "src='([^']+)'")
new_data = httptools.downloadpage(hidden_url, follow_redirects=False)

try:
b64_url = scrapertools.find_single_match(new_data.headers['location'], "y=(.*)")
url = base64.b64decode(b64_url)
@@ -376,10 +415,11 @@ def findvideos(item):
url = hidden_url
if url != '':
itemlist.append(
Item(channel=item.channel, url=url, title='%s', action='play', language=lang,
infoLabels=item.infoLabels))
Item(channel=item.channel, action='play', language=lang, infoLabels=item.infoLabels,
url=url, title='Ver en: ' + '[COLOR yellowgreen]%s [/COLOR]' + lang))

itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
itemlist.sort(key=lambda it: it.language, reverse=False)

if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
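The rewritten findvideos() above follows the DooPlayer flow: read the data-post/data-nume attributes of each player option, POST them to wp-admin/admin-ajax.php with action=doo_player_ajax, pull the iframe src out of the response, and decode the base64 target hidden in the ?y= parameter of the redirect. A self-contained sketch of that flow under Python 2 (matching the channel's urllib.urlencode usage); resolve_option() is an illustrative helper, not code from this commit:

```python
# Hedged sketch of the DooPlayer link-resolution flow shown above.
import base64
import urllib

from core import httptools, scrapertools

def resolve_option(host, referer, post_id, option, content_type='movie'):
    post = urllib.urlencode({'action': 'doo_player_ajax', 'post': post_id,
                             'nume': option, 'type': content_type})
    ajax = httptools.downloadpage(host + 'wp-admin/admin-ajax.php', post=post,
                                  headers={'Referer': referer}).data
    hidden_url = scrapertools.find_single_match(ajax, "src='([^']+)'")
    response = httptools.downloadpage(hidden_url, follow_redirects=False)
    b64_url = scrapertools.find_single_match(
        response.headers.get('location', ''), "y=(.*)")
    # No encoded redirect: the intermediate URL is already the final one
    return base64.b64decode(b64_url) if b64_url else hidden_url
```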
@@ -40,9 +40,14 @@ def mainlist(item):

itemlist.append(Item(channel= item.channel, title="Doramas", action="doramas_menu",
thumbnail=get_thumb('doramas', auto=True), type='dorama'))

itemlist.append(Item(channel=item.channel, title="Variedades", action="list_all",
url=host + 'catalogue?format%5B%5D=varieties&sort=latest',
thumbnail='', type='dorama'))

itemlist.append(Item(channel=item.channel, title="Películas", action="list_all",
url=host + 'catalogue?format%5B%5D=movie', thumbnail=get_thumb('movies', auto=True),
type='movie'))
url=host + 'catalogue?format%5B%5D=movie&sort=latest',
thumbnail=get_thumb('movies', auto=True), type='movie'))
itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search?s=',
thumbnail=get_thumb('search', auto=True)))

@@ -55,8 +60,9 @@ def doramas_menu(item):

itemlist =[]

itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'catalogue?format%5B%5D=drama',
thumbnail=get_thumb('all', auto=True), type='dorama'))
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all",
url=host + 'catalogue?format%5B%5D=drama&sort=latest', thumbnail=get_thumb('all', auto=True),
type='dorama'))
itemlist.append(Item(channel=item.channel, title="Nuevos capitulos", action="latest_episodes",
url=host + 'latest-episodes', thumbnail=get_thumb('new episodes', auto=True), type='dorama'))
return itemlist
@@ -67,6 +73,7 @@ def list_all(item):
itemlist = []

data = get_source(item.url)

patron = '<div class="col-lg-2 col-md-3 col-6 mb-3"><a href="([^"]+)".*?<img src="([^"]+)".*?'
patron += 'txt-size-12">(\d{4})<.*?text-truncate">([^<]+)<.*?description">([^<]+)<.*?'

@@ -93,14 +100,10 @@ def list_all(item):
# Paginacion

if itemlist != []:
if item.type != 'dorama':
page_base = host+'catalogue?type[]=pelicula'
else:
page_base = host + 'catalogue'
next_page = scrapertools.find_single_match(data, '<a href=([^ ]+) aria-label=Netx>')
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" aria-label="Netx">')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
url=page_base+next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
url=host+'catalogue'+next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
type=item.type))
return itemlist
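The pagination fix above tightens the regex to the quoted aria-label="Netx" anchor and builds the next URL from host + 'catalogue' + next_page instead of a per-type page_base. A small sketch of the corrected lookup; build_next_page_item() is an illustrative name, not channel code:

```python
# Hedged sketch of the corrected "Netx" pagination lookup shown above.
from core import scrapertools
from core.item import Item

def build_next_page_item(item, data, host):
    next_page = scrapertools.find_single_match(
        data, '<a href="([^"]+)" aria-label="Netx">')
    if next_page:
        # The scraped href is a relative query string appended to /catalogue
        return Item(channel=item.channel, action="list_all",
                    title='Siguiente >>>',
                    url=host + 'catalogue' + next_page, type=item.type)
    return None
```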
@@ -238,14 +238,15 @@ def findvideos(item):
import urllib
itemlist = []
data = get_source(item.url)
patron = 'data-post="(\d+)" data-nume="(\d+).*?class="title">([^>]+)<'
data = data.replace("'", '"')
patron = 'data-type="([^"]+)" data-post="(\d+)" data-nume="(\d+).*?class="title">([^>]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)

for pt, nm, language in matches:
for type, pt, nm, language in matches:

if 'sub' in language.lower() or language not in IDIOMAS:
language = 'VOSE'
post = {'action': 'doo_player_ajax', 'post': pt, 'nume': nm}
post = {'action': 'doo_player_ajax', 'post': pt, 'nume': nm, 'type': type}
post = urllib.urlencode(post)
new_data = httptools.downloadpage(host + 'wp-admin/admin-ajax.php', post=post,
headers={'Referer': item.url}).data
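The change above normalizes single quotes to double quotes before matching and captures data-type alongside data-post/data-nume, so the AJAX call can send the page's own 'type' value instead of a hardcoded one. A minimal sketch of that parsing step; parse_player_options() is an illustrative helper:

```python
# Hedged sketch of the quote normalization + data-type capture shown above.
import re

def parse_player_options(data):
    data = data.replace("'", '"')
    patron = r'data-type="([^"]+)" data-post="(\d+)" data-nume="(\d+)'
    # Returns (type, post, nume) tuples ready for the doo_player_ajax POST
    return re.compile(patron, re.DOTALL).findall(data)
```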
@@ -246,9 +246,10 @@ def findvideos(item):
import urllib
itemlist = []
data = get_source(item.url)
patron = 'data-post="(\d+)" data-nume="(\d+)".*?img src=\'([^\']+)\''
data = data.replace("'",'"')
patron = 'data-type="([^"]+)" data-post="(\d+)" data-nume="(\d+).*?img src=\"([^"]+)\"'
matches = re.compile(patron, re.DOTALL).findall(data)
for id, option, lang in matches:
for type, id, option, lang in matches:
lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
quality = ''
if lang not in IDIOMAS:
@@ -258,7 +259,7 @@ def findvideos(item):
else:
title = ''

post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type':item.type}
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type':type}
post = urllib.urlencode(post)

test_url = '%swp-admin/admin-ajax.php' % 'https://peliculonhd.com/'
@@ -10,27 +10,10 @@ from core import tmdb
from core.item import Item
from channels import filtertools, autoplay
from platformcode import config, logger
from channelselector import get_thumb

host = 'http://www.ultrapeliculashd.com'

tgenero = {"ACCIÓN": "https://s3.postimg.cc/y6o9puflv/accion.png,",
"ANIMACIÓN": "https://s13.postimg.cc/5on877l87/animacion.png",
"AVENTURA": "https://s10.postimg.cc/6su40czih/aventura.png",
"CIENCIA FICCIÓN": "https://s9.postimg.cc/diu70s7j3/cienciaficcion.png",
"COMEDIA": "https://s7.postimg.cc/ne9g9zgwb/comedia.png",
"CRIMEN": "https://s4.postimg.cc/6z27zhirx/crimen.png",
"DRAMA": "https://s16.postimg.cc/94sia332d/drama.png",
"ESTRENOS": "https://s21.postimg.cc/fy69wzm93/estrenos.png",
"FAMILIA": "https://s7.postimg.cc/6s7vdhqrf/familiar.png",
"FANTASÍA": "https://s13.postimg.cc/65ylohgvb/fantasia.png",
"GUERRA": "https://s4.postimg.cc/n1h2jp2jh/guerra.png",
"INFANTIL": "https://s23.postimg.cc/g5rmazozv/infantil.png",
"MISTERIO": "https://s1.postimg.cc/w7fdgf2vj/misterio.png",
"ROMANCE": "https://s15.postimg.cc/fb5j8cl63/romance.png",
"SUSPENSO": "https://s13.postimg.cc/wmw6vl1cn/suspenso.png",
"TERROR": "https://s7.postimg.cc/yi0gij3gb/terror.png"
}

thumbletras = {'#': 'https://s32.postimg.cc/drojt686d/image.png',
'a': 'https://s32.postimg.cc/llp5ekfz9/image.png',
'b': 'https://s32.postimg.cc/y1qgm1yp1/image.png',
@@ -81,31 +64,27 @@ def mainlist(item):

itemlist.append(Item(channel=item.channel, title="Todas",
action="lista",
thumbnail='https://s18.postimg.cc/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png',
thumbnail=get_thumb('all', auto=True),
url=host + '/movies/'
))

itemlist.append(Item(channel=item.channel, title="Generos",
action="generos",
url=host,
thumbnail='https://s3.postimg.cc/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png'
thumbnail=get_thumb('genres', auto=True)
))

itemlist.append(Item(channel=item.channel, title="Alfabetico",
action="seccion",
url=host,
thumbnail='https://s17.postimg.cc/fwi1y99en/a-z.png',
fanart='https://s17.postimg.cc/fwi1y99en/a-z.png',
thumbnail=get_thumb('alphabet', auto=True),
extra='alfabetico'
))

itemlist.append(Item(channel=item.channel, title="Buscar",
action="search",
url=host + '/?s=',
thumbnail='https://s30.postimg.cc/pei7txpa9/buscar.png',
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
thumbnail=get_thumb('search', auto=True)
))

autoplay.show_option(item.channel, itemlist)
@@ -168,8 +147,6 @@ def generos(item):
for scrapedurl, scrapedtitle in matches:
thumbnail = ''
fanart = ''
if scrapedtitle in tgenero:
thumbnail = tgenero[scrapedtitle]
title = scrapedtitle
url = scrapedurl
if scrapedtitle not in ['PRÓXIMAMENTE', 'EN CINE']:
@@ -221,56 +198,46 @@ def alpha(item):
return itemlist


def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
return data

def findvideos(item):
from lib import jsunpack
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
patron = '<div id=(option.*?) class=play.*?<iframe.*?'
patron += 'rptss src=(.*?) (?:width.*?|frameborder.*?) allowfullscreen><\/iframe>'
matches = re.compile(patron, re.DOTALL).findall(data)

full_data = get_source(item.url)
patron = '<div id="([^"]+)" class="play-box-iframe.*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(full_data)
for option, video_url in matches:
language = scrapertools.find_single_match(data, '#%s>.*?-->(.*?)(?:\s|<)' % option)
language = scrapertools.find_single_match(full_data, '"#%s">.*?-->(.*?)(?:\s|<)' % option)
if 'sub' in language.lower():
language = 'SUB'
language = IDIOMAS[language]
if 'ultrapeliculashd' in video_url:
new_data = httptools.downloadpage(video_url).data
new_data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", new_data)
if 'drive' not in video_url:
quality= '1080p'
packed = scrapertools.find_single_match(new_data, '<script>(eval\(.*?)eval')
unpacked = jsunpack.unpack(packed)
url = scrapertools.find_single_match(unpacked, 'file:(http.?:.*?)\}')
else:
quality= '1080p'
url = scrapertools.find_single_match(new_data, '</div><iframe src=([^\s]+) webkitallowfullscreen')

elif 'stream' in video_url and 'streamango' not in video_url:
data = httptools.downloadpage('https:'+video_url).data
if not 'iframe' in video_url:
new_url=scrapertools.find_single_match(data, 'iframe src="(.*?)"')
new_data = httptools.downloadpage(new_url).data
url= ''
try:
url, quality = scrapertools.find_single_match(new_data, 'file:.*?(?:\"|\')(https.*?)(?:\"|\'),'
'label:.*?(?:\"|\')(.*?)(?:\"|\'),')
except:
pass
if url != '':
headers_string = '|Referer=%s' % url
url = url.replace('download', 'preview')+headers_string
data = httptools.downloadpage(video_url, follow_redirects=False, headers={'Referer': item.url}).data

sub = scrapertools.find_single_match(new_data, 'file:.*?"(.*?srt)"')
new_item = (Item(title=item.title, url=url, quality=quality, subtitle=sub, server='directo',
language = language))
itemlist.append(new_item)

else:
url = video_url
quality = 'default'
if 'hideload' in video_url:
quality = ''
new_id = scrapertools.find_single_match(data, "var OLID = '([^']+)'")
new_url = 'https://www.ultrapeliculashd.com/hideload/?ir=%s' % new_id[::-1]
data = httptools.downloadpage(new_url, follow_redirects=False, headers={'Referer': video_url}).headers
url = data['location']+"|%s" % video_url
elif 'd.php' in video_url:
data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
quality = '1080p'
packed = scrapertools.find_single_match(data, '<script>(eval\(.*?)eval')
unpacked = jsunpack.unpack(packed)
url = scrapertools.find_single_match(unpacked, '"file":("[^"]+)"')
elif 'drive' in video_url:
quality = '1080p'
data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
url = scrapertools.find_single_match(data, 'src="([^"]+)"')

if not config.get_setting("unify"):
title = ' [%s] [%s]' % (quality, language)
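Several branches above deal with players that hide the stream behind a p.a.c.k.e.d eval() script: the page is flattened, the eval(...) blob is extracted, unpacked with jsunpack, and the file: URL is read from the result. A compact sketch of just that branch, assuming Alfa's httptools/scrapertools and lib.jsunpack; extract_packed_file_url() is an illustrative name, not channel code:

```python
# Hedged sketch of the jsunpack branch used in findvideos() above.
import re

from core import httptools, scrapertools
from lib import jsunpack

def extract_packed_file_url(video_url):
    data = httptools.downloadpage(video_url).data
    # Flatten whitespace and quotes, roughly as the channel does
    data = re.sub(r'"|\n|\r|\t|\s{2,}', "", data)
    packed = scrapertools.find_single_match(data, '<script>(eval\(.*?)eval')
    unpacked = jsunpack.unpack(packed)
    # The unpacked player config carries the direct stream as file:...
    return scrapertools.find_single_match(unpacked, 'file:(http.?:.*?)\}')
```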
@@ -75,10 +75,10 @@ def select_menu(item):
thumbnail=get_thumb('all', auto=True), type=item.type))

itemlist.append(Item(channel=item.channel, title='Generos', action='section', url=url,
thumbnail=get_thumb('genres', auto=True), type=item.type))
thumbnail=get_thumb('genres', auto=True), type='all'))

itemlist.append(Item(channel=item.channel, title='Por Año', action='section', url=url,
thumbnail=get_thumb('year', auto=True), type=item.type))
thumbnail=get_thumb('year', auto=True), type='all'))

itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=url + 'ajax/1/?q=',
thumbnail=get_thumb("search", auto=True), type=item.type))
@@ -202,7 +202,7 @@ def list_all(item):
if lang != '':
title = '%s [%s]' % (title, lang)

thumbnail = scrapedthumbnail
thumbnail = host+scrapedthumbnail
url = host+scrapedurl
if item.type == 'series':
season, episode = scrapertools.find_single_match(scrapedtitle, '(\d+)x(\d+)')
@@ -225,7 +225,7 @@ def list_all(item):
infoLabels = infoLabels
)

if item.type == 'peliculas':
if item.type == 'peliculas' or item.type == 'all':
new_item.contentTitle = scrapedtitle
else:
scrapedtitle = scrapedtitle.split(' - ')
@@ -235,6 +235,7 @@ def list_all(item):
listed.append(title)

tmdb.set_infoLabels(itemlist, seekTmdb=True)
itemlist.sort(key=lambda it: it.title)
# Paginación

if json_data['next']:
@@ -263,16 +264,19 @@ def findvideos(item):
for url in matches:
title = ''
link_type = ''
server = ''
url = base64.b64decode(url)

if 'torrent' in url and item.link_type == 'torrent':
server = 'torrent'
link_type = 'torrent'
title = ' [%s]' % item.torrent_data
if 'torrent' in url:
if item.link_type == 'torrent' or item.type == 'all':
server = 'torrent'
link_type = 'torrent'
title = ' [%s]' % item.torrent_data
elif 'torrent' not in url:
link_type = 'flash'

if url != '' and (link_type == item.link_type.lower()):

if link_type == item.link_type.lower() or item.type == 'all':
itemlist.append(Item(channel=item.channel, url=url, title='%s'+title, action='play', server=server,
language=lang, quality=quality, infoLabels=item.infoLabels))
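The last hunk relaxes the link filter for the new type == 'all' sections: links are still classified as torrent or flash after base64-decoding, but an 'all' listing keeps both kinds instead of only the ones matching item.link_type. A short sketch of that classification; classify_link() is an illustrative helper, not code from this commit:

```python
# Hedged sketch of the relaxed torrent/flash filtering shown above.
import base64

def classify_link(encoded_url, item):
    url = base64.b64decode(encoded_url)
    link_type = 'torrent' if 'torrent' in url else 'flash'
    # 'all' sections keep every link; others keep only the requested type
    keep = (item.type == 'all') or (link_type == item.link_type.lower())
    return url, link_type, keep
```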