Merge pull request #523 from Intel11/master

Updated
This commit is contained in:
Alfa
2019-01-09 14:08:36 -05:00
committed by GitHub
9 changed files with 373 additions and 88 deletions

View File

@@ -0,0 +1,61 @@
{
    "id": "cineasiaenlinea",
    "name": "CineAsiaEnLinea",
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "thumbnail": "http://i.imgur.com/5KOU8uy.png?3",
    "banner": "cineasiaenlinea.png",
    "categories": [
        "movie",
        "vos"
    ],
    "settings": [
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en búsqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Películas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - terror",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "perfil",
            "type": "list",
            "label": "Perfil de color",
            "default": 3,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "Sin color",
                "Perfil 3",
                "Perfil 2",
                "Perfil 1"
            ]
        }
    ]
}

View File

@@ -0,0 +1,177 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = "http://www.cineasiaenlinea.com/"
__channel__ = 'cineasiaenlinea'
try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
    __modo_grafico__ = True
# Channel configuration
__perfil__ = int(config.get_setting('perfil', 'cineasiaenlinea'))
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
if __perfil__ - 1 >= 0:
    color1, color2, color3 = perfil[__perfil__ - 1]
else:
    color1 = color2 = color3 = ""
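# Note: "perfil" holds the 0-based index of the option chosen from the
# "lvalues" list in the channel JSON above ("Sin color", "Perfil 3",
# "Perfil 2", "Perfil 1"), so the default of 3 ("Perfil 1") selects
# perfil[2], the blue palette, while 0 ("Sin color") leaves all three
# colors empty.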
def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(item.clone(action="peliculas", title="Novedades", url=host + "archivos/peliculas",
                               thumbnail=get_thumb('newest', auto=True), text_color=color1))
    itemlist.append(item.clone(action="peliculas", title="Estrenos", url=host + "archivos/estrenos",
                               thumbnail=get_thumb('premieres', auto=True), text_color=color1))
    itemlist.append(item.clone(action="indices", title="Por géneros", url=host,
                               thumbnail=get_thumb('genres', auto=True), text_color=color1))
    itemlist.append(item.clone(action="indices", title="Por país", url=host, text_color=color1,
                               thumbnail=get_thumb('country', auto=True)))
    itemlist.append(item.clone(action="indices", title="Por año", url=host, text_color=color1,
                               thumbnail=get_thumb('year', auto=True)))
    itemlist.append(item.clone(title="", action=""))
    itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3,
                               thumbnail=get_thumb('search', auto=True)))
    itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
    return itemlist
def configuracion(item):
    from platformcode import platformtools
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret
def search(item, texto):
    logger.info()
    item.url = "%s?s=%s" % (host, texto.replace(" ", "+"))
    try:
        return peliculas(item)
    # Catch the exception so a failing channel does not interrupt the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = host + "archivos/peliculas"
        elif categoria == 'terror':
            item.url = host + "genero/terror"
        item.action = "peliculas"
        itemlist = peliculas(item)
        if itemlist[-1].action == "peliculas":
            itemlist.pop()
    # Catch the exception so a failing channel does not interrupt the Newest section
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
def peliculas(item):
    logger.info()
    itemlist = []
    item.text_color = color2
    # Download the page
    data = httptools.downloadpage(item.url).data
    patron = '<h3><a href="([^"]+)">([^<]+)<.*?src="([^"]+)".*?<a rel="tag">([^<]+)<' \
             '.*?<a rel="tag">([^<]+)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, year, calidad in matches:
        title = re.sub(r' \((\d+)\)', '', scrapedtitle)
        scrapedtitle += " [%s]" % calidad
        infolab = {'year': year}
        itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl,
                                   thumbnail=scrapedthumbnail, infoLabels=infolab,
                                   contentTitle=title, contentType="movie", quality=calidad))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)"')
    if next_page:
        itemlist.append(item.clone(title=">> Página Siguiente", url=next_page))
    return itemlist
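# Illustrative only -- a hypothetical fragment the pattern above would match
# (the real site markup may differ):
#   <h3><a href="http://host/pelicula">Titulo (2018)</a></h3>
#   <img src="http://host/thumb.jpg"> <a rel="tag">2018</a> <a rel="tag">HD</a>
# yielding (url, title, thumbnail, year, quality) =
#   ('http://host/pelicula', 'Titulo (2018)', 'http://host/thumb.jpg', '2018', 'HD')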
def indices(item):
    logger.info()
    itemlist = []
    # Download the page
    data = httptools.downloadpage(item.url).data
    logger.info(data)
    if "géneros" in item.title:
        bloque = scrapertools.find_single_match(data, '(?i)<h4>Peliculas por genero</h4>(.*?)</ul>')
        matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)".*?>([^<]+)<')
    elif "año" in item.title:
        bloque = scrapertools.find_single_match(data, '(?i)<h4>Peliculas por Año</h4>(.*?)</select>')
        matches = scrapertools.find_multiple_matches(bloque, '<option value="([^"]+)">([^<]+)<')
    else:
        bloque = scrapertools.find_single_match(data, '(?i)<h4>Peliculas por Pais</h4>(.*?)</ul>')
        matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)".*?>([^<]+)<')
    for scrapedurl, scrapedtitle in matches:
        if "año" in item.title:
            scrapedurl = "%sfecha-estreno/%s" % (host, scrapedurl)
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
                             thumbnail=item.thumbnail, text_color=color3))
    return itemlist
def findvideos(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    item.infoLabels["plot"] = scrapertools.find_single_match(data, '(?i)<h2>SINOPSIS.*?<p>(.*?)</p>')
    item.infoLabels["trailer"] = scrapertools.find_single_match(data, 'src="(http://www.youtube.com/embed/[^"]+)"')
    itemlist = servertools.find_video_items(item=item, data=data)
    for it in itemlist:
        it.thumbnail = item.thumbnail
        it.text_color = color2
    itemlist.append(item.clone(action="add_pelicula_to_library", title="Añadir película a la videoteca"))
    if item.infoLabels["trailer"]:
        folder = True
        if config.is_xbmc():
            folder = False
        itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Ver Trailer", folder=folder,
                                   contextual=not folder))
    return itemlist

View File

@@ -101,7 +101,7 @@ def play(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    item.url = scrapertools.find_single_match(data, 'Playerholder.*?src="([^"]+)"')
+    item.url = scrapertools.find_single_match(data, '(?i)Playerholder.*?src="([^"]+)"')
     if "tubst.net" in item.url:
         url = scrapertools.find_single_match(data, 'itemprop="embedURL" content="([^"]+)')
         data = httptools.downloadpage(url).data
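The only change in this hunk is the (?i) inline flag, which makes the match case-insensitive, so variants like "playerholder" in the page markup are still caught. A quick sanity check in plain Python:

import re
print(re.search('Playerholder', 'class="playerholder"'))       # None
print(re.search('(?i)Playerholder', 'class="playerholder"'))   # matches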

View File

@@ -10,7 +10,7 @@
"categories": [
"movie",
"tvshow",
"vose",
"vose"
],
"settings": [
{
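This hunk removes a trailing comma after "vose": strict JSON parsers reject trailing commas, so the old file could fail to load. For example:

import json
json.loads('["movie", "tvshow", "vose"]')   # OK
json.loads('["movie", "tvshow", "vose",]')  # raises ValueError (JSONDecodeError)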

View File

@@ -102,14 +102,14 @@ def sub_search(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     # logger.info(data)
-    data = scrapertools.find_single_match(data, '<header><h1>Resultados encontrados(.*?)resppages')
-    # logger.info(data)
-    patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />.*?'  # url, img, title
-    patron += '<span class="year">([^<]+)</span>'
+    data = scrapertools.find_single_match(data, 'Archivos (.*?)resppages')
+    patron = 'img alt="([^"]+)".*?'
+    patron += 'src="([^"]+)".*?'
+    patron += 'href="([^"]+)".*?'
+    patron += 'fechaestreno">([^<]+)'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
+    for scrapedtitle, scrapedthumbnail, scrapedurl, year in matches:
         if 'tvshows' not in scrapedurl:
             itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
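The new pattern captures (title, thumbnail, url, year) in that order, and the for-loop unpacking is reordered to match. Group order and tuple order must stay in sync: a mismatch silently swaps fields rather than raising an error. A minimal illustration with made-up markup:

import re
patron = 'img alt="([^"]+)".*?src="([^"]+)".*?href="([^"]+)".*?fechaestreno">([^<]+)'
data = '<img alt="Title" src="thumb.jpg"><a href="http://example.com/movie"><span class="fechaestreno">2018</span>'
for title, thumb, url, year in re.findall(patron, data, re.DOTALL):
    print(title, thumb, url, year)  # Title thumb.jpg http://example.com/movie 2018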
@@ -133,18 +133,19 @@ def peliculas(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
     data = scrapertools.decodeHtmlentities(data)
     # logger.info(data)
-    # img, title
-    patron = '<article id="post-\w+" class="item movies"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'
-    patron += '<span class="quality">([^<]+)</span> </div>\s*<a href="([^"]+)">.*?'  # quality, url
-    patron += '</h3><span>([^<]+)</span>'  # year
+    patron = '(?is)movie-img img-box.*?alt="([^"]+).*?'
+    patron += 'src="([^"]+).*?'
+    patron += 'href="([^"]+).*?'
+    patron += 'fechaestreno">([^<]+).*?'
+    patron += 'quality">([^<]+)'
     matches = scrapertools.find_multiple_matches(data, patron)
-    for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year in matches[item.page:item.page + 30]:
+    for scrapedtitle, scrapedthumbnail, scrapedurl, year, quality in matches[item.page:item.page + 30]:
         title = '%s [COLOR yellowgreen](%s)[/COLOR]' % (scrapedtitle, quality)
         itemlist.append(Item(channel=__channel__, action="findvideos", text_color=color3,
@@ -172,10 +173,10 @@ def genresYears(item):
     data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
     data = scrapertools.decodeHtmlentities(data)
-    if item.title == "Estrenos por Año":
+    if item.title == "Estrenos":
         patron_todas = 'ESTRENOS</a>(.*?)</i> Géneros'
     else:
-        patron_todas = '<h2>Generos</h2>(.*?)</div><aside'
+        patron_todas = '(?is)genres falsescroll(.*?)</div> </aside'
     # logger.error(texto='***********uuuuuuu*****' + patron_todas)
     data = scrapertools.find_single_match(data, patron_todas)

View File

@@ -2,6 +2,7 @@
 import re
 import urllib
+import base64
 from core import httptools
 from core import scrapertools
@@ -185,27 +186,70 @@ def search(item, texto):
 def findvideos(item):
     logger.info()
     itemlist = []
+    global new_data
+    new_data = []
     data = get_source(item.url)
     data = data.replace("&lt;","<").replace("&quot;",'"').replace("&gt;",">").replace("&amp;","&").replace('\"',"")
     patron = '<div class=TPlayerTb.*?id=(.*?)>.*?src=(.*?) frameborder'
-    matches = re.compile(patron, re.DOTALL).findall(data)
+    matches = scrapertools.find_multiple_matches(data, patron)
+    headers = {'referer': item.url}
     for opt, urls_page in matches:
         language = scrapertools.find_single_match(data, 'TPlayerNv>.*?tplayernv=%s><span>Opción.*?<span>(.*?)</span>' % opt)
-        headers = {'referer': item.url}
         if 'trembed' in urls_page:
             urls_page = scrapertools.decodeHtmlentities(urls_page)
-            sub_data = httptools.downloadpage(urls_page).data
-            urls_page = scrapertools.find_single_match(sub_data, 'src="(.*?)" ')
-        itemlist.append(item.clone(title='[%s][%s]',
-                                   url=urls_page,
-                                   action='play',
-                                   language=language,
-                                   ))
+            sub_data = httptools.downloadpage(urls_page).data
+            urls_page = scrapertools.find_single_match(sub_data, 'src="([^"]+)" ')
+        if "repro.live" in urls_page:
+            server_repro(urls_page)
+        if "itatroniks.com" in urls_page:
+            server_itatroniks(urls_page)
+        for url in new_data:
+            itemlist.append(item.clone(title='[%s][%s]',
+                                       url=url,
+                                       action='play',
+                                       language=language,
+                                       ))
+        new_data = []
     itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
     return itemlist
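Note the '[%s][%s]' placeholder left in each title: it stays unresolved until get_servers_itemlist applies the lambda, which substitutes the detected server name and the language. Illustratively:

'[%s][%s]' % ('Openload'.capitalize(), 'Latino')  # -> '[Openload][Latino]'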
+def server_itatroniks(urls_page):
+    logger.info()
+    headers = {"Referer": urls_page}
+    id = scrapertools.find_single_match(urls_page, 'embed/(\w+)')
+    sub_data = httptools.downloadpage(urls_page, headers=headers).data
+    matches = scrapertools.find_multiple_matches(sub_data, 'button id="([^"]+)')
+    headers1 = {"X-Requested-With": "XMLHttpRequest"}
+    for serv in matches:
+        data1 = httptools.downloadpage("https://itatroniks.com/get/%s/%s" % (id, serv), headers=headers1).data
+        data_json = jsontools.load(data1)
+        urls_page = ""
+        try:
+            if "finished" == data_json["status"]: urls_page = "https://%s/embed/%s" % (data_json["server"], data_json["extid"])
+            if "propio" == data_json["status"]: urls_page = "https://%s/e/%s" % (data_json["server"], data_json["extid"])
+        except:
+            continue
+        new_data.append(urls_page)
+def server_repro(urls_page):
+    logger.info()
+    headers = {"Referer": urls_page}
+    sub_data = httptools.downloadpage(urls_page, headers=headers).data
+    urls_page1 = scrapertools.find_multiple_matches(sub_data, 'data-embed="([^"]+)"')
+    for urls_page in urls_page1:
+        urls_page += "=="  # base64.b64decode will not decode without the trailing "==" padding
+        urls_page = base64.b64decode(urls_page)
+        if "repro.live" in urls_page:
+            data1 = httptools.downloadpage(urls_page, headers=headers).data
+            urls_page1 = scrapertools.find_multiple_matches(data1, 'source src="([^"]+)')
+            for urls_page in urls_page1:
+                new_data.append(urls_page)
+        else:
+            new_data.append(urls_page)
 def newest(categoria):
     logger.info()
     itemlist = []
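The "==" appended above compensates for stripped Base64 padding: the encoded length must be a multiple of 4, or Python's b64decode raises binascii.Error. A sketch of the failure and two fixes, with an invented sample string:

import base64, binascii

s = "c29tZS91cmwvcGF0aA"  # invented sample: unpadded Base64 of "some/url/path"
try:
    base64.b64decode(s)
except binascii.Error as e:
    print(e)  # Incorrect padding
print(base64.b64decode(s + "=="))                 # the channel's approach
print(base64.b64decode(s + "=" * (-len(s) % 4)))  # general-purpose padding fix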

View File

@@ -186,7 +186,7 @@ def findvideos(item):
     for datos in dict:
         url1 = datos["url"]
         hostname = scrapertools.find_single_match(datos["hostname"].replace("www.",""), "(.*?)\.")
-        if "repelisgo" in hostname: continue
+        if "repelisgo" in hostname or "repelis.io" in datos["hostname"]: continue
         if hostname == "my": hostname = "mailru"
         titulo = "Ver en: " + hostname.capitalize() + " (" + cali[datos["quality"]] + ") (" + idio[datos["audio"]] + ")"
         itemlist.append(

View File

@@ -26,7 +26,7 @@ def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
 class UnshortenIt(object):
-    _adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net|briskgram\.net|activetect\.net|baymaleti\.net'
+    _adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net|briskgram\.net|activetect\.net|baymaleti\.net|thouth\.net'
     _linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
     _adfocus_regex = r'adfoc\.us'
     _lnxlu_regex = r'lnx\.lu'
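The change appends thouth\.net to the adf.ly family, so links on that domain are now routed through the adfly resolver. A quick check with an abridged copy of the alternation (the full pattern is above):

import re
adfly_like = r'adf\.ly|baymaleti\.net|thouth\.net'  # abridged
print(bool(re.search(adfly_like, 'http://thouth.net/abc123', re.IGNORECASE)))  # True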

View File

@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 import re
-import urllib
+import urllib, random, base64
 from core import httptools
 from core import jsontools
@@ -12,7 +12,7 @@ from platformcode import logger
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
     # http://netu.tv/watch_video.php?v=XX is only a redirect; go straight to http://hqq.tv/player/embed_player.php?vid=XX
-    page_url = page_url.replace("http://netu.tv/watch_video.php?v=", "http://hqq.tv/player/embed_player.php?vid=")
+    page_url = page_url.replace("/watch_video.php?v=", "/player/embed_player.php?vid=")
     data = httptools.downloadpage(page_url).data
     if "var userid = '';" in data.lower():
         return False, "[netutv] El archivo no existe o ha sido borrado"
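Dropping the host from the search string makes the rewrite scheme- and mirror-independent: it now works for netu.tv, waaw.tv, hqq.tv and any other mirror, over http or https. For example:

u = "https://waaw.tv/watch_video.php?v=XYZ"
print(u.replace("/watch_video.php?v=", "/player/embed_player.php?vid="))
# -> https://waaw.tv/player/embed_player.php?vid=XYZ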
@@ -21,72 +21,74 @@ def test_video_exists(page_url):
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("url=" + page_url)
     video_urls = []
-    if "hash=" in page_url:
-        data = urllib.unquote(httptools.downloadpage(page_url).data)
-        id_video = scrapertools.find_single_match(data, "vid':'([^']+)'")
-        page_url = "http://hqq.watch/player/embed_player.php?vid=%s" % id_video
-    else:
-        id_video = page_url.rsplit("=", 1)[1]
-    page_url_hqq = "http://hqq.watch/player/embed_player.php?vid=%s&autoplay=no" % id_video
-    data_page_url_hqq = httptools.downloadpage(page_url_hqq, add_referer=True).data
-    js_wise = scrapertools.find_single_match(data_page_url_hqq,
-                                             "<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>")
-    data_unwise = jswise(js_wise).replace("\\", "")
-    at = scrapertools.find_single_match(data_unwise, 'at=(\w+)')
-    http_referer = scrapertools.find_single_match(data_unwise, 'http_referer=(.*?)&')
-    url = "http://hqq.watch/sec/player/embed_player.php?iss=&vid=%s&at=%s&autoplayed=yes&referer=on" \
-          "&http_referer=%s&pass=&embed_from=&need_captcha=0&hash_from=" % (id_video, at, http_referer)
-    data_player = httptools.downloadpage(url, add_referer=True).data
-    data_unescape = scrapertools.find_multiple_matches(data_player, 'document.write\(unescape\("([^"]+)"')
-    data = ""
-    for d in data_unescape:
-        data += urllib.unquote(d)
-    subtitle = scrapertools.find_single_match(data, 'value="sublangs=Spanish.*?sub=([^&]+)&')
-    if not subtitle:
-        subtitle = scrapertools.find_single_match(data, 'value="sublangs=English.*?sub=([^&]+)&')
-    data_unwise_player = ""
-    js_wise = scrapertools.find_single_match(data_player,
-                                             "<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>")
-    if js_wise:
-        data_unwise_player = jswise(js_wise).replace("\\", "")
-    vars_data = scrapertools.find_single_match(data, '/player/get_md5.php",\s*\{(.*?)\}')
-    matches = scrapertools.find_multiple_matches(vars_data, '\s*([^:]+):\s*([^,]*)[,"]')
-    params = {}
-    for key, value in matches:
-        if key == "adb":
-            params[key] = "0/"
-        elif '"' in value:
-            params[key] = value.replace('"', '')
-        else:
-            value_var = scrapertools.find_single_match(data, 'var\s*%s\s*=\s*"([^"]+)"' % value)
-            if not value_var and data_unwise_player:
-                value_var = scrapertools.find_single_match(data_unwise_player, 'var\s*%s\s*=\s*"([^"]+)"' % value)
-            params[key] = value_var
-    params = urllib.urlencode(params)
-    head = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
-    data = httptools.downloadpage("http://hqq.watch/player/get_md5.php?" + params, headers=head).data
-    media_urls = []
-    url_data = jsontools.load(data)
-    media_url = tb(url_data["obf_link"].replace("#", "")) + ".mp4.m3u8"
-    if not media_url.startswith("http"):
-        media_url = "https:" + media_url
-    video_urls = []
-    media = media_url + "|User-Agent=Mozilla/5.0 (iPhone; CPU iPhone OS 5_0_1 like Mac OS X)"
-    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [netu.tv]", media, 0, subtitle])
-    for video_url in video_urls:
-        logger.info("%s - %s" % (video_url[0], video_url[1]))
page_url = page_url.replace("/watch_video.php?v=", "/player/embed_player.php?vid=")
page_url = page_url.replace('https://netu.tv/', 'http://hqq.watch/')
page_url = page_url.replace('https://waaw.tv/', 'http://hqq.watch/')
data = httptools.downloadpage(page_url).data
# ~ logger.debug(data)
js_wise = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>")
data = jswise(js_wise).replace("\\", "")
# ~ logger.debug(data)
alea = str(random.random())[2:]
data_ip = httptools.downloadpage('http://hqq.watch/player/ip.php?type=json&rand=%s' % alea).data
# ~ logger.debug(data_ip)
json_data_ip = jsontools.load(data_ip)
url = scrapertools.find_single_match(data, 'self\.location\.replace\("([^)]+)\)')
url = url.replace('"+rand+"', alea)
url = url.replace('"+data.ip+"', json_data_ip['ip'])
url = url.replace('"+need_captcha+"', '0') #json_data_ip['need_captcha'])
url = url.replace('"+token', '')
# ~ logger.debug(url)
headers = { "User-Agent": 'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/533.4 (KHTML, like Gecko) Chrome/5.0.375.127 Large Screen Safari/533.4 GoogleTV/162671' }
data = httptools.downloadpage('http://hqq.watch'+url, headers=headers).data
# ~ logger.debug(data)
codigo_js = scrapertools.find_multiple_matches(data, '<script>document.write\(unescape\("([^"]+)')
# ~ logger.debug(codigo_js)
js_aux = urllib.unquote(codigo_js[0])
at = scrapertools.find_single_match(js_aux, 'var at = "([^"]+)')
js_aux = urllib.unquote(codigo_js[1])
var_link_1 = scrapertools.find_single_match(js_aux, '&link_1=\\"\+encodeURIComponent\(([^)]+)')
var_server_2 = scrapertools.find_single_match(js_aux, '&server_2=\\"\+encodeURIComponent\(([^)]+)')
vid = scrapertools.find_single_match(js_aux, '&vid=\\"\+encodeURIComponent\(\\"([^"]+)')
ext = '.mp4.m3u8'
# ~ logger.debug('%s %s %s %s' % (at, var_link_1, var_server_2, vid))
js_wise = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>")
data = jswise(js_wise).replace("\\", "")
# ~ logger.debug(data)
variables = scrapertools.find_multiple_matches(data, 'var ([a-zA-Z0-9]+) = "([^"]+)";')
# ~ logger.debug(variables)
for nombre, valor in variables:
# ~ logger.debug('%s %s' % (nombre, valor))
if nombre == var_link_1: link_1 = valor
if nombre == var_server_2: server_2 = valor
link_m3u8 = 'http://hqq.watch/player/get_md5.php?ver=2&at=%s&adb=0&b=1&link_1=%s&server_2=%s&vid=%s&ext=%s' % (at, link_1, server_2, vid, ext)
# ~ logger.debug(link_m3u8)
video_urls.append(["[netu.tv]", link_m3u8])
return video_urls
-## Get the m3u8 url
-def tb(b_m3u8_2):
-    j = 0
-    s2 = ""
-    while j < len(b_m3u8_2):
-        s2 += "\\u0" + b_m3u8_2[j:(j + 3)]
-        j += 3
-    return s2.decode('unicode-escape').encode('ASCII', 'ignore')
 ## --------------------------------------------------------------------------------
 ## --------------------------------------------------------------------------------
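For reference, the removed tb() helper de-obfuscated "obf_link" by prefixing each 3-hex-digit group with \u0 and decoding the resulting escapes (Python 2 semantics, sample value invented):

tb("068074074070")  # builds "\u0068\u0074\u0074\u0070" -> "http"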