diff --git a/plugin.video.alfa/channels/cineasiaenlinea.json b/plugin.video.alfa/channels/cineasiaenlinea.json
new file mode 100644
index 00000000..db762a51
--- /dev/null
+++ b/plugin.video.alfa/channels/cineasiaenlinea.json
@@ -0,0 +1,61 @@
+{
+    "id": "cineasiaenlinea",
+    "name": "CineAsiaEnLinea",
+    "active": true,
+    "adult": false,
+    "language": ["cast", "lat"],
+    "thumbnail": "http://i.imgur.com/5KOU8uy.png?3",
+    "banner": "cineasiaenlinea.png",
+    "categories": [
+        "movie",
+        "vos"
+    ],
+    "settings": [
+        {
+            "id": "modo_grafico",
+            "type": "bool",
+            "label": "Buscar información extra",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        },
+        {
+            "id": "include_in_global_search",
+            "type": "bool",
+            "label": "Incluir en búsqueda global",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        },
+        {
+            "id": "include_in_newest_peliculas",
+            "type": "bool",
+            "label": "Incluir en Novedades - Películas",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        },
+        {
+            "id": "include_in_newest_terror",
+            "type": "bool",
+            "label": "Incluir en Novedades - Terror",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        },
+        {
+            "id": "perfil",
+            "type": "list",
+            "label": "Perfil de color",
+            "default": 3,
+            "enabled": true,
+            "visible": true,
+            "lvalues": [
+                "Sin color",
+                "Perfil 3",
+                "Perfil 2",
+                "Perfil 1"
+            ]
+        }
+    ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/cineasiaenlinea.py b/plugin.video.alfa/channels/cineasiaenlinea.py
new file mode 100644
index 00000000..09855b82
--- /dev/null
+++ b/plugin.video.alfa/channels/cineasiaenlinea.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from core import httptools
+from core import scrapertools
+from core import servertools
+from core import tmdb
+from core.item import Item
+from platformcode import config, logger
+from channelselector import get_thumb
+
+host = "http://www.cineasiaenlinea.com/"
+__channel__ = 'cineasiaenlinea'
+
+try:
+    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
+except:
+    __modo_grafico__ = True
+
+# Channel configuration
+__perfil__ = int(config.get_setting('perfil', 'cineasiaenlinea'))
+
+# Set the colour profile
+perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
+          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
+          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
+
+if __perfil__ - 1 >= 0:
+    color1, color2, color3 = perfil[__perfil__ - 1]
+else:
+    color1 = color2 = color3 = ""
+
+
+def mainlist(item):
+    logger.info()
+    itemlist = []
+
+    itemlist.append(item.clone(action="peliculas", title="Novedades", url=host + "archivos/peliculas",
+                               thumbnail=get_thumb('newest', auto=True), text_color=color1))
+    itemlist.append(item.clone(action="peliculas", title="Estrenos", url=host + "archivos/estrenos",
+                               thumbnail=get_thumb('premieres', auto=True), text_color=color1))
+    itemlist.append(item.clone(action="indices", title="Por géneros", url=host,
+                               thumbnail=get_thumb('genres', auto=True), text_color=color1))
+    itemlist.append(item.clone(action="indices", title="Por país", url=host, text_color=color1,
+                               thumbnail=get_thumb('country', auto=True)))
+    itemlist.append(item.clone(action="indices", title="Por año", url=host, text_color=color1,
+                               thumbnail=get_thumb('year', auto=True)))
+
+    itemlist.append(item.clone(title="", action=""))
+    itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3,
+                               thumbnail=get_thumb('search', auto=True)))
+    itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold",
+                               folder=False))
+
+    return itemlist
+
+
+def configuracion(item):
+    from platformcode import platformtools
+    ret = platformtools.show_channel_settings()
+    platformtools.itemlist_refresh()
+    return ret
+
+
+def search(item, texto):
+    logger.info()
+
+    item.url = "%s?s=%s" % (host, texto.replace(" ", "+"))
+
+    try:
+        return peliculas(item)
+    # Catch the exception so a failing channel does not break the global search
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("%s" % line)
+        return []
+
+
+def newest(categoria):
+    logger.info()
+    itemlist = []
+    item = Item()
+    try:
+        if categoria == 'peliculas':
+            item.url = host + "archivos/peliculas"
+        elif categoria == 'terror':
+            item.url = host + "genero/terror"
+        item.action = "peliculas"
+        itemlist = peliculas(item)
+
+        if itemlist[-1].action == "peliculas":
+            itemlist.pop()
+
+    # Catch the exception so a failing channel does not break the "Novedades" listings
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("{0}".format(line))
+        return []
+
+    return itemlist
+
+
+def peliculas(item):
+    logger.info()
+    itemlist = []
+    item.text_color = color2
+
+    # Download the page
+    data = httptools.downloadpage(item.url).data
+
+    patron = '([^<]+)<.*?src="([^"]+)".*?([^<]+)<'
+
+
+def indices(item):
+    elif "año" in item.title:
+        bloque = scrapertools.find_single_match(data, '(?i)Peliculas por Año(.*?)')
+    matches = scrapertools.find_multiple_matches(bloque, '([^<]+)<')
+
+    for scrapedurl, scrapedtitle in matches:
+        if "año" in item.title:
+            scrapedurl = "%sfecha-estreno/%s" % (host, scrapedurl)
+        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
+                             thumbnail=item.thumbnail, text_color=color3))
+
+    return itemlist
+
+
+def findvideos(item):
+    logger.info()
+    data = httptools.downloadpage(item.url).data
+    item.infoLabels["plot"] = scrapertools.find_single_match(data, '(?i)SINOPSIS.*?(.*?)')
+    item.infoLabels["trailer"] = scrapertools.find_single_match(data, 'src="(http://www.youtube.com/embed/[^"]+)"')
+
+    itemlist = servertools.find_video_items(item=item, data=data)
+    for it in itemlist:
+        it.thumbnail = item.thumbnail
+        it.text_color = color2
+
+    itemlist.append(item.clone(action="add_pelicula_to_library", title="Añadir película a la videoteca"))
+    if item.infoLabels["trailer"]:
+        folder = True
+        if config.is_xbmc():
+            folder = False
+        itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Ver Trailer", folder=folder,
+                                   contextual=not folder))
+
+    return itemlist
diff --git a/plugin.video.alfa/channels/erotik.py b/plugin.video.alfa/channels/erotik.py
index d5ed6678..b36be3f4 100755
--- a/plugin.video.alfa/channels/erotik.py
+++ b/plugin.video.alfa/channels/erotik.py
@@ -101,7 +101,7 @@ def play(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    item.url = scrapertools.find_single_match(data, 'Playerholder.*?src="([^"]+)"')
+    item.url = scrapertools.find_single_match(data, '(?i)Playerholder.*?src="([^"]+)"')
     if "tubst.net" in item.url:
         url = scrapertools.find_single_match(data, 'itemprop="embedURL" content="([^"]+)')
         data = httptools.downloadpage(url).data
diff --git a/plugin.video.alfa/channels/pelis24.json b/plugin.video.alfa/channels/pelis24.json
index 50f1c808..dfaf547b 100644
--- a/plugin.video.alfa/channels/pelis24.json
+++ b/plugin.video.alfa/channels/pelis24.json
@@ -10,7 +10,7 @@
     "categories": [
         "movie",
         "tvshow",
-        "vose",
+        "vose"
     ],
     "settings": [
         {
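Note on the two hunks above: the erotik.py change only adds the inline (?i) flag so the Playerholder marker matches regardless of case, and the pelis24.json change drops the trailing comma after "vose", which strict JSON parsers reject. A quick illustration of the latter with Python's json module (the list literal here is a made-up fragment, not the real channel file):

    import json

    # Trailing comma, as in the old pelis24.json entry: a strict parser raises ValueError.
    try:
        json.loads('["movie", "tvshow", "vose",]')
    except ValueError as e:
        print("rejected: %s" % e)

    # Without the trailing comma the same data loads fine.
    print(json.loads('["movie", "tvshow", "vose"]'))
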
diff --git a/plugin.video.alfa/channels/pelis24.py b/plugin.video.alfa/channels/pelis24.py
index fdfa1409..82ca64b2 100644
--- a/plugin.video.alfa/channels/pelis24.py
+++ b/plugin.video.alfa/channels/pelis24.py
@@ -102,14 +102,14 @@ def sub_search(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    # logger.info(data)
-    data = scrapertools.find_single_match(data, 'Resultados encontrados(.*?)resppages')
-    # logger.info(data)
-    patron = '([^.*?'  # url, img, title
-    patron += '([^<]+)'
+    data = scrapertools.find_single_match(data, 'Archivos (.*?)resppages')
+    patron = 'img alt="([^"]+)".*?'
+    patron += 'src="([^"]+)".*?'
+    patron += 'href="([^"]+)".*?'
+    patron += 'fechaestreno">([^<]+)'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
+    for scrapedtitle, scrapedthumbnail, scrapedurl, year in matches:
         if 'tvshows' not in scrapedurl:
             itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,
                                        contentTitle=scrapedtitle,
@@ -133,18 +133,19 @@ def peliculas(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
     data = scrapertools.decodeHtmlentities(data)
     # logger.info(data)
     # img, title
-    patron = '([^<]+)'  # year
+    patron = '(?is)movie-img img-box.*?alt="([^"]+).*?'
+    patron += 'src="([^"]+).*?'
+    patron += 'href="([^"]+).*?'
+    patron += 'fechaestreno">([^<]+).*?'
+    patron += 'quality">([^<]+)'
     matches = scrapertools.find_multiple_matches(data, patron)
-    for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year in matches[item.page:item.page + 30]:
+    for scrapedtitle, scrapedthumbnail, scrapedurl, year, quality in matches[item.page:item.page + 30]:
         title = '%s [COLOR yellowgreen](%s)[/COLOR]' % (scrapedtitle, quality)
         itemlist.append(Item(channel=__channel__, action="findvideos", text_color=color3,
@@ -172,10 +173,10 @@ def genresYears(item):
     data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
     data = scrapertools.decodeHtmlentities(data)
-    if item.title == "Estrenos por Año":
+    if item.title == "Estrenos":
         patron_todas = 'ESTRENOS(.*?) Géneros'
     else:
-        patron_todas = 'Generos'
 (.*?)").replace("&amp;","&").replace('\"',"")
     patron = '.*?src=(.*?) frameborder'
-    matches = re.compile(patron, re.DOTALL).findall(data)
+    matches = scrapertools.find_multiple_matches(data, patron)
+    headers = {'referer':item.url}
     for opt, urls_page in matches:
         language = scrapertools.find_single_match (data,'TPlayerNv>.*?tplayernv=%s>Opción.*?(.*?)' % opt)
-        headers = {'referer':item.url}
         if 'trembed' in urls_page:
             urls_page = scrapertools.decodeHtmlentities(urls_page)
-            sub_data=httptools.downloadpage(urls_page).data
-            urls_page = scrapertools.find_single_match(sub_data, 'src="(.*?)" ')
-        itemlist.append(item.clone(title='[%s][%s]',
-                                   url=urls_page,
-                                   action='play',
-                                   language=language,
-                                   ))
+            sub_data = httptools.downloadpage(urls_page).data
+            urls_page = scrapertools.find_single_match(sub_data, 'src="([^"]+)" ')
+        if "repro.live" in urls_page:
+            server_repro(urls_page)
+        if "itatroniks.com" in urls_page:
+            server_itatroniks(urls_page)
+        for url in new_data:
+            itemlist.append(item.clone(title='[%s][%s]',
+                                       url=url,
+                                       action='play',
+                                       language=language,
+                                       ))
+        new_data = []
     itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
     return itemlist
+def server_itatroniks(urls_page):
+    logger.info()
+    headers = {"Referer":urls_page}
+    id = scrapertools.find_single_match(urls_page, 'embed/(\w+)')
+    sub_data = httptools.downloadpage(urls_page, headers = headers).data
+    matches = scrapertools.find_multiple_matches(sub_data, 'button id="([^"]+)')
+    headers1 = ({"X-Requested-With":"XMLHttpRequest"})
+    for serv in matches:
+        data1 = httptools.downloadpage("https://itatroniks.com/get/%s/%s" %(id, serv), headers = headers1).data
+        data_json = jsontools.load(data1)
+        urls_page = ""
+        try:
+            if "finished" == data_json["status"]: urls_page = "https://%s/embed/%s" %(data_json["server"], data_json["extid"])
+            if "propio" == data_json["status"]: urls_page = "https://%s/e/%s" %(data_json["server"], data_json["extid"])
+        except:
+            continue
+        new_data.append(urls_page)
+
+
+def server_repro(urls_page):
+    logger.info()
+    headers = {"Referer":urls_page}
+    sub_data = httptools.downloadpage(urls_page, headers = headers).data
+    urls_page1 = scrapertools.find_multiple_matches(sub_data, 'data-embed="([^"]+)"')
+    for urls_page in urls_page1:
+        urls_page += "=="  # base64.decode will not decode unless the value ends with "=="
+        urls_page = base64.b64decode(urls_page)
+        if "repro.live" in urls_page:
+            data1 = httptools.downloadpage(urls_page, headers = headers).data
+            urls_page1 = scrapertools.find_multiple_matches(data1, 'source src="([^"]+)')
+            for urls_page in urls_page1:
+                new_data.append(urls_page)
+        else:
+            new_data.append(urls_page)
+
+
 def newest(categoria):
     logger.info()
     itemlist = []
diff --git a/plugin.video.alfa/channels/repelis.py b/plugin.video.alfa/channels/repelis.py
index 60853f2f..8f64886b 100644
--- a/plugin.video.alfa/channels/repelis.py
+++ b/plugin.video.alfa/channels/repelis.py
@@ -186,7 +186,7 @@ def findvideos(item):
     for datos in dict:
         url1 = datos["url"]
         hostname = scrapertools.find_single_match(datos["hostname"].replace("www.",""), "(.*?)\.")
-        if "repelisgo" in hostname: continue
+        if "repelisgo" in hostname or "repelis.io" in datos["hostname"]: continue
        if hostname == "my": hostname = "mailru"
         titulo = "Ver en: " + hostname.capitalize() + " (" + cali[datos["quality"]] + ") (" + idio[datos["audio"]] + ")"
         itemlist.append(
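Note on server_repro() above: the data-embed values apparently arrive base64-encoded with their padding stripped, which is why the patch appends "==" before calling base64.b64decode. A slightly more general sketch of the same idea — the helper name is mine and the sample value is simulated, not taken from repro.live:

    import base64

    def b64decode_padded(value):
        # Pad to a multiple of 4 instead of always appending "==",
        # so values that already carry their padding still decode.
        return base64.b64decode(value + b"=" * (-len(value) % 4))

    # Simulate a data-embed attribute whose padding was stripped.
    token = base64.b64encode(b"https://example.com/embed/video").rstrip(b"=")
    print(b64decode_padded(token))   # -> https://example.com/embed/video
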
diff --git a/plugin.video.alfa/lib/unshortenit.py b/plugin.video.alfa/lib/unshortenit.py
index 16916946..b7165a5e 100755
--- a/plugin.video.alfa/lib/unshortenit.py
+++ b/plugin.video.alfa/lib/unshortenit.py
@@ -26,7 +26,7 @@ def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
 
 
 class UnshortenIt(object):
-    _adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net|briskgram\.net|activetect\.net|baymaleti\.net'
+    _adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net|briskgram\.net|activetect\.net|baymaleti\.net|thouth\.net'
     _linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
     _adfocus_regex = r'adfoc\.us'
     _lnxlu_regex = r'lnx\.lu'
diff --git a/plugin.video.alfa/servers/netutv.py b/plugin.video.alfa/servers/netutv.py
index 35c1cf42..840fb719 100755
--- a/plugin.video.alfa/servers/netutv.py
+++ b/plugin.video.alfa/servers/netutv.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 
 import re
-import urllib
+import urllib, random, base64
 
 from core import httptools
 from core import jsontools
@@ -12,7 +12,7 @@ from platformcode import logger
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
     # http://netu.tv/watch_video.php=XX only contains a redirect; go straight to http://hqq.tv/player/embed_player.php?vid=XX
-    page_url = page_url.replace("http://netu.tv/watch_video.php?v=", "http://hqq.tv/player/embed_player.php?vid=")
+    page_url = page_url.replace("/watch_video.php?v=", "/player/embed_player.php?vid=")
     data = httptools.downloadpage(page_url).data
     if "var userid = '';" in data.lower():
         return False, "[netutv] El archivo no existe o ha sido borrado"
@@ -21,72 +21,74 @@
 
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("url=" + page_url)
+    video_urls = []
+
     if "hash=" in page_url:
         data = urllib.unquote(httptools.downloadpage(page_url).data)
         id_video = scrapertools.find_single_match(data, "vid':'([^']+)'")
+        page_url = "http://hqq.watch/player/embed_player.php?vid=%s" % id_video
     else:
-        id_video = page_url.rsplit("=", 1)[1]
-    page_url_hqq = "http://hqq.watch/player/embed_player.php?vid=%s&autoplay=no" % id_video
-    data_page_url_hqq = httptools.downloadpage(page_url_hqq, add_referer=True).data
-    js_wise = scrapertools.find_single_match(data_page_url_hqq,
-                                             "")
-    data_unwise = jswise(js_wise).replace("\\", "")
-    at = scrapertools.find_single_match(data_unwise, 'at=(\w+)')
-    http_referer = scrapertools.find_single_match(data_unwise, 'http_referer=(.*?)&')
-    url = "http://hqq.watch/sec/player/embed_player.php?iss=&vid=%s&at=%s&autoplayed=yes&referer=on" \
-          "&http_referer=%s&pass=&embed_from=&need_captcha=0&hash_from=" % (id_video, at, http_referer)
-    data_player = httptools.downloadpage(url, add_referer=True).data
-    data_unescape = scrapertools.find_multiple_matches(data_player, 'document.write\(unescape\("([^"]+)"')
-    data = ""
-    for d in data_unescape:
-        data += urllib.unquote(d)
-    subtitle = scrapertools.find_single_match(data, 'value="sublangs=Spanish.*?sub=([^&]+)&')
-    if not subtitle:
-        subtitle = scrapertools.find_single_match(data, 'value="sublangs=English.*?sub=([^&]+)&')
-    data_unwise_player = ""
-    js_wise = scrapertools.find_single_match(data_player,
-                                             "")
-    if js_wise:
-        data_unwise_player = jswise(js_wise).replace("\\", "")
-    vars_data = scrapertools.find_single_match(data, '/player/get_md5.php",\s*\{(.*?)\}')
-    matches = scrapertools.find_multiple_matches(vars_data, '\s*([^:]+):\s*([^,]*)[,"]')
-    params = {}
-    for key, value in matches:
-        if key == "adb":
-            params[key] = "0/"
-        elif '"' in value:
-            params[key] = value.replace('"', '')
-        else:
-            value_var = scrapertools.find_single_match(data, 'var\s*%s\s*=\s*"([^"]+)"' % value)
-            if not value_var and data_unwise_player:
-                value_var = scrapertools.find_single_match(data_unwise_player, 'var\s*%s\s*=\s*"([^"]+)"' % value)
-            params[key] = value_var
-    params = urllib.urlencode(params)
-    head = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
-    data = httptools.downloadpage("http://hqq.watch/player/get_md5.php?" + params, headers=head).data
-    media_urls = []
-    url_data = jsontools.load(data)
-    media_url = tb(url_data["obf_link"].replace("#", "")) + ".mp4.m3u8"
-    if not media_url.startswith("http"):
-        media_url = "https:" + media_url
-    video_urls = []
-    media = media_url + "|User-Agent=Mozilla/5.0 (iPhone; CPU iPhone OS 5_0_1 like Mac OS X)"
-    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [netu.tv]", media, 0, subtitle])
-    for video_url in video_urls:
-        logger.info("%s - %s" % (video_url[0], video_url[1]))
+        page_url = page_url.replace("/watch_video.php?v=", "/player/embed_player.php?vid=")
+
+    page_url = page_url.replace('https://netu.tv/', 'http://hqq.watch/')
+    page_url = page_url.replace('https://waaw.tv/', 'http://hqq.watch/')
+
+    data = httptools.downloadpage(page_url).data
+    # ~ logger.debug(data)
+
+    js_wise = scrapertools.find_single_match(data, "")
+    data = jswise(js_wise).replace("\\", "")
+    # ~ logger.debug(data)
+
+    alea = str(random.random())[2:]
+    data_ip = httptools.downloadpage('http://hqq.watch/player/ip.php?type=json&rand=%s' % alea).data
+    # ~ logger.debug(data_ip)
+    json_data_ip = jsontools.load(data_ip)
+
+    url = scrapertools.find_single_match(data, 'self\.location\.replace\("([^)]+)\)')
+    url = url.replace('"+rand+"', alea)
+    url = url.replace('"+data.ip+"', json_data_ip['ip'])
+    url = url.replace('"+need_captcha+"', '0')  #json_data_ip['need_captcha'])
+    url = url.replace('"+token', '')
+    # ~ logger.debug(url)
+
+    headers = { "User-Agent": 'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/533.4 (KHTML, like Gecko) Chrome/5.0.375.127 Large Screen Safari/533.4 GoogleTV/162671' }
+    data = httptools.downloadpage('http://hqq.watch'+url, headers=headers).data
+    # ~ logger.debug(data)
+
+    codigo_js = scrapertools.find_multiple_matches(data, '")
+    data = jswise(js_wise).replace("\\", "")
+    # ~ logger.debug(data)
+
+    variables = scrapertools.find_multiple_matches(data, 'var ([a-zA-Z0-9]+) = "([^"]+)";')
+    # ~ logger.debug(variables)
+
+    for nombre, valor in variables:
+        # ~ logger.debug('%s %s' % (nombre, valor))
+        if nombre == var_link_1: link_1 = valor
+        if nombre == var_server_2: server_2 = valor
+
+    link_m3u8 = 'http://hqq.watch/player/get_md5.php?ver=2&at=%s&adb=0&b=1&link_1=%s&server_2=%s&vid=%s&ext=%s' % (at, link_1, server_2, vid, ext)
+    # ~ logger.debug(link_m3u8)
+
+    video_urls.append(["[netu.tv]", link_m3u8])
+
     return video_urls
 
 
-## Get the m3u8 url
-def tb(b_m3u8_2):
-    j = 0
-    s2 = ""
-    while j < len(b_m3u8_2):
-        s2 += "\\u0" + b_m3u8_2[j:(j + 3)]
-        j += 3
-    return s2.decode('unicode-escape').encode('ASCII', 'ignore')
-
-
 ## --------------------------------------------------------------------------------
 ## --------------------------------------------------------------------------------
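Note on the rewritten netutv.py get_video_url() above: the hqq.watch embed page builds its redirect URL in JavaScript by string concatenation, so after unpacking the "wise" script the code captures the literal argument of self.location.replace(...) and substitutes the concatenated variables ("+rand+", "+data.ip+", "+need_captcha+") with real values before requesting the page with a GoogleTV user agent. A minimal sketch of that substitution step — the scraped string below is invented for illustration (its path mirrors the /sec/player/embed_player.php URL the old code built); the real value comes from hqq.watch and may differ:

    # Hypothetical value captured from self.location.replace("...") in the unpacked player JS.
    scraped = '/sec/player/embed_player.php?vid=abc123&at="+rand+"&ip="+data.ip+"&need_captcha="+need_captcha+"'

    alea = '8240613615'   # stands in for str(random.random())[2:] from the patch
    ip = '203.0.113.7'    # stands in for the value returned by /player/ip.php?type=json

    url = scraped.replace('"+rand+"', alea)
    url = url.replace('"+data.ip+"', ip)
    url = url.replace('"+need_captcha+"', '0')
    url = url.replace('"+token', '')
    print(url)
    # -> /sec/player/embed_player.php?vid=abc123&at=8240613615&ip=203.0.113.7&need_captcha=0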