From de8e430edadb0d6998e5387ee4cfe683cabcdf03 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Mon, 14 May 2018 14:41:04 -0500 Subject: [PATCH 01/17] yourupload: fix --- plugin.video.alfa/servers/yourupload.py | 26 +++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/plugin.video.alfa/servers/yourupload.py b/plugin.video.alfa/servers/yourupload.py index db5b7647..aeda1f91 100755 --- a/plugin.video.alfa/servers/yourupload.py +++ b/plugin.video.alfa/servers/yourupload.py @@ -17,21 +17,27 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): logger.info("(page_url='%s')" % page_url) - video_urls = [] data = httptools.downloadpage(page_url).data + url1 = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers.get("location", "") + referer = {'Referer': page_url} url = scrapertools.find_single_match(data, ' Date: Mon, 14 May 2018 14:41:47 -0500 Subject: [PATCH 02/17] yourupload: fix --- plugin.video.alfa/servers/yourupload.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugin.video.alfa/servers/yourupload.json b/plugin.video.alfa/servers/yourupload.json index a2ac2c43..dd920bd5 100755 --- a/plugin.video.alfa/servers/yourupload.json +++ b/plugin.video.alfa/servers/yourupload.json @@ -12,6 +12,10 @@ { "pattern": "embed[./]yourupload.com(?:/|.php\\?url=)([A-z0-9]+)", "url": "http://www.yourupload.com/embed/\\1" + }, + { + "pattern": "(yourupload.com/download\\?file=[A-z0-9]+)", + "url": "https://www.\\1&sendFile=true" } ] }, From d06efda5c904cd9d932479738422a0d86911f872 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Mon, 14 May 2018 14:44:33 -0500 Subject: [PATCH 03/17] miradetodo: fix --- plugin.video.alfa/channels/miradetodo.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/plugin.video.alfa/channels/miradetodo.py b/plugin.video.alfa/channels/miradetodo.py index 119830b9..4bbfbea4 100755 --- a/plugin.video.alfa/channels/miradetodo.py +++ b/plugin.video.alfa/channels/miradetodo.py @@ -323,6 +323,7 @@ def findvideos(item): matches = re.compile(patron, re.DOTALL).findall(data) for option, videoitem in matches: + sub = '' lang = scrapertools.find_single_match(src, '.*?(.*?)<\/a>' % option) if 'audio ' in lang.lower(): @@ -333,12 +334,21 @@ def findvideos(item): video_urls = scrapertools.find_multiple_matches(data, '
  • 0 and item.extra != 'findvideos': itemlist.append(Item(channel=item.channel, From 33109bee853007ab10277b0af2881ecc9087965f Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Mon, 14 May 2018 14:45:40 -0500 Subject: [PATCH 04/17] solocastellano: nuevo canal --- .../channels/solocastellano.json | 65 +++ plugin.video.alfa/channels/solocastellano.py | 429 ++++++++++++++++++ 2 files changed, 494 insertions(+) create mode 100644 plugin.video.alfa/channels/solocastellano.json create mode 100644 plugin.video.alfa/channels/solocastellano.py diff --git a/plugin.video.alfa/channels/solocastellano.json b/plugin.video.alfa/channels/solocastellano.json new file mode 100644 index 00000000..43cd51ac --- /dev/null +++ b/plugin.video.alfa/channels/solocastellano.json @@ -0,0 +1,65 @@ +{ + "id": "solocastellano", + "name": "SoloCastellano", + "active": true, + "adult": false, + "language": ["cast"], + "thumbnail": "https://s31.postimg.cc/uotcf3owb/solocastellano.png", + "banner": "", + "categories": [ + "movie" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": false, + "visible": false + }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar" + ] + }, + { + "id": "include_in_newest_castellano", + "type": "bool", + "label": "Incluir en Novedades - Castellano", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_terror", + "type": "bool", + "label": "Incluir en Novedades - Terror", + "default": true, + "enabled": true, + "visible": true + } + ] +} diff --git a/plugin.video.alfa/channels/solocastellano.py b/plugin.video.alfa/channels/solocastellano.py new file mode 100644 index 00000000..71b71613 --- /dev/null +++ b/plugin.video.alfa/channels/solocastellano.py @@ -0,0 +1,429 @@ +# -*- coding: utf-8 -*- + +import re + +from channels import autoplay +from channels import filtertools +from core import httptools +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item +from platformcode import config, logger +from channelselector import get_thumb + +host = 'http://solocastellano.com/' +headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], + ['Referer', host]] + +IDIOMAS = {'Castellano': 'Castellano'} +list_language = IDIOMAS.values() +list_quality = [] +list_servers = ['yourupload', 'openload', 'sendvid'] + +vars = { + 'ef5ca18f089cf01316bbc967fa10f72950790c39ef5ca18f089cf01316bbc967fa10f72950790c39': 'http://tawnestdplsnetps.pw/', + 'b48699bb49d4550f27879deeb948d4f7d9c5949a8': 'embed', + 'JzewJkLlrvcFnLelj2ikbA': 'php?url=', + 'p889c6853a117aca83ef9d6523335dc065213ae86': 'player', + 'e20fb341325556c0fc0145ce10d08a970538987': 'http://yourupload.com/embed/' +} + +tgenero = {"acción": "https://s3.postimg.cc/y6o9puflv/accion.png", + "animación": "https://s13.postimg.cc/5on877l87/animacion.png", + "aventura": "https://s10.postimg.cc/6su40czih/aventura.png", + "belico": 
"https://s23.postimg.cc/71itp9hcr/belica.png", + "ciencia ficción": "https://s9.postimg.cc/diu70s7j3/cienciaficcion.png", + "comedia": "https://s7.postimg.cc/ne9g9zgwb/comedia.png", + "comedia romántica": "https://s21.postimg.cc/xfsj7ua0n/romantica.png", + "cortometrajes": "https://s15.postimg.cc/kluxxwg23/cortometraje.png", + "crimen": "https://s4.postimg.cc/6z27zhirx/crimen.png", + "cristianas": "https://s7.postimg.cc/llo852fwr/religiosa.png", + "deportivas": "https://s13.postimg.cc/xuxf5h06v/deporte.png", + "drama": "https://s16.postimg.cc/94sia332d/drama.png", + "familiar": "https://s7.postimg.cc/6s7vdhqrf/familiar.png", + "fantasía": "https://s13.postimg.cc/65ylohgvb/fantasia.png", + "guerra": "https://s4.postimg.cc/n1h2jp2jh/guerra.png", + "historia": "https://s15.postimg.cc/fmc050h1n/historia.png", + "intriga": "https://s27.postimg.cc/v9og43u2b/intriga.png", + "misterios": "https://s1.postimg.cc/w7fdgf2vj/misterio.png", + "musical": "https://s29.postimg.cc/bbxmdh9c7/musical.png", + "romance": "https://s15.postimg.cc/fb5j8cl63/romance.png", + "suspenso": "https://s13.postimg.cc/wmw6vl1cn/suspenso.png", + "terror": "https://s7.postimg.cc/yi0gij3gb/terror.png", + "thriller": "https://s22.postimg.cc/5y9g0jsu9/thriller.png"} + +def mainlist(item): + logger.info() + + autoplay.init(item.channel, list_servers, list_quality) + + itemlist = [] + + itemlist.append(item.clone(title="Todas", + action="lista", + thumbnail=get_thumb('all', auto=True), + fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png', + url=host + 'lista-de-peliculas/', + extra='peliculas' + )) + + itemlist.append(item.clone(title="Ultimas", + action="lista", + thumbnail=get_thumb('last', auto=True), + fanart='https://s22.postimg.cc/cb7nmhwv5/ultimas.png', + url=host, + extra='peliculas' + )) + + itemlist.append(item.clone(title="Generos", + action="generos", + thumbnail=get_thumb('genres', auto=True), + fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png', + url=host, + extra='peliculas' + )) + + itemlist.append(item.clone(title="Buscar", + action="search", + url=host + 'search?q=', + thumbnail=get_thumb('search', auto=True), + fanart='https://s30.postimg.cc/pei7txpa9/buscar.png')) + + autoplay.show_option(item.channel, itemlist) + + return itemlist + + +def lista(item): + logger.info() + + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r'"|\n|\r|\t| |
    |\s{2,}', "", data) + contentSerieName = '' + + patron = '
  • ([^<]+)<\/a>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + + url = host + scrapedurl + title = scrapedtitle.lower() + if title in tgenero: + thumbnail = tgenero[title.lower()] + else: + thumbnail = '' + + itemactual = Item(channel=item.channel, + action='lista', + title=title, url=url, + thumbnail=thumbnail, + extra=item.extra + ) + + if title not in norep: + itemlist.append(itemactual) + norep.append(itemactual.title) + + return itemlist + + +def temporadas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '
  • <\/b> ([^<]+)<\/span><\/a>' + matches = re.compile(patron, re.DOTALL).findall(data) + temp = 1 + infoLabels = item.infoLabels + for scrapedurl, scrapedtitle in matches: + url = scrapedurl + title = scrapedtitle.strip('') + contentSeasonNumber = temp + infoLabels['season'] = contentSeasonNumber + thumbnail = item.thumbnail + plot = scrapertools.find_single_match(data, '

    ([^<]+)<\/p>') + itemlist.append(Item(channel=item.channel, + action="episodiosxtemp", + title=title, + fulltitle=item.title, + url=url, + thumbnail=thumbnail, + contentSerieName=item.contentSerieName, + contentSeasonNumber=contentSeasonNumber, + plot=plot, + infoLabels=infoLabels + )) + temp = temp + 1 + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, + title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', + url=item.url, + action="add_serie_to_library", + extra="episodios", + contentSerieName=item.contentSerieName, + extra1=item.extra1, + temp=str(temp) + )) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + temp = 'temporada-' + str(item.contentSeasonNumber) + patron = '
  • .\s*.\s*([^<]+)<' + + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl, scrapedepisode in matches: + url = host + scrapedurl + title = item.contentSerieName + ' ' + scrapedepisode + thumbnail = item.thumbnail + fanart = '' + itemlist.append(Item(channel=item.channel, + action="findvideos", + title=title, + fulltitle=item.fulltitle, + url=url, + thumbnail=item.thumbnail, + plot=item.plot, + extra=item.extra, + contentSerieName=item.contentSerieName + )) + + return itemlist + + +def episodiosxtemp(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + temp = 'temporada-' + str(item.contentSeasonNumber) + patron = '
  • .\s*.\s*([^<]+)<' + matches = re.compile(patron, re.DOTALL).findall(data) + infoLabels = item.infoLabels + for scrapedurl, scrapedepisode in matches: + url = host + scrapedurl + title = item.contentSerieName + ' ' + scrapedepisode + scrapedepisode = re.sub(r'.*?x', '', scrapedepisode) + infoLabels['episode'] = scrapedepisode + thumbnail = item.thumbnail + fanart = '' + itemlist.append(Item(channel=item.channel, + action="findvideos", + title=title, + fulltitle=item.fulltitle, + url=url, + thumbnail=item.thumbnail, + plot=item.plot, + extra=item.extra, + contentSerieName=item.contentSerieName, + infoLabels=infoLabels + )) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + return itemlist + + +def dec(encurl): + logger.info() + url = '' + encurl = encurl.translate(None, "',(,),;") + encurl = encurl.split('+') + + for cod in encurl: + if cod in vars: + url = url + vars[cod] + else: + url = url + cod + return url + + +def findvideos(item): + logger.info() + + itemlist = [] + langs = dict() + + data = httptools.downloadpage(item.url).data + patron = ' (.*?) <\/a>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for key, value in matches: + langs[key] = value.strip() + + patron = 'function (play\d).*?servidores.*?attr.*?src.*?\+([^;]+);' + matches = re.compile(patron, re.DOTALL).findall(data) + title = item.title + enlace = scrapertools.find_single_match(data, + 'var e20fb341325556c0fc0145ce10d08a970538987 =.*?"\/your\.".*?"([^"]+)"') + + for scrapedlang, encurl in matches: + + if 'e20fb34' in encurl: + url = dec(encurl) + url = url + enlace + + else: + url = dec(encurl) + title = '' + server = '' + servers = {'/opl': 'openload', '/your': 'yourupload', '/sen': 'senvid', '/face': 'netutv', '/vk': 'vk', + '/jk':'streamcherry'} + server_id = re.sub(r'.*?embed|\.php.*', '', url) + if server_id and server_id in servers: + server = servers[server_id] + + + if (scrapedlang in langs) and langs[scrapedlang] in list_language: + language = IDIOMAS[langs[scrapedlang]] + else: + language = 'Latino' + # + # if langs[scrapedlang] == 'Latino': + # idioma = '[COLOR limegreen]LATINO[/COLOR]' + # elif langs[scrapedlang] == 'Sub Español': + # idioma = '[COLOR red]SUB[/COLOR]' + + if item.extra == 'peliculas': + title = item.contentTitle + ' (' + server + ') ' + language + plot = scrapertools.find_single_match(data, '

    ([^<]+)<\/p>') + else: + title = item.contentSerieName + ' (' + server + ') ' + language + plot = item.plot + + thumbnail = servertools.guess_server_thumbnail(title) + + if 'player' not in url and 'php' in url: + itemlist.append(item.clone(title=title, + url=url, + action="play", + plot=plot, + thumbnail=thumbnail, + server=server, + quality='', + language=language + )) + # Requerido para FilterTools + + itemlist = filtertools.get_links(itemlist, item, list_language) + + # Requerido para AutoPlay + + autoplay.start(itemlist, item) + + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': + itemlist.append(Item(channel=item.channel, + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + url=item.url, + action="add_pelicula_to_library", + extra="findvideos", + contentTitle=item.contentTitle + )) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + if 'your' in item.url: + item.url = 'http://www.yourupload.com/embed/' + scrapertools.find_single_match(data, 'src=".*?code=(.*?)"') + itemlist.append(item) + else: + itemlist = servertools.find_video_items(data=data) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + if texto != '': + return lista(item) + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + # categoria='peliculas' + try: + if categoria in ['peliculas','latino']: + item.url = host + elif categoria == 'infantiles': + item.url = host + 'search?q=animación' + elif categoria == 'terror': + item.url = host + 'search?q=terror' + + item.extra = 'peliculas' + itemlist = lista(item) + if itemlist[-1].title == 'Siguiente >>>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist From 0f772a4aeb95593acd5f74fc00d853556438e8fd Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Tue, 15 May 2018 08:47:38 -0500 Subject: [PATCH 05/17] minhateca: nuevo server --- plugin.video.alfa/servers/minhateca.json | 42 ++++++++++++++++++++++++ plugin.video.alfa/servers/minhateca.py | 27 +++++++++++++++ 2 files changed, 69 insertions(+) create mode 100644 plugin.video.alfa/servers/minhateca.json create mode 100644 plugin.video.alfa/servers/minhateca.py diff --git a/plugin.video.alfa/servers/minhateca.json b/plugin.video.alfa/servers/minhateca.json new file mode 100644 index 00000000..bf1053e5 --- /dev/null +++ b/plugin.video.alfa/servers/minhateca.json @@ -0,0 +1,42 @@ +{ + "active": true, + "find_videos": { + "ignore_urls": [], + "patterns": [ + { + "pattern": "(https://minhateca.com.br.*?video\\))", + "url": "\\1" + } + ] + }, + "free": true, + "id": "minhateca", + "name": "minhateca", + "settings": [ + { + "default": false, + "enabled": true, + "id": "black_list", + "label": "Incluir en lista negra", + "type": "bool", + "visible": true + }, + { + "default": 0, + "enabled": true, + "id": "favorites_servers_list", + "label": "Incluir en lista de favoritos", + "lvalues": [ + "No", + "1", + "2", + "3", + "4", + "5" + ], + "type": "list", + "visible": false + } + ], + "thumbnail": "https://s9.postimg.cc/4nzpybunz/minhateca.png" +} diff --git a/plugin.video.alfa/servers/minhateca.py b/plugin.video.alfa/servers/minhateca.py new file mode 100644 index 00000000..65192951 --- /dev/null +++ b/plugin.video.alfa/servers/minhateca.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- + 
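+# This connector resolves a direct video URL from minhateca.com.br: it scrapes
+# the page's __RequestVerificationToken and data-fileid values, POSTs them to
+# /action/License/Download as an XMLHttpRequest, and returns the JSON
+# response's "redirectUrl" with a Referer hint appended for playback.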
+import urllib + +from core import httptools +from core import jsontools +from core import scrapertools +from platformcode import logger + +def test_video_exists(page_url): + return True, "" + +def get_video_url(page_url, user="", password="", video_password=""): + logger.info("(page_url='%s')" % page_url) + video_urls = [] + data = httptools.downloadpage(page_url).data + _rvt = scrapertools.find_single_match(data, '__RequestVerificationToken.*?value="([^"]+)"') + _fileid = scrapertools.find_single_match(data, 'data-fileid="([^"]+)"') + post = {'fileId': _fileid, '__RequestVerificationToken': _rvt} + post = urllib.urlencode(post) + headers = {'X-Requested-With': 'XMLHttpRequest'} + url1 = "http://minhateca.com.br/action/License/Download" + data = httptools.downloadpage(url1, post = post, headers = headers).data + dict_data = jsontools.load(data) + videourl = dict_data["redirectUrl"] + "|Referer=%s" %page_url + video_urls.append([".MP4 [minhateca]", videourl]) + return video_urls From 3c7a2cef98be62f3930480b42d96972b25fcd9b8 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Tue, 15 May 2018 11:11:56 -0500 Subject: [PATCH 06/17] Update unshortenit.py --- plugin.video.alfa/lib/unshortenit.py | 68 ---------------------------- 1 file changed, 68 deletions(-) diff --git a/plugin.video.alfa/lib/unshortenit.py b/plugin.video.alfa/lib/unshortenit.py index ee953a06..07141af1 100755 --- a/plugin.video.alfa/lib/unshortenit.py +++ b/plugin.video.alfa/lib/unshortenit.py @@ -37,21 +37,15 @@ class UnshortenIt(object): _anonymz_regex = r'anonymz\.com' _shrink_service_regex = r'shrink-service\.it' _rapidcrypt_regex = r'rapidcrypt\.net' - _maxretries = 5 - _this_dir, _this_filename = os.path.split(__file__) _timeout = 10 def unshorten(self, uri, type=None): - domain = urlsplit(uri).netloc - if not domain: return uri, "No domain found in URI!" - had_google_outbound, uri = self._clear_google_outbound_proxy(uri) - if re.search(self._adfly_regex, domain, re.IGNORECASE) or type == 'adfly': return self._unshorten_adfly(uri) @@ -74,21 +68,15 @@ class UnshortenIt(object): return self._unshorten_anonymz(uri) if re.search(self._rapidcrypt_regex, domain, re.IGNORECASE): return self._unshorten_rapidcrypt(uri) - return uri, 200 - def unwrap_30x(self, uri, timeout=10): - domain = urlsplit(uri).netloc self._timeout = timeout - loop_counter = 0 try: - if loop_counter > 5: raise ValueError("Infinitely looping redirect from URL: '%s'" % (uri,)) - # headers stop t.co from working so omit headers if this is a t.co link if domain == 't.co': r = httptools.downloadpage(uri, timeout=self._timeout) @@ -108,7 +96,6 @@ class UnshortenIt(object): only_headers=True) if not r.success: return uri, -1 - retries = 0 if 'location' in r.headers and retries < self._maxretries: r = httptools.downloadpage( @@ -120,10 +107,8 @@ class UnshortenIt(object): retries = retries + 1 else: return r.url, r.code - except Exception as e: return uri, str(e) - def _clear_google_outbound_proxy(self, url): ''' So google proxies all their outbound links through a redirect so they can detect outbound links. @@ -132,16 +117,13 @@ class UnshortenIt(object): This is useful for doing things like parsing google search results, or if you're scraping google docs, where google inserts hit-counters on all outbound links. ''' - # This is kind of hacky, because we need to check both the netloc AND # part of the path. We could use urllib.parse.urlsplit, but it's # easier and just as effective to use string checks. 
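        # For instance (hypothetical URL, for illustration only):
        #   https://www.google.com/url?q=http://example.com/page&sa=D
        # unwraps to http://example.com/page via the "q" query parameter.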
if url.startswith("http://www.google.com/url?") or \ url.startswith("https://www.google.com/url?"): - qs = urlparse(url).query query = parse_qs(qs) - if "q" in query: # Google doc outbound links (maybe blogspot, too) return True, query["q"].pop() elif "url" in query: # Outbound links from google searches @@ -150,7 +132,6 @@ class UnshortenIt(object): raise ValueError( "Google outbound proxy URL without a target url ('%s')?" % url) - return False, url def _unshorten_adfly(self, uri): @@ -163,14 +144,11 @@ class UnshortenIt(object): if len(ysmm) > 0: ysmm = re.sub(r'var ysmm \= \'|\'\;', '', ysmm[0]) - left = '' right = '' - for c in [ysmm[i:i + 2] for i in range(0, len(ysmm), 2)]: left += c[0] right = c[1] + right - # Additional digit arithmetic encoded_uri = list(left + right) numbers = ((i, n) for i, n in enumerate(encoded_uri) if str.isdigit(n)) @@ -178,12 +156,9 @@ class UnshortenIt(object): xor = int(first[1]) ^ int(second[1]) if xor < 10: encoded_uri[first[0]] = str(xor) - decoded_uri = b64decode("".join(encoded_uri).encode())[16:-16].decode() - if re.search(r'go\.php\?u\=', decoded_uri): decoded_uri = b64decode(re.sub(r'(.*?)u=', '', decoded_uri)).decode() - return decoded_uri, r.code else: return uri, 'No ysmm variable found' @@ -195,23 +170,15 @@ class UnshortenIt(object): ''' (Attempt) to decode linkbucks content. HEAVILY based on the OSS jDownloader codebase. This has necessidated a license change. - ''' - r = httptools.downloadpage(uri, timeout=self._timeout) - firstGet = time.time() - baseloc = r.url - if "/notfound/" in r.url or \ "(>Link Not Found<|>The link may have been deleted by the owner|To access the content, you must complete a quick survey\.)" in r.data: return uri, 'Error: Link not found or requires a survey!' - link = None - content = r.data - regexes = [ r"
    .*?/a>.*?.*?[^<]+)", content) if not scripts: return uri, "No script bodies found?" - js = False - for script in scripts: # cleanup script = re.sub(r"[\r\n\s]+\/\/\s*[^\r\n]+", "", script) if re.search(r"\s*var\s*f\s*=\s*window\['init'\s*\+\s*'Lb'\s*\+\s*'js'\s*\+\s*''\];[\r\n\s]+", script): js = script - if not js: return uri, "Could not find correct script?" - token = find_in_text(r"Token\s*:\s*'([a-f0-9]{40})'", js) if not token: token = find_in_text(r"\?t=([a-f0-9]{40})", js) - assert token - authKeyMatchStr = r"A(?:'\s*\+\s*')?u(?:'\s*\+\s*')?t(?:'\s*\+\s*')?h(?:'\s*\+\s*')?K(?:'\s*\+\s*')?e(?:'\s*\+\s*')?y" l1 = find_in_text(r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s*(\d+?);", js) l2 = find_in_text( r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s?params\['" + authKeyMatchStr + r"'\]\s*\+\s*(\d+?);", js) - if any([not l1, not l2, not token]): return uri, "Missing required tokens?" - authkey = int(l1) + int(l2) - p1_url = urljoin(baseloc, "/director/?t={tok}".format(tok=token)) r2 = httptools.downloadpage(p1_url, timeout=self._timeout) - p1_url = urljoin(baseloc, "/scripts/jquery.js?r={tok}&{key}".format(tok=token, key=l1)) r2_1 = httptools.downloadpage(p1_url, timeout=self._timeout) - time_left = 5.033 - (time.time() - firstGet) xbmc.sleep(max(time_left, 0) * 1000) - p3_url = urljoin(baseloc, "/intermission/loadTargetUrl?t={tok}&aK={key}&a_b=false".format(tok=token, key=str(authkey))) r3 = httptools.downloadpage(p3_url, timeout=self._timeout) - resp_json = json.loads(r3.data) if "Url" in resp_json: return resp_json['Url'], r3.code - return "Wat", "wat" def inValidate(self, s): @@ -287,30 +237,23 @@ class UnshortenIt(object): # (s == null || s != null && (s.matches("[\r\n\t ]+") || s.equals("") || s.equalsIgnoreCase("about:blank"))) if not s: return True - if re.search("[\r\n\t ]+", s) or s.lower() == "about:blank": return True else: return False - def _unshorten_adfocus(self, uri): orig_uri = uri try: - r = httptools.downloadpage(uri, timeout=self._timeout) html = r.data - adlink = re.findall("click_url =.*;", html) - if len(adlink) > 0: uri = re.sub('^click_url = "|"\;$', '', adlink[0]) if re.search(r'http(s|)\://adfoc\.us/serve/skip/\?id\=', uri): http_header = dict() http_header["Host"] = "adfoc.us" http_header["Referer"] = orig_uri - r = httptools.downloadpage(uri, headers=http_header, timeout=self._timeout) - uri = r.url return uri, r.code else: @@ -340,20 +283,16 @@ class UnshortenIt(object): try: r = httptools.downloadpage(uri, timeout=self._timeout) html = r.data - session_id = re.findall(r'sessionId\:(.*?)\"\,', html) if len(session_id) > 0: session_id = re.sub(r'\s\"', '', session_id[0]) - http_header = dict() http_header["Content-Type"] = "application/x-www-form-urlencoded" http_header["Host"] = "sh.st" http_header["Referer"] = uri http_header["Origin"] = "http://sh.st" http_header["X-Requested-With"] = "XMLHttpRequest" - xbmc.sleep(5 * 1000) - payload = {'adSessionId': session_id, 'callback': 'c'} r = httptools.downloadpage( 'http://sh.st/shortest-url/end-adsession?' 
+ @@ -361,7 +300,6 @@ class UnshortenIt(object): headers=http_header, timeout=self._timeout) response = r.data[6:-2].decode('utf-8') - if r.code == 200: resp_uri = json.loads(response)['destinationUrl'] if resp_uri is not None: @@ -401,12 +339,9 @@ class UnshortenIt(object): try: r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False) html = r.data - uri = re.findall(r"", html)[0] - from core import scrapertools uri = scrapertools.decodeHtmlentities(uri) - uri = uri.replace("/", "/") \ .replace(":", ":") \ .replace(".", ".") \ @@ -414,7 +349,6 @@ class UnshortenIt(object): .replace("#", "#") \ .replace("?", "?") \ .replace("_", "_") - return uri, r.code except Exception as e: @@ -424,9 +358,7 @@ class UnshortenIt(object): try: r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False) html = r.data - uri = re.findall(r'Click to continue', html)[0] - return uri, r.code except Exception as e: From 13c4d56d9059380aab290fe547bf492834a7634d Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Tue, 15 May 2018 14:49:40 -0500 Subject: [PATCH 07/17] descargacineclasico: fix enlaces --- .../channels/descargacineclasico.py | 178 ++++++------------ 1 file changed, 62 insertions(+), 116 deletions(-) diff --git a/plugin.video.alfa/channels/descargacineclasico.py b/plugin.video.alfa/channels/descargacineclasico.py index 8bfe9c9b..88291a9d 100755 --- a/plugin.video.alfa/channels/descargacineclasico.py +++ b/plugin.video.alfa/channels/descargacineclasico.py @@ -1,15 +1,15 @@ -# -*- coding: utf-8 -*- - +# -*- coding: utf-8 -*- import re -import urlparse from channelselector import get_thumb -from core import scrapertools -from core import servertools -from core.item import Item -from core.tmdb import Tmdb from platformcode import logger -from servers.decrypters import expurl +from core import scrapertools, httptools +from core import servertools +from core import tmdb +from core.item import Item +from lib import unshortenit + +host = "http://www.descargacineclasico.net" def agrupa_datos(data): @@ -22,54 +22,36 @@ def agrupa_datos(data): def mainlist(item): logger.info() - - thumb_buscar = get_thumb("search.png") - itemlist = [] itemlist.append(Item(channel=item.channel, title="Últimas agregadas", action="agregadas", - url="http://www.descargacineclasico.net/", viewmode="movie_with_plot", + url=host, viewmode="movie_with_plot", thumbnail=get_thumb('last', auto=True))) itemlist.append(Item(channel=item.channel, title="Listado por género", action="porGenero", - url="http://www.descargacineclasico.net/", + url=host, thumbnail=get_thumb('genres', auto=True))) itemlist.append( - Item(channel=item.channel, title="Buscar", action="search", url="http://www.descargacineclasico.net/", + Item(channel=item.channel, title="Buscar", action="search", url=host, thumbnail=get_thumb('search', auto=True))) - return itemlist def porGenero(item): logger.info() - itemlist = [] - data = scrapertools.cache_page(item.url) - logger.info("data=" + data) - + data = httptools.downloadpage(item.url).data patron = '
      (.*?)<\/div>
    ' - data = re.compile(patron, re.DOTALL).findall(data) - patron = '.*?href="([^"]+).*?>([^<]+)' - matches = re.compile(patron, re.DOTALL).findall(data[0]) - - for url, genero in matches: - itemlist.append( - Item(channel=item.channel, action="agregadas", title=genero, url=url, viewmode="movie_with_plot")) - + data = re.compile(patron,re.DOTALL).findall(data) + patron = '.*?href="([^"]+).*?>([^<]+)' + matches = re.compile(patron,re.DOTALL).findall(data[0]) + for url,genero in matches: + itemlist.append( Item(channel=item.channel , action="agregadas" , title=genero,url=url, viewmode="movie_with_plot")) return itemlist -def search(item, texto): +def search(item,texto): logger.info() - - ''' - texto_get = texto.replace(" ","%20") - texto_post = texto.replace(" ","+") - item.url = "http://pelisadicto.com/buscar/%s?search=%s" % (texto_get,texto_post) - ''' - texto = texto.replace(" ", "+") - item.url = "http://www.descargacineclasico.net/?s=" + texto - + item.url = host + "?s=" + texto try: return agregadas(item) # Se captura la excepci?n, para no interrumpir al buscador global si un canal falla @@ -83,98 +65,62 @@ def search(item, texto): def agregadas(item): logger.info() itemlist = [] - ''' - # Descarga la pagina - if "?search=" in item.url: - url_search = item.url.split("?search=") - data = scrapertools.cache_page(url_search[0], url_search[1]) - else: - data = scrapertools.cache_page(item.url) - logger.info("data="+data) - ''' - - data = scrapertools.cache_page(item.url) - logger.info("data=" + data) - - # Extrae las entradas - fichas = re.sub(r"\n|\s{2}", "", scrapertools.get_match(data, '
  • (.*?)/i>' + elif 'Año' in item.title: + patron = '
  • (.*?)' + elif 'Calidad' in item.title: + patron = 'menu-item-object-dtquality menu-item-\d+>(.*?)' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle + plot='' + if 'Genero' in item.title: + quantity = scrapertools.find_single_match(scrapedtitle,' (.*?)<') + title = scrapertools.find_single_match(scrapedtitle,'(.*?)
    (.*?).*?quality>(.*?)' + patron += '<\/div>.*?<\/h3>(.*?)<\/span><\/div>.*?flags(.*?)metadata' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year, lang_data in matches: + + + title = '%s [%s] [%s]' % (scrapedtitle, year, quality) + contentTitle = scrapedtitle + thumbnail = scrapedthumbnail + url = scrapedurl + language = get_language(lang_data) + + itemlist.append(item.clone(action='findvideos', + title=title, + url=url, + thumbnail=thumbnail, + contentTitle=contentTitle, + language=language, + quality=quality, + infoLabels={'year':year})) + + elif item.type == 'tvshows': + patron = '
    (.*?)<' % item.infoLabels['season'] + matches = re.compile(patron, re.DOTALL).findall(data) + + infoLabels = item.infoLabels + + for scrapedepisode, scrapedurl, scrapedtitle in matches: + + infoLabels['episode'] = scrapedepisode + url = scrapedurl + title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle) + + itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels)) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + return itemlist + + +def findvideos(item): + logger.info() + + itemlist = [] + data = get_source(item.url) + + selector_url = scrapertools.find_multiple_matches(data, 'class=metaframe rptss src=(.*?) frameborder=0 ') + + for lang in selector_url: + data = get_source('https:'+lang) + urls = scrapertools.find_multiple_matches(data, 'data-playerid=(.*?)>') + subs = '' + lang = scrapertools.find_single_match(lang, 'lang=([^+]+)') + language = IDIOMAS[lang] + + if item.contentType == 'episode': + quality = 'SD' + else: + quality = item.quality + + for url in urls: + final_url = httptools.downloadpage('https:'+url).data + if 'vip' in url: + file_id = scrapertools.find_single_match(url, 'file=(.*?)&') + if language=='VOSE': + sub = scrapertools.find_single_match(url, 'sub=(.*?)&') + subs = 'https:%s' % sub + post = {'link':file_id} + post = urllib.urlencode(post) + hidden_url = 'https://streamango.poseidonhd.com/repro//plugins/gkpluginsphp.php' + data_url = httptools.downloadpage(hidden_url, post=post).data + dict_vip_url = jsontools.load(data_url) + url = dict_vip_url['link'] + else: + url = 'https:%s' % url + new_url = url.replace('embed','stream') + url = httptools.downloadpage(new_url, follow_redirects=False).headers.get('location') + #title = '%s [%s]' % (item.title, language) + itemlist.append(item.clone(title='[%s] [%s]', url=url, action='play', subtitle=subs, + language=language, quality=quality, infoLabels = item.infoLabels)) + itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language)) + + # Requerido para Filtrar enlaces + + if __comprueba_enlaces__: + itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__) + + # Requerido para FilterTools + + itemlist = filtertools.get_links(itemlist, item, list_language) + + # Requerido para AutoPlay + + autoplay.start(itemlist, item) + + if item.contentType != 'episode': + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': + itemlist.append( + Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url, + action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle)) + + return itemlist + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + + if texto != '': + return search_results(item) + else: + return [] + +def search_results(item): + logger.info() + + itemlist=[] + + data=get_source(item.url) + patron = '
    .*?<a href=(.*?)>.*?<img src=(.*?) alt=(.*?)>.*?meta.*?year>(.*?)<(.*?)<p>(.*?)<\/p>
    ' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedthumb, scrapedtitle, year, lang_data, scrapedplot in matches: + + title = scrapedtitle + url = scrapedurl + thumbnail = scrapedthumb + plot = scrapedplot + language = get_language(lang_data) + if language: + action = 'findvideos' + else: + action = 'seasons' + + new_item=Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, plot=plot, + action=action, + language=language, infoLabels={'year':year}) + if new_item.action == 'findvideos': + new_item.contentTitle = new_item.title + else: + new_item.contentSerieName = new_item.title + + itemlist.append(new_item) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + return itemlist + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria in ['peliculas']: + item.url = host + 'movies' + elif categoria == 'infantiles': + item.url = host + 'genre/animacion/' + elif categoria == 'terror': + item.url = host + 'genre/terror/' + item.type='movies' + itemlist = list_all(item) + if itemlist[-1].title == 'Siguiente >>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist From 38e27aa01469285c597b1e4b80d79d1d8345e550 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Tue, 15 May 2018 14:53:12 -0500 Subject: [PATCH 09/17] estadepelis: fix From 8b2c0adbf4218378ac7f2945b60134d39a859441 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Tue, 15 May 2018 14:53:59 -0500 Subject: [PATCH 10/17] estadepelis: fix --- plugin.video.alfa/channels/estadepelis.json | 3 +- plugin.video.alfa/channels/estadepelis.py | 40 ++++++++------------- 2 files changed, 17 insertions(+), 26 deletions(-) diff --git a/plugin.video.alfa/channels/estadepelis.json b/plugin.video.alfa/channels/estadepelis.json index 1bc265be..ff8f2a95 100755 --- a/plugin.video.alfa/channels/estadepelis.json +++ b/plugin.video.alfa/channels/estadepelis.json @@ -28,7 +28,8 @@ "lvalues": [ "No filtrar", "Latino", - "VOS" + "VOS", + "Castellano" ] }, { diff --git a/plugin.video.alfa/channels/estadepelis.py b/plugin.video.alfa/channels/estadepelis.py index 07989ada..325ae879 100755 --- a/plugin.video.alfa/channels/estadepelis.py +++ b/plugin.video.alfa/channels/estadepelis.py @@ -22,7 +22,7 @@ list_quality = [] list_servers = ['yourupload', 'openload', 'sendvid'] vars = { - 'ef5ca18f089cf01316bbc967fa10f72950790c39ef5ca18f089cf01316bbc967fa10f72950790c39': 'http://www.estadepelis.com/', + 'ef5ca18f089cf01316bbc967fa10f72950790c39ef5ca18f089cf01316bbc967fa10f72950790c39': 'http://tawnestdplsnetps.pw/', 'b48699bb49d4550f27879deeb948d4f7d9c5949a8': 'embed', 'JzewJkLlrvcFnLelj2ikbA': 'php?url=', 'p889c6853a117aca83ef9d6523335dc065213ae86': 'player', @@ -194,8 +194,8 @@ def generos(item): itemlist = [] norep = [] data = httptools.downloadpage(item.url).data - - patron = '
  • ([^<]+)<\/a>' + logger.debug(data) + patron = '
  • ([^<]+)<\/a>' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: @@ -342,7 +342,6 @@ def findvideos(item): langs = dict() data = httptools.downloadpage(item.url).data - logger.debug('data: %s' % data) patron = ' (.*?) <\/a>' matches = re.compile(patron, re.DOTALL).findall(data) @@ -365,26 +364,28 @@ def findvideos(item): url = dec(encurl) title = '' server = '' - servers = {'/opl': 'openload', '/your': 'yourupload', '/sen': 'senvid', '/face': 'netutv', '/vk': 'vk'} + servers = {'/opl': 'openload', '/your': 'yourupload', '/sen': 'senvid', '/face': 'netutv', '/vk': 'vk', + '/jk':'streamcherry'} server_id = re.sub(r'.*?embed|\.php.*', '', url) if server_id and server_id in servers: server = servers[server_id] - logger.debug('server_id: %s' % server_id) - if langs[scrapedlang] in list_language: + + if (scrapedlang in langs) and langs[scrapedlang] in list_language: language = IDIOMAS[langs[scrapedlang]] else: language = 'Latino' - if langs[scrapedlang] == 'Latino': - idioma = '[COLOR limegreen]LATINO[/COLOR]' - elif langs[scrapedlang] == 'Sub Español': - idioma = '[COLOR red]SUB[/COLOR]' + # + # if langs[scrapedlang] == 'Latino': + # idioma = '[COLOR limegreen]LATINO[/COLOR]' + # elif langs[scrapedlang] == 'Sub Español': + # idioma = '[COLOR red]SUB[/COLOR]' if item.extra == 'peliculas': - title = item.contentTitle + ' (' + server + ') ' + idioma + title = item.contentTitle + ' (' + server + ') ' + language plot = scrapertools.find_single_match(data, '

    ([^<]+)<\/p>') else: - title = item.contentSerieName + ' (' + server + ') ' + idioma + title = item.contentSerieName + ' (' + server + ') ' + language plot = item.plot thumbnail = servertools.guess_server_thumbnail(title) @@ -399,7 +400,6 @@ def findvideos(item): quality='', language=language )) - logger.debug('url: %s' % url) # Requerido para FilterTools itemlist = filtertools.get_links(itemlist, item, list_language) @@ -423,23 +423,13 @@ def findvideos(item): def play(item): logger.info() itemlist = [] - data = httptools.downloadpage(item.url, add_referer=True).data + data = httptools.downloadpage(item.url).data if 'your' in item.url: item.url = 'http://www.yourupload.com/embed/' + scrapertools.find_single_match(data, 'src=".*?code=(.*?)"') itemlist.append(item) else: - itemlist = servertools.find_video_items(data=data) - if config.get_videolibrary_support() and len(itemlist) > 0: - itemlist.append(Item(channel=item.channel, - title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', - url=item.url, - action="add_pelicula_to_library", - extra="findvideos", - contentTitle=item.contentTitle - )) - return itemlist From e66e0fd94e60514acb0dddf3566afdd245ab120b Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Sat, 19 May 2018 09:13:02 -0500 Subject: [PATCH 11/17] pelismedia: nuevo canal --- plugin.video.alfa/channels/pelismedia.json | 88 +++++++ plugin.video.alfa/channels/pelismedia.py | 289 +++++++++++++++++++++ 2 files changed, 377 insertions(+) create mode 100644 plugin.video.alfa/channels/pelismedia.json create mode 100644 plugin.video.alfa/channels/pelismedia.py diff --git a/plugin.video.alfa/channels/pelismedia.json b/plugin.video.alfa/channels/pelismedia.json new file mode 100644 index 00000000..72726692 --- /dev/null +++ b/plugin.video.alfa/channels/pelismedia.json @@ -0,0 +1,88 @@ +{ + "id": "pelismedia", + "name": "PelisMedia", + "active": true, + "adult": false, + "language": ["lat"], + "thumbnail": "https://s14.postimg.cc/eclmujsch/12715507_1112827435402340_7302361220060367711_n.jpg", + "categories": ["movie", "tvshow"], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Incluir en Novedades - Series", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_terror", + "type": "bool", + "label": "Incluir en Novedades - terror", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_latino", + "type": "bool", + "label": "Incluir en Novedades - Latino", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Perfil 3", + "Perfil 2", + "Perfil 1", + "Ninguno" + ] + }, + { + "id": "episodios_x_pag", + "type": "list", + "label": "Episodios por página", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "10", + "15", + "20", + "25", + "30" + ] + }, + { + "id": "temporada_o_todos", + "type": "bool", + "label": "Mostrar temporadas", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git 
a/plugin.video.alfa/channels/pelismedia.py b/plugin.video.alfa/channels/pelismedia.py new file mode 100644 index 00000000..0712b03f --- /dev/null +++ b/plugin.video.alfa/channels/pelismedia.py @@ -0,0 +1,289 @@ +# -*- coding: utf-8 -*- +from core import httptools +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item +from platformcode import config, logger +from channelselector import get_thumb + +__perfil__ = int(config.get_setting('perfil', 'pelisultra')) + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] + +if __perfil__ < 3: + color1, color2, color3 = perfil[__perfil__] +else: + color1 = color2 = color3 = "" + +host="http://www.pelismedia.com" + +def mainlist(item): + logger.info() + itemlist = [] + item.thumbnail = get_thumb('movies', auto=True) + itemlist.append(item.clone(title="Películas:", folder=False, text_color="0xFFD4AF37", text_bold=True)) + itemlist.append(Item(channel = item.channel, title = " Novedades", action = "peliculas", url = host, + thumbnail=get_thumb('newest', auto=True))) + itemlist.append(Item(channel = item.channel, title = " Estrenos", action = "peliculas", url = host + "/genero/estrenos/", + thumbnail=get_thumb('premieres', auto=True))) + itemlist.append(Item(channel = item.channel, title = " Por género", action = "genero", url = host + "/genero/", + thumbnail=get_thumb('genres', auto=True) )) + item.thumbnail = get_thumb('tvshows', auto=True) + itemlist.append(item.clone(title="Series:", folder=False, text_color="0xFFD4AF37", text_bold=True)) + itemlist.append(Item(channel = item.channel, title = " Todas las series", action = "series", url = host + "/series/", + thumbnail=get_thumb('all', auto=True))) + itemlist.append(Item(channel = item.channel, title = " Nuevos episodios", action = "nuevos_episodios", url = host + "/episodio/", + thumbnail=get_thumb('new episodes', auto=True))) + itemlist.append(Item(channel = item.channel, title = "Buscar...", action = "search", url = host, text_color="red", text_bold=True, + thumbnail=get_thumb('search', auto=True))) + itemlist.append(item.clone(title="Configurar canal...", text_color="green", action="configuracion", text_bold=True)) + return itemlist + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria in ["peliculas", "latino"]: + item.url = host + itemlist = peliculas(item) + elif categoria == 'terror': + item.url = host + '/genero/terror/' + itemlist = peliculas(item) + elif categoria == "series": + item.url = host + "/episodio/" + itemlist = nuevos_episodios(item) + if "Pagina" in itemlist[-1].title: + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + +def peliculas(item): + #logger.info() + logger.info(item) + itemlist = [] + data = httptools.downloadpage(item.url).data + + data2 = scrapertools.find_single_match(data,'(?s)
  • ' + matches = scrapertools.find_multiple_matches(data, patron) + # Se quita "Estrenos" de la lista porque tiene su propio menu + matches.pop(0) + + for scrapedurl, scrapedtitle in matches: + itemlist.append(Item(action = "peliculas", channel = item.channel, title = scrapedtitle, url = scrapedurl)) + + return itemlist + +def series(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + # Se saca la info + patron = '(?s)class="ml-item.*?' # base + patron += 'a href="([^"]+).*?' # url + patron += 'img src="([^"]+).*?' # imagen + patron += 'alt="([^"]+).*?' # titulo + patron += 'class="year">(\d{4})' # año + matches = scrapertools.find_multiple_matches(data, patron) + + #if config.get_setting('temporada_o_todos', 'pelisultra') == 0: + if config.get_setting('temporada_o_todos', 'pelisultra'): + accion="temporadas" + else: + accion="episodios" + + for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches: + itemlist.append(Item(action = accion, channel = item.channel, title = scrapedtitle + " (" + scrapedyear + ")", contentSerieName=scrapedtitle, contentType="tvshow", thumbnail = scrapedthumbnail, url = scrapedurl, infoLabels={'year':scrapedyear})) + + # InfoLabels: + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + # Pagina siguiente + patron_siguiente='class="pag_b"> item.page + episodios_por_pagina: + itemlist_page.append(item.clone(title = ">>> Pagina siguiente", page = item.page + episodios_por_pagina)) + + # InfoLabels: + tmdb.set_infoLabels_itemlist(itemlist_page, seekTmdb=True) + + return itemlist_page + +def nuevos_episodios(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '(?s).*?' # base + patron += '' # url + patron += '(.*?).*?' # nombre_serie + patron += ' Date: Sat, 19 May 2018 09:13:52 -0500 Subject: [PATCH 12/17] Delete minhateca.json --- plugin.video.alfa/servers/minhateca.json | 42 ------------------------ 1 file changed, 42 deletions(-) delete mode 100644 plugin.video.alfa/servers/minhateca.json diff --git a/plugin.video.alfa/servers/minhateca.json b/plugin.video.alfa/servers/minhateca.json deleted file mode 100644 index bf1053e5..00000000 --- a/plugin.video.alfa/servers/minhateca.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "active": true, - "find_videos": { - "ignore_urls": [], - "patterns": [ - { - "pattern": "(https://minhateca.com.br.*?video\\))", - "url": "\\1" - } - ] - }, - "free": true, - "id": "minhateca", - "name": "minhateca", - "settings": [ - { - "default": false, - "enabled": true, - "id": "black_list", - "label": "Incluir en lista negra", - "type": "bool", - "visible": true - }, - { - "default": 0, - "enabled": true, - "id": "favorites_servers_list", - "label": "Incluir en lista de favoritos", - "lvalues": [ - "No", - "1", - "2", - "3", - "4", - "5" - ], - "type": "list", - "visible": false - } - ], - "thumbnail": "https://s9.postimg.cc/4nzpybunz/minhateca.png" -} From 9ef463ac62d94f884de734d0b3dd274eaa240b21 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Sat, 19 May 2018 09:14:03 -0500 Subject: [PATCH 13/17] Delete minhateca.py --- plugin.video.alfa/servers/minhateca.py | 27 -------------------------- 1 file changed, 27 deletions(-) delete mode 100644 plugin.video.alfa/servers/minhateca.py diff --git a/plugin.video.alfa/servers/minhateca.py b/plugin.video.alfa/servers/minhateca.py deleted file mode 100644 index 65192951..00000000 --- a/plugin.video.alfa/servers/minhateca.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- 
coding: utf-8 -*- - -import urllib - -from core import httptools -from core import jsontools -from core import scrapertools -from platformcode import logger - -def test_video_exists(page_url): - return True, "" - -def get_video_url(page_url, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) - video_urls = [] - data = httptools.downloadpage(page_url).data - _rvt = scrapertools.find_single_match(data, '__RequestVerificationToken.*?value="([^"]+)"') - _fileid = scrapertools.find_single_match(data, 'data-fileid="([^"]+)"') - post = {'fileId': _fileid, '__RequestVerificationToken': _rvt} - post = urllib.urlencode(post) - headers = {'X-Requested-With': 'XMLHttpRequest'} - url1 = "http://minhateca.com.br/action/License/Download" - data = httptools.downloadpage(url1, post = post, headers = headers).data - dict_data = jsontools.load(data) - videourl = dict_data["redirectUrl"] + "|Referer=%s" %page_url - video_urls.append([".MP4 [minhateca]", videourl]) - return video_urls From 3fd25a0568cc83142219b11528f6bd0cd15ced69 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Sat, 19 May 2018 09:15:44 -0500 Subject: [PATCH 14/17] Update doramasmp4.json --- plugin.video.alfa/channels/doramasmp4.json | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugin.video.alfa/channels/doramasmp4.json b/plugin.video.alfa/channels/doramasmp4.json index 486d9d66..52112148 100644 --- a/plugin.video.alfa/channels/doramasmp4.json +++ b/plugin.video.alfa/channels/doramasmp4.json @@ -27,8 +27,9 @@ "visible": true, "lvalues": [ "No filtrar", - "VOSE" + "VOSE", + "VO" ] } ] -} \ No newline at end of file +} From 6e7d9be23448de6cef22a023c99e94906cbec054 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Sat, 19 May 2018 09:16:46 -0500 Subject: [PATCH 15/17] doramasmp4: fix --- plugin.video.alfa/channels/doramasmp4.py | 123 ++++++++++++++--------- 1 file changed, 76 insertions(+), 47 deletions(-) diff --git a/plugin.video.alfa/channels/doramasmp4.py b/plugin.video.alfa/channels/doramasmp4.py index c86c98d6..7926eeee 100644 --- a/plugin.video.alfa/channels/doramasmp4.py +++ b/plugin.video.alfa/channels/doramasmp4.py @@ -18,7 +18,7 @@ from channelselector import get_thumb host = 'https://www.doramasmp4.com/' -IDIOMAS = {'sub': 'VOSE'} +IDIOMAS = {'sub': 'VOSE', 'VO': 'VO'} list_language = IDIOMAS.values() list_quality = [] list_servers = ['openload', 'streamango', 'netutv', 'okru', 'directo', 'mp4upload'] @@ -38,7 +38,7 @@ def mainlist(item): itemlist.append(Item(channel= item.channel, title="Doramas", action="doramas_menu", thumbnail=get_thumb('doramas', auto=True), type='dorama')) itemlist.append(Item(channel=item.channel, title="Películas", action="list_all", - url=host + 'catalogue?type[]=pelicula', thumbnail=get_thumb('movies', auto=True), + url=host + 'catalogue?format=pelicula', thumbnail=get_thumb('movies', auto=True), type='movie')) itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search?q=', thumbnail=get_thumb('search', auto=True))) @@ -63,9 +63,8 @@ def list_all(item): itemlist = [] data = get_source(item.url) - - patron = '(.*?)' - patron += '
    (.*?)' + patron = '' in data: + language = IDIOMAS['vo'] + else: + language = IDIOMAS['sub'] + if item.type !='episode' and '' not in data: item.type = 'dorama' item.contentSerieName = item.contentTitle item.contentTitle = '' return episodes(item) else: - itemlist.extend(servertools.find_video_items(data=data)) - for video_item in itemlist: - if 'sgl.php' in video_item.url: - headers = {'referer': item.url} - patron_gvideo = "'file':'(.*?)','type'" - data_gvideo = httptools.downloadpage(video_item.url, headers=headers).data - video_item.url = scrapertools.find_single_match(data_gvideo, patron_gvideo) - duplicated.append(video_item.url) - video_item.channel = item.channel - video_item.infoLabels = item.infoLabels - video_item.language=IDIOMAS['sub'] - - patron = 'var item = {id: (\d+), episode: (\d+),' - matches = re.compile(patron, re.DOTALL).findall(data) - - for id, episode in matches: - data_json=jsontools.load(httptools.downloadpage(host+'/api/stream/?id=%s&episode=%s' %(id, episode)).data) - sources = data_json['options'] - for src in sources: - url = sources[src] - - if 'sgl.php' in url: - headers = {'referer':item.url} - patron_gvideo = "'file':'(.*?)','type'" - data_gvideo = httptools.downloadpage(url, headers = headers).data - url = scrapertools.find_single_match(data_gvideo, patron_gvideo) - - new_item = Item(channel=item.channel, title='%s', url=url, language=IDIOMAS['sub'], action='play', - infoLabels=item.infoLabels) - if url != '' and url not in duplicated: - itemlist.append(new_item) - duplicated.append(url) - try: - itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize()) - except: - pass + for video_url in matches: + video_data = httptools.downloadpage(video_url, headers=headers).data + server = '' + if 'Media player DMP4' in video_data: + url = scrapertools.find_single_match(video_data, "sources: \[\{'file':'(.*?)'") + server = 'Directo' + else: + url = scrapertools.find_single_match(video_data, '