From 193efdda6d84501f3a861b7a256a5b0363136cf0 Mon Sep 17 00:00:00 2001
From: Intel1
Date: Wed, 7 Nov 2018 10:14:32 -0500
Subject: [PATCH] Updates

danimados: fix for several seasons
diskokosmiko: removed
pelisgratis: fix
rexpelis: tmdb fix
seriesblancoxyz: removed
sipeliculas: domain change
yape: new quality
upvid: fix
vidtome: fix
xdrive: fix
---
 plugin.video.alfa/channels/danimados.py       |  59 +--
 plugin.video.alfa/channels/diskokosmiko.json  |  64 ---
 plugin.video.alfa/channels/diskokosmiko.py    | 366 ------------------
 plugin.video.alfa/channels/pelisgratis.py     |  23 +-
 plugin.video.alfa/channels/rexpelis.py        |  10 +
 .../channels/seriesblancoxyz.json             |  38 --
 plugin.video.alfa/channels/seriesblancoxyz.py | 323 ----------------
 plugin.video.alfa/channels/sipeliculas.py     |   2 +-
 plugin.video.alfa/channels/yape.py            |   2 +-
 plugin.video.alfa/servers/diskokosmiko.json   |  43 --
 plugin.video.alfa/servers/diskokosmiko.py     |  50 ---
 plugin.video.alfa/servers/upvid.py            |  11 +-
 plugin.video.alfa/servers/vidtome.py          |  20 +-
 plugin.video.alfa/servers/xdrive.py           |  13 +-
 14 files changed, 78 insertions(+), 946 deletions(-)
 delete mode 100644 plugin.video.alfa/channels/diskokosmiko.json
 delete mode 100644 plugin.video.alfa/channels/diskokosmiko.py
 delete mode 100644 plugin.video.alfa/channels/seriesblancoxyz.json
 delete mode 100644 plugin.video.alfa/channels/seriesblancoxyz.py
 delete mode 100644 plugin.video.alfa/servers/diskokosmiko.json
 delete mode 100644 plugin.video.alfa/servers/diskokosmiko.py

diff --git a/plugin.video.alfa/channels/danimados.py b/plugin.video.alfa/channels/danimados.py
index 62a65bb9..cadfab67 100644
--- a/plugin.video.alfa/channels/danimados.py
+++ b/plugin.video.alfa/channels/danimados.py
@@ -113,7 +113,6 @@ def lista(item):
     data_lista = scrapertools.find_single_match(data, '(.+?)<\/div><\/div>')
     patron = '([^.+?.+?(.+?)<\/div>'
-    #scrapedthumbnail,#scrapedtitle, #scrapedurl, #scrapedplot
     matches = scrapertools.find_multiple_matches(data_lista, patron)
     for scrapedthumbnail,scrapedtitle, scrapedurl, scrapedplot in matches:
         if item.title=="Peliculas Animadas":
@@ -134,13 +133,13 @@ def episodios(item):
     itemlist = []
     infoLabels = {}
     data = httptools.downloadpage(item.url).data
-    patron = '(?s)(.+?)<\/ul>'
+    patron = '(?s)(.+?)Compartido'
     data_lista = scrapertools.find_single_match(data,patron)
     contentSerieName = item.title
     patron_caps = 'href="([^"]+)".*?'
     patron_caps += 'src="([^"]+)".*?'
     patron_caps += 'numerando">([^<]+).*?'
-    patron_caps += 'link_go">.*?>([^<]+)'
+    patron_caps += 'episodiotitle">.*?>([^<]+)'
     matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
     for scrapedurl, scrapedthumbnail, scrapedtempepi, scrapedtitle in matches:
         tempepi=scrapedtempepi.split(" - ")
@@ -161,25 +160,26 @@ def findvideos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    patron = '(.+?)<\/nav>'
-    data1 = scrapertools.find_single_match(data, patron)
-    patron = "changeLink\('([^']+)'\)"
-    matches = scrapertools.find_multiple_matches(data1, patron)
-    for url64 in matches:
-        url1 =base64.b64decode(url64)
-        if 'danimados' in url1:
-            url = 'https:'+url1.replace('stream/', 'stream_iframe/')
-            id = scrapertools.find_single_match(url, 'iframe/(.*)')
-            url = url.replace(id, base64.b64encode(id))
-            new_data = httptools.downloadpage(url).data
-            new_data = new_data.replace('"',"'")
-            url = scrapertools.find_single_match(new_data, "sources:\s*\[\{file:\s*'([^']+)")
-            if "zkstream" in url or "cloudup" in url:
-                url1 = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
-            else:
-                url1 = url
-        if url1:
-            itemlist.append(item.clone(title='%s',url=url1, action="play"))
+    if "onclick=\"changeLink('" in data:
+        patron = "onclick=.changeLink\('([^']+)'"
+        matches = scrapertools.find_multiple_matches(data, patron)
+        for id in matches:
+            url = devuelve_enlace(base64.b64decode(id))
+            itemlist.append(item.clone(title="Ver en %s",url=url, action="play"))
+    else:
+        patron = 'data-type="([^"]+).*?'
+        patron += 'data-post="([^"]+).*?'
+        patron += 'data-nume="([^"]+).*?'
+        patron += 'server">([^<]+).*?'
+        matches = scrapertools.find_multiple_matches(data, patron)
+        headers = {"X-Requested-With":"XMLHttpRequest"}
+        for scrapedtype, scrapedpost, scrapednume, scrapedserver in matches:
+            post = "action=doo_player_ajax&type=%s&post=%s&nume=%s" %(scrapedtype, scrapedpost, scrapednume)
+            data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", headers=headers, post=post).data
+            url1 = scrapertools.find_single_match(data1, "src='([^']+)")
+            url1 = devuelve_enlace(url1)
+            if url1:
+                itemlist.append(item.clone(title="Ver en %s",url=url1, action="play"))
     tmdb.set_infoLabels(itemlist)
     itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
     if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
@@ -193,3 +193,18 @@ def play(item):
     item.thumbnail = item.contentThumbnail
     return [item]
+
+
+def devuelve_enlace(url1):
+    if 'danimados' in url1:
+        url = 'https:'+url1.replace('stream/', 'stream_iframe/')
+        id = scrapertools.find_single_match(url, 'iframe/(.*)')
+        url = url.replace(id, base64.b64encode(id))
+        new_data = httptools.downloadpage(url).data
+        new_data = new_data.replace('"',"'")
+        url = scrapertools.find_single_match(new_data, "sources:\s*\[\{file:\s*'([^']+)")
+        if "zkstream" in url or "cloudup" in url:
+            url1 = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
+        else:
+            url1 = url
+    return url1
diff --git a/plugin.video.alfa/channels/diskokosmiko.json b/plugin.video.alfa/channels/diskokosmiko.json
deleted file mode 100644
index 0844956a..00000000
--- a/plugin.video.alfa/channels/diskokosmiko.json
+++ /dev/null
@@ -1,64 +0,0 @@
-{
-    "id": "diskokosmiko",
-    "name": "Diskokosmiko",
-    "language": ["cast", "lat"],
-    "active": true,
-    "adult": false,
-    "version": 1,
-    "thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
-    "banner": "copiapop.png",
-    "categories": [
-        "movie",
-        "tvshow"
-    ],
-    "settings": [
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Incluir en busqueda global",
-            "default": false,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "diskokosmikouser",
-            "type": "text",
-            "color": "0xFFC52020",
-            "label": "Usuario Diskokosmiko",
Diskokosmiko", - "enabled": true, - "visible": true - }, - { - "id": "diskokosmikopassword", - "type": "text", - "color": "0xFFC52020", - "hidden": true, - "label": "Password Diskokosmiko", - "enabled": "!eq(-1,'')", - "visible": true - }, - { - "id": "adult_content", - "type": "bool", - "color": "0xFFd50b0b", - "label": "Mostrar contenido adulto en las búsquedas", - "default": false, - "enabled": true, - "visible": true - }, - { - "id": "perfil", - "type": "list", - "label": "Perfil de color", - "default": 3, - "enabled": true, - "visible": true, - "lvalues": [ - "Sin color", - "Perfil 3", - "Perfil 2", - "Perfil 1" - ] - } - ] -} \ No newline at end of file diff --git a/plugin.video.alfa/channels/diskokosmiko.py b/plugin.video.alfa/channels/diskokosmiko.py deleted file mode 100644 index e73f9f48..00000000 --- a/plugin.video.alfa/channels/diskokosmiko.py +++ /dev/null @@ -1,366 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import threading -import urllib -import xbmc - -from core import downloadtools -from core import filetools -from core import httptools -from core import jsontools -from core import scrapertools -from core.item import Item -from platformcode import config, logger -from platformcode import platformtools - -__perfil__ = config.get_setting('perfil', "diskokosmiko") - -# Fijar perfil de color -perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'], - ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'], - ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']] - -if __perfil__ - 1 >= 0: - color1, color2, color3, color4, color5 = perfil[__perfil__ - 1] -else: - color1 = color2 = color3 = color4 = color5 = "" - -adult_content = config.get_setting("adult_content", "diskokosmiko") - - -def login(pagina): - logger.info() - try: - dom = pagina.split(".")[0] - user = config.get_setting("%suser" %dom, "diskokosmiko") - password = config.get_setting("%spassword" %dom, "diskokosmiko") - if not user: - return False, "Para ver los enlaces de %s es necesario registrarse en %s" %(dom, pagina) - data = httptools.downloadpage("http://%s" % pagina).data - if re.search(r'(?i)%s' % user, data): - return True, "" - token = scrapertools.find_single_match(data, 'name="__RequestVerificationToken".*?value="([^"]+)"') - post = "__RequestVerificationToken=%s&UserName=%s&Password=%s" % (token, user, password) - headers = {'X-Requested-With': 'XMLHttpRequest'} - url_log = "http://%s/action/Account/Login" % pagina - data = httptools.downloadpage(url_log, post, headers).data - if "redirectUrl" in data: - logger.info("Login correcto") - return True, "" - else: - logger.error("Error en el login") - return False, "Nombre de usuario no válido. Comprueba tus credenciales" - except: - import traceback - logger.error(traceback.format_exc()) - return False, "Error durante el login. 
Comprueba tus credenciales" - - -def mainlist(item): - logger.info() - itemlist = [] - item.text_color = color1 - logueado, error_message = login("diskokosmiko.mx") - if not logueado: - itemlist.append(item.clone(title=error_message, action="configuracion", folder=False)) - else: - item.extra = "http://diskokosmiko.mx/" - itemlist.append(item.clone(title="DiskoKosmiko", action="", text_color=color2)) - itemlist.append(item.clone(title=" Búsqueda", action="search", url="http://diskokosmiko.mx/action/SearchFiles")) - itemlist.append(item.clone(title=" Colecciones", action="colecciones", - url="http://diskokosmiko.mx/action/home/MoreNewestCollections?pageNumber=1")) - itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro", - url="http://diskokosmiko.mx/action/SearchFiles")) - itemlist.append(item.clone(title=" Mi cuenta", action="cuenta")) - itemlist.append(item.clone(action="", title="")) - folder_thumb = filetools.join(config.get_data_path(), 'thumbs_disko') - files = filetools.listdir(folder_thumb) - if files: - itemlist.append( - item.clone(title="Eliminar caché de imágenes (%s)" % len(files), action="delete_cache", text_color="red")) - itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold")) - return itemlist - - -def search(item, texto): - logger.info() - item.post = "Mode=List&Type=Video&Phrase=%s&SizeFrom=0&SizeTo=0&Extension=&ref=pager&pageNumber=1" % texto.replace( - " ", "+") - try: - return listado(item) - except: - import sys, traceback - for line in sys.exc_info(): - logger.error("%s" % line) - logger.error(traceback.format_exc()) - return [] - - -def configuracion(item): - ret = platformtools.show_channel_settings() - platformtools.itemlist_refresh() - return ret - - -def listado(item): - logger.info() - itemlist = [] - data_thumb = httptools.downloadpage(item.url, item.post.replace("Mode=List", "Mode=Gallery")).data - if not item.post: - data_thumb = "" - item.url = item.url.replace("/gallery,", "/list,") - data = httptools.downloadpage(item.url, item.post).data - data = re.sub(r"\n|\r|\t|\s{2}| |
      ", "", data) - - folder = filetools.join(config.get_data_path(), 'thumbs_disko') - patron = 'data-file-id(.*?

      )
' - bloques = scrapertools.find_multiple_matches(data, patron) - for block in bloques: - if "adult_info" in block and not adult_content: - continue - size = scrapertools.find_single_match(block, '([^<]+)

') - patron = 'class="name">
([^<]+)<' - scrapedurl, scrapedtitle = scrapertools.find_single_match(block, patron) - scrapedthumbnail = scrapertools.find_single_match(block, "background-image:url\('([^']+)'") - if scrapedthumbnail: - try: - thumb = scrapedthumbnail.split("-", 1)[0].replace("?", "\?") - if data_thumb: - url_thumb = scrapertools.find_single_match(data_thumb, "(%s[^']+)'" % thumb) - else: - url_thumb = scrapedthumbnail - scrapedthumbnail = filetools.join(folder, "%s.jpg" % url_thumb.split("e=", 1)[1][-20:]) - except: - scrapedthumbnail = "" - if scrapedthumbnail: - t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, url_thumb]) - t.setDaemon(True) - t.start() - else: - scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png" - scrapedurl = item.extra + scrapedurl - title = "%s (%s)" % (scrapedtitle, size) - if "adult_info" in block: - title += " [COLOR %s][+18][/COLOR]" % color4 - plot = scrapertools.find_single_match(block, '
-        if plot:
-            plot = scrapertools.decodeHtmlentities(plot)
-        new_item = Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
-                        thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, text_color=color2,
-                        extra=item.extra, infoLabels={'plot': plot}, post=item.post)
-        if item.post:
-            try:
-                new_item.folderurl, new_item.foldername = scrapertools.find_single_match(block,
-                                                                                         '<a href="([^"]+)">([^<]+)<')
-            except:
-                pass
-        else:
-            new_item.folderurl = item.url.rsplit("/", 1)[0]
-            new_item.foldername = item.foldername
-        new_item.fanart = item.thumbnail
-        itemlist.append(new_item)
-    next_page = scrapertools.find_single_match(data, 'class="pageSplitter.*?" data-nextpage-number="([^"]+)"')
-    if next_page:
-        if item.post:
-            post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page, item.post)
-            url = item.url
-        else:
-            url = re.sub(r',\d+\?ref=pager', ",%s?ref=pager" % next_page, item.url)
-            post = ""
-        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página Siguiente (%s)" % next_page,
-                             url=url, post=post, extra=item.extra))
-    return itemlist
-
-
-def findvideos(item):
-    logger.info()
-    itemlist = []
-    itemlist.append(item.clone(action="play", title="Reproducir/Descargar", server="diskokosmiko"))
-    usuario = scrapertools.find_single_match(item.url, '%s/([^/]+)/' % item.extra)
-    url_usuario = item.extra + "/" + usuario
-    if item.folderurl and not item.folderurl.startswith(item.extra):
-        item.folderurl = item.extra + item.folderurl
-    if item.post:
-        itemlist.append(item.clone(action="listado", title="Ver colección: %s" % item.foldername,
-                                   url=item.folderurl + "/gallery,1,1?ref=pager", post=""))
-    data = httptools.downloadpage(item.folderurl).data
-    token = scrapertools.find_single_match(data,
-                                           'data-action="followChanged.*?name="__RequestVerificationToken".*?value="([^"]+)"')
-    collection_id = item.folderurl.rsplit("-", 1)[1]
-    post = "__RequestVerificationToken=%s&collectionId=%s" % (token, collection_id)
-    url = "%s/action/Follow/Follow" % item.extra
-    title = "Seguir Colección: %s" % item.foldername
-    if "dejar de seguir" in data:
-        title = "Dejar de seguir la colección: %s" % item.foldername
-        url = "%s/action/Follow/UnFollow" % item.extra
-    itemlist.append(item.clone(action="seguir", title=title, url=url, post=post, text_color=color5, folder=False))
-    itemlist.append(
-        item.clone(action="colecciones", title="Ver colecciones del usuario: %s" % usuario, url=url_usuario))
-    return itemlist
-
-
-def colecciones(item):
-    logger.info()
-    itemlist = []
-    usuario = False
-    data = httptools.downloadpage(item.url).data
-    if "Ver colecciones del usuario" not in item.title and not item.index:
-        data = jsontools.load(data)["Data"]
-        content = data["Content"]
-        content = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", content)
-    else:
-        usuario = True
-        if item.follow:
-            content = scrapertools.find_single_match(data,
-                                                     'id="followed_collections"(.*?)