diff --git a/plugin.video.alfa/channels/kbagi.py b/plugin.video.alfa/channels/kbagi.py index d3fedc41..a43fb282 100644 --- a/plugin.video.alfa/channels/kbagi.py +++ b/plugin.video.alfa/channels/kbagi.py @@ -2,12 +2,17 @@ import re import threading +import urllib +import xbmc +from core import downloadtools from core import filetools from core import httptools +from core import jsontools from core import scrapertools from core.item import Item from platformcode import config, logger +from platformcode import platformtools __perfil__ = config.get_setting('perfil', "kbagi") @@ -26,23 +31,15 @@ adult_content = config.get_setting("adult_content", "kbagi") def login(pagina): logger.info() - try: - user = config.get_setting("%suser" % pagina.split(".")[0], "kbagi") - password = config.get_setting("%spassword" % pagina.split(".")[0], "kbagi") - if pagina == "kbagi.com": - if user == "" and password == "": - return False, "Para ver los enlaces de kbagi es necesario registrarse en kbagi.com" - elif user == "" or password == "": - return False, "kbagi: Usuario o contraseña en blanco. Revisa tus credenciales" - else: - if user == "" or password == "": - return False, "DiskoKosmiko: Usuario o contraseña en blanco. Revisa tus credenciales" - + dom = pagina.split(".")[0] + user = config.get_setting("%suser" %dom, "kbagi") + password = config.get_setting("%spassword" %dom, "kbagi") + if not user: + return False, "Para ver los enlaces de %s es necesario registrarse en %s" %(dom, pagina) data = httptools.downloadpage("http://%s" % pagina).data if re.search(r'(?i)%s' % user, data): return True, "" - token = scrapertools.find_single_match(data, 'name="__RequestVerificationToken".*?value="([^"]+)"') post = "__RequestVerificationToken=%s&UserName=%s&Password=%s" % (token, user, password) headers = {'X-Requested-With': 'XMLHttpRequest'} @@ -64,9 +61,7 @@ def mainlist(item): logger.info() itemlist = [] item.text_color = color1 - logueado, error_message = login("kbagi.com") - if not logueado: itemlist.append(item.clone(title=error_message, action="configuracion", folder=False)) else: @@ -79,24 +74,25 @@ def mainlist(item): itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro", url="http://kbagi.com/action/SearchFiles")) itemlist.append(item.clone(title=" Mi cuenta", action="cuenta")) - - item.extra = "http://diskokosmiko.mx/" - itemlist.append(item.clone(title="DiskoKosmiko", action="", text_color=color2)) - itemlist.append(item.clone(title=" Búsqueda", action="search", url="http://diskokosmiko.mx/action/SearchFiles")) - itemlist.append(item.clone(title=" Colecciones", action="colecciones", - url="http://diskokosmiko.mx/action/home/MoreNewestCollections?pageNumber=1")) - itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro", - url="http://diskokosmiko.mx/action/SearchFiles")) - itemlist.append(item.clone(title=" Mi cuenta", action="cuenta")) - itemlist.append(item.clone(action="", title="")) - + logueado, error_message = login("diskokosmiko.mx") + if not logueado: + itemlist.append(item.clone(title=error_message, action="configuracion", folder=False)) + else: + item.extra = "http://diskokosmiko.mx/" + itemlist.append(item.clone(title="DiskoKosmiko", action="", text_color=color2)) + itemlist.append(item.clone(title=" Búsqueda", action="search", url="http://diskokosmiko.mx/action/SearchFiles")) + itemlist.append(item.clone(title=" Colecciones", action="colecciones", + url="http://diskokosmiko.mx/action/home/MoreNewestCollections?pageNumber=1")) + 
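
The rewritten kbagi login() derives the settings prefix from the domain instead of hard-coding per-site branches, and mainlist() now gates each site's menu behind its own login() call, so a failed DiskoKosmiko login no longer hides the kbagi menu (and vice versa). A minimal, self-contained sketch of that pattern, with a plain dict standing in for config.get_setting (check_login and the dict are hypothetical names, not part of the channel):

    def check_login(pagina, get_setting):
        dom = pagina.split(".")[0]  # "kbagi.com" -> "kbagi"
        user = get_setting("%suser" % dom)
        if not user:
            return False, ("Para ver los enlaces de %s es necesario "
                           "registrarse en %s" % (dom, pagina))
        return True, ""

    settings = {"kbagiuser": "alice", "diskokosmikouser": ""}
    for pagina in ("kbagi.com", "diskokosmiko.mx"):
        ok, msg = check_login(pagina, settings.get)
        print(pagina, ok, msg)
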
itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro", + url="http://diskokosmiko.mx/action/SearchFiles")) + itemlist.append(item.clone(title=" Mi cuenta", action="cuenta")) + itemlist.append(item.clone(action="", title="")) folder_thumb = filetools.join(config.get_data_path(), 'thumbs_kbagi') files = filetools.listdir(folder_thumb) if files: itemlist.append( item.clone(title="Eliminar caché de imágenes (%s)" % len(files), action="delete_cache", text_color="red")) itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold")) - return itemlist @@ -115,7 +111,6 @@ def search(item, texto): def configuracion(item): - from platformcode import platformtools ret = platformtools.show_channel_settings() platformtools.itemlist_refresh() return ret @@ -124,12 +119,10 @@ def configuracion(item): def listado(item): logger.info() itemlist = [] - data_thumb = httptools.downloadpage(item.url, item.post.replace("Mode=List", "Mode=Gallery")).data if not item.post: data_thumb = "" item.url = item.url.replace("/gallery,", "/list,") - data = httptools.downloadpage(item.url, item.post).data data = re.sub(r"\n|\r|\t|\s{2}| |
", "", data) @@ -153,12 +146,10 @@ def listado(item): scrapedthumbnail = filetools.join(folder, "%s.jpg" % url_thumb.split("e=", 1)[1][-20:]) except: scrapedthumbnail = "" - if scrapedthumbnail: t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, url_thumb]) t.setDaemon(True) t.start() - else: scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png" scrapedurl = item.extra + scrapedurl @@ -168,7 +159,6 @@ def listado(item): plot = scrapertools.find_single_match(block, '
(.*?)
') if plot: plot = scrapertools.decodeHtmlentities(plot) - new_item = Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl, thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, text_color=color2, extra=item.extra, infoLabels={'plot': plot}, post=item.post) @@ -182,7 +172,6 @@ def listado(item): new_item.folderurl = item.url.rsplit("/", 1)[0] new_item.foldername = item.foldername new_item.fanart = item.thumbnail - itemlist.append(new_item) next_page = scrapertools.find_single_match(data, 'class="pageSplitter.*?" data-nextpage-number="([^"]+)"') if next_page: @@ -194,27 +183,23 @@ def listado(item): post = "" itemlist.append(Item(channel=item.channel, action="listado", title=">> Página Siguiente (%s)" % next_page, url=url, post=post, extra=item.extra)) - return itemlist def findvideos(item): logger.info() itemlist = [] - itemlist.append(item.clone(action="play", title="Reproducir/Descargar", server="kbagi")) usuario = scrapertools.find_single_match(item.url, '%s/([^/]+)/' % item.extra) url_usuario = item.extra + "/" + usuario - if item.folderurl and not item.folderurl.startswith(item.extra): item.folderurl = item.extra + item.folderurl if item.post: itemlist.append(item.clone(action="listado", title="Ver colección: %s" % item.foldername, url=item.folderurl + "/gallery,1,1?ref=pager", post="")) - data = httptools.downloadpage(item.folderurl).data token = scrapertools.find_single_match(data, - 'data-action="followChanged.*?name="__RequestVerificationToken".*?value="([^"]+)"') + 'data-action="followChanged.*?name="__RequestVerificationToken".*?value="([^"]+)"') collection_id = item.folderurl.rsplit("-", 1)[1] post = "__RequestVerificationToken=%s&collectionId=%s" % (token, collection_id) url = "%s/action/Follow/Follow" % item.extra @@ -223,18 +208,14 @@ def findvideos(item): title = "Dejar de seguir la colección: %s" % item.foldername url = "%s/action/Follow/UnFollow" % item.extra itemlist.append(item.clone(action="seguir", title=title, url=url, post=post, text_color=color5, folder=False)) - itemlist.append( item.clone(action="colecciones", title="Ver colecciones del usuario: %s" % usuario, url=url_usuario)) - return itemlist def colecciones(item): logger.info() - from core import jsontools itemlist = [] - usuario = False data = httptools.downloadpage(item.url).data if "Ver colecciones del usuario" not in item.title and not item.index: @@ -250,10 +231,8 @@ def colecciones(item): content = scrapertools.find_single_match(data, '
", "", content) - patron = '([^<]+)<.*?src="([^"]+)".*?

(.*?)

' matches = scrapertools.find_multiple_matches(content, patron) - index = "" if item.index and item.index != "0": matches = matches[item.index:item.index + 20] @@ -262,7 +241,6 @@ def colecciones(item): elif len(matches) > 20: matches = matches[:20] index = 20 - folder = filetools.join(config.get_data_path(), 'thumbs_kbagi') for url, scrapedtitle, thumb, info in matches: url = item.extra + url + "/gallery,1,1?ref=pager" @@ -285,13 +263,11 @@ def colecciones(item): itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, thumbnail=scrapedthumbnail, text_color=color2, extra=item.extra, foldername=scrapedtitle)) - if not usuario and data.get("NextPageUrl"): url = item.extra + data["NextPageUrl"] itemlist.append(item.clone(title=">> Página Siguiente", url=url, text_color="")) elif index: itemlist.append(item.clone(title=">> Página Siguiente", url=item.url, index=index, text_color="")) - return itemlist @@ -302,15 +278,12 @@ def seguir(item): if "Dejar" in item.title: message = "La colección ya no se sigue" if data.sucess and config.get_platform() != "plex": - from platformcode import platformtools platformtools.dialog_notification("Acción correcta", message) def cuenta(item): logger.info() - import urllib itemlist = [] - web = "kbagi" if "diskokosmiko" in item.extra: web = "diskokosmiko" @@ -318,7 +291,6 @@ def cuenta(item): if not logueado: itemlist.append(item.clone(title=error_message, action="configuracion", folder=False)) return itemlist - user = config.get_setting("%suser" % web, "kbagi") user = unicode(user, "utf8").lower().encode("utf8") url = item.extra + "/" + urllib.quote(user) @@ -336,16 +308,13 @@ def cuenta(item): text_color=color5, follow=True)) else: itemlist.append(item.clone(action="", title="No sigues ninguna colección", text_color=color4)) - return itemlist def filtro(item): logger.info() - list_controls = [] valores = {} - dict_values = None list_controls.append({'id': 'search', 'label': 'Texto a buscar', 'enabled': True, 'color': '0xFFC52020', 'type': 'text', 'default': '', 'visible': True}) @@ -353,14 +322,12 @@ def filtro(item): 'type': 'list', 'default': -1, 'visible': True}) list_controls[1]['lvalues'] = ['Aplicación', 'Archivo', 'Documento', 'Imagen', 'Música', 'Vídeo', 'Todos'] valores['tipo'] = ['Application', 'Archive', 'Document', 'Image', 'Music', 'Video', ''] - list_controls.append({'id': 'ext', 'label': 'Extensión', 'enabled': True, 'color': '0xFFF4FA58', 'type': 'text', 'default': '', 'visible': True}) list_controls.append({'id': 'tmin', 'label': 'Tamaño mínimo (MB)', 'enabled': True, 'color': '0xFFCC2EFA', 'type': 'text', 'default': '0', 'visible': True}) list_controls.append({'id': 'tmax', 'label': 'Tamaño máximo (MB)', 'enabled': True, 'color': '0xFF2ECCFA', 'type': 'text', 'default': '0', 'visible': True}) - # Se utilizan los valores por defecto/guardados web = "kbagi" if "diskokosmiko" in item.extra: @@ -369,7 +336,6 @@ def filtro(item): if valores_guardados: dict_values = valores_guardados item.valores = valores - from platformcode import platformtools return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, caption="Filtra la búsqueda", item=item, callback='filtrado') @@ -381,18 +347,15 @@ def filtrado(item, values): web = "diskokosmiko" # Guarda el filtro para que sea el que se cargue por defecto config.set_setting("filtro_defecto_" + web, values_copy, item.channel) - tipo = item.valores["tipo"][values["tipo"]] search = values["search"] ext = values["ext"] tmin = values["tmin"] tmax = 
values["tmax"] - if not tmin.isdigit(): tmin = "0" if not tmax.isdigit(): tmax = "0" - item.valores = "" item.post = "Mode=List&Type=%s&Phrase=%s&SizeFrom=%s&SizeTo=%s&Extension=%s&ref=pager&pageNumber=1" \ % (tipo, search, tmin, tmax, ext) @@ -401,18 +364,14 @@ def filtrado(item, values): def download_thumb(filename, url): - from core import downloadtools - lock = threading.Lock() lock.acquire() folder = filetools.join(config.get_data_path(), 'thumbs_kbagi') if not filetools.exists(folder): filetools.mkdir(folder) lock.release() - if not filetools.exists(filename): downloadtools.downloadfile(url, filename, silent=True) - return filename @@ -420,5 +379,4 @@ def delete_cache(url): folder = filetools.join(config.get_data_path(), 'thumbs_kbagi') filetools.rmdirtree(folder) if config.is_xbmc(): - import xbmc xbmc.executebuiltin("Container.Refresh") diff --git a/plugin.video.alfa/channels/mejortorrent.json b/plugin.video.alfa/channels/mejortorrent.json index a64f06e1..49621d31 100755 --- a/plugin.video.alfa/channels/mejortorrent.json +++ b/plugin.video.alfa/channels/mejortorrent.json @@ -28,6 +28,14 @@ "default": true, "enabled": true, "visible": true + }, + { + "id": "include_in_newest_documentales", + "type": "bool", + "label": "Incluir en Novedades - Documentales", + "default": true, + "enabled": true, + "visible": true } ] } \ No newline at end of file diff --git a/plugin.video.alfa/channels/mejortorrent.py b/plugin.video.alfa/channels/mejortorrent.py index c31662f6..424c122c 100755 --- a/plugin.video.alfa/channels/mejortorrent.py +++ b/plugin.video.alfa/channels/mejortorrent.py @@ -8,13 +8,13 @@ import urlparse from channelselector import get_thumb from core import httptools from core import scrapertools +from core import servertools from core.item import Item -from core.tmdb import Tmdb -from platformcode import logger +from platformcode import config, logger +from core import tmdb host = "http://www.mejortorrent.com" - def mainlist(item): logger.info() @@ -28,48 +28,889 @@ def mainlist(item): thumb_docus = get_thumb("channels_documentary.png") thumb_buscar = get_thumb("search.png") - itemlist.append(Item(channel=item.channel, title="Peliculas", action="getlist", + itemlist.append(Item(channel=item.channel, title="Novedades", action="listado_busqueda", extra="novedades", tipo=False, + url= host + "/secciones.php?sec=ultimos_torrents", thumbnail=thumb_buscar)) + itemlist.append(Item(channel=item.channel, title="Peliculas", action="listado", extra="peliculas", tipo=False, url= host + "/torrents-de-peliculas.html", thumbnail=thumb_pelis)) - itemlist.append(Item(channel=item.channel, title="Peliculas HD", action="getlist", + itemlist.append(Item(channel=item.channel, title="Peliculas HD", action="listado", extra="peliculas", tipo=False, url= host + "/torrents-de-peliculas-hd-alta-definicion.html", thumbnail=thumb_pelis_hd)) - itemlist.append(Item(channel=item.channel, title="Series", action="getlist", + itemlist.append(Item(channel=item.channel, title="Películas Listado Alfabetico", action="alfabeto", + url= host + "/peliculas-buscador.html" + + "?campo=letra&valor&valor2=Acci%%F3n&valor3=%s&valor4=3&submit=Buscar", extra="peliculas", + thumbnail=thumb_pelis)) + itemlist.append(Item(channel=item.channel, title="Series", action="listado", extra="series", tipo=False, url= host + "/torrents-de-series.html", thumbnail=thumb_series)) - itemlist.append(Item(channel=item.channel, title="Series HD", action="getlist", + itemlist.append(Item(channel=item.channel, title="Series Listado Alfabetico", 
action="alfabeto", extra="series", + url= host + "/torrents-de-series.html", thumbnail=thumb_series_az)) + itemlist.append(Item(channel=item.channel, title="Series HD", action="listado", extra="series", tipo=False, url= host + "/torrents-de-series-hd-alta-definicion.html", thumbnail=thumb_series_hd)) - itemlist.append(Item(channel=item.channel, title="Series Listado Alfabetico", action="listalfabetico", - url= host + "/torrents-de-series.html", thumbnail=thumb_series_az)) - #itemlist.append(Item(channel=item.channel, title="Documentales", action="getlist", - # url= host + "/torrents-de-documentales.html", thumbnail=thumb_docus)) - itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", thumbnail=thumb_buscar)) + itemlist.append(Item(channel=item.channel, title="Series HD Listado Alfabetico", action="alfabeto", extra="series-hd", + url= host + "/torrents-de-series-hd-alta-definicion.html", thumbnail=thumb_series_az)) + itemlist.append(Item(channel=item.channel, title="Documentales", action="listado", extra="documentales", tipo=False, + url= host + "/torrents-de-documentales.html", thumbnail=thumb_docus)) + itemlist.append(Item(channel=item.channel, title="Documentales Listado Alfabetico", action="alfabeto", extra="documentales", url= host + "/torrents-de-documentales.html", thumbnail=thumb_docus)) + itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", thumbnail=thumb_buscar, tipo=False)) return itemlist -def listalfabetico(item): +def alfabeto(item): logger.info() - itemlist = [] - for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', - 'U', 'V', 'W', 'X', 'Y', 'Z']: - itemlist.append(Item(channel=item.channel, action="getlist", title=letra, - url= host + "/series-letra-" + letra.lower() + ".html")) + if item.extra == "series-hd": + itemlist.append(Item(channel=item.channel, action="listado", title="Todas", extra="series", tipo=True, + url= host + "/secciones.php?sec=descargas&ap=series_hd&func=mostrar&letra=.")) + for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']: + itemlist.append(Item(channel=item.channel, action="listado", title=letra, extra="series", tipo=True, + url= host + "/secciones.php?sec=descargas&ap=series_hd&func=mostrar&letra=" + letra.lower())) - itemlist.append(Item(channel=item.channel, action="getlist", title="Todas", - url= host + "/series-letra..html")) + elif item.extra == "series" or item.extra == "documentales": + itemlist.append(Item(channel=item.channel, action="listado", title="Todas", extra=item.extra, tipo=True, url= host + "/" + item.extra + "-letra-..html")) + for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']: + itemlist.append(Item(channel=item.channel, action="listado", title=letra, extra=item.extra, tipo=True, url= host + "/" + item.extra + "-letra-" + letra.lower() + ".html")) + + elif item.extra == "peliculas": + itemlist.append(Item(channel=item.channel, action="listado", title="Todas", extra=item.extra, tipo=True, url=item.url % ".")) + for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']: + itemlist.append(Item(channel=item.channel, action="listado", title=letra, extra=item.extra, tipo=True, url=item.url % letra.lower())) return itemlist +def listado(item): + 
logger.info() + itemlist = [] + url_next_page = '' # Control de paginación + cnt_tot = 30 # Poner el núm. máximo de items por página + + if item.category: + del item.category + if item.totalItems: + del item.totalItems + + # La url de Películas por orden Alfabético tiene un formato distinto + if item.extra == "peliculas" and item.tipo: + url = item.url.split("?") + data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(url[0], post=url[1]).data) + else: + data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data) + + # En este canal las url's y los títulos tienen diferente formato dependiendo del contenido + if item.extra == "peliculas" and item.tipo: #Desde Lista Alfabética + patron = "
cnt_tot and item.extra == "documentales" and pag: + item.next_page = '' + if item.next_page != 'b': + if matches_cnt > cnt_pag + cnt_tot: + url_next_page = item.url + matches = matches[cnt_pag:cnt_pag+cnt_tot] + next_page = '' + if matches_cnt <= cnt_pag + (cnt_tot * 2): + if pag: + next_page = 'b' + modo = 'continue' + else: + matches = matches[cnt_pag:cnt_pag+cnt_tot] + next_page = 'a' + patron_next_page = " Siguiente >> <\/a>" + matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data) + modo = 'continue' + if len(matches_next_page) > 0: + url_next_page = urlparse.urljoin(item.url, matches_next_page[0]) + modo = 'next' + if item.next_page: + del item.next_page + + #logger.debug(data) + #logger.debug("PATRON1: " + patron + " / ") + #logger.debug(matches) + + # Primera pasada + # En la primera pasada se obtiene una información básica del título a partir de la url + # Para Series se obtienen la temporada y el episodio + # Se limpian algunas etiquetas del item inical. + for scrapedurl, scrapedthumbnail in matches: + item_local = item.clone() + if item_local.tipo: + del item_local.tipo + if item_local.totalItems: + del item_local.totalItems + item_local.title = '' + item_local.context = "['buscar_trailer']" + + item_local.title = scrapertools.get_match(scrapedurl, patron_enlace) + item_local.title = item_local.title.replace("-", " ") + item_local.url = urlparse.urljoin(item_local.url, scrapedurl) + item_local.thumbnail = host + urllib.quote(scrapedthumbnail) + item_local.contentThumbnail = item_local.thumbnail + item_local.infoLabels['year'] = '-' # Al no saber el año, le ponemos "-" y TmDB lo calcula automáticamente + + # Para que el menú contextual muestre conrrectamente las opciones de añadir a Videoteca + if item_local.extra == "series": + if "/serie-episodio" in item_local.url: + item_local.contentType = "episode" + else: + item_local.contentType = "season" + + # Poner nombre real de serie. 
Busca nº de temporada y capítulo + if item_local.extra == "series" or item.extra == "documentales": + if item_local.contentType == "episode": + real_title = scrapertools.find_single_match(scrapedurl, patron_title_ep) + real_title = real_title.replace("-", " ") + item_local.contentSeason = scrapertools.find_single_match(scrapedurl, '.*?-(\d{1,2})x\d{1,2}.*?\.html') + + #Hay que buscar la raiz de la temporada + data_epi = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item_local.url).data) + url = scrapertools.find_single_match(data_epi, r" cnt_pag + cnt_tot: + url_next_page = item.url + matches = matches[cnt_pag:cnt_pag+cnt_tot] + next_page = '' + if matches_cnt <= cnt_pag + (cnt_tot * 2): + if pag: + next_page = 'b' + modo = 'continue' + else: + matches = matches[cnt_pag:cnt_pag+cnt_tot] + next_page = 'a' + patron_next_page = " Siguiente >> <\/a>" + matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data) + modo = 'continue' + if len(matches_next_page) > 0: + url_next_page = urlparse.urljoin(item.url, matches_next_page[0]) + modo = 'next' + if item.next_page: + del item.next_page + + if matches_cnt >= cnt_tot: + cnt_pag += cnt_tot + else: + cnt_pag += matches_cnt + + for scrapedurl, scrapedtitle, scrapedinfo in matches: + # Creamos "item_local" y lo limpiamos un poco de algunos restos de item + item_local = item.clone() + if item_local.category: + del item_local.category + if item_local.tipo: + del item_local.tipo + if item_local.totalItems: + del item_local.totalItems + item_local.contentThumbnail = '' + item_local.thumbnail = '' + item_local.context = "['buscar_trailer']" + item_local.infoLabels['year'] = '-' # Al no saber el año, le ponemos "-" y TmDB lo calcula automáticamente + + # Limpiamos títulos, Sacamos datos de calidad, audio y lenguaje + scrapedtitle = re.sub('\r\n', '', scrapedtitle).decode('iso-8859-1').encode('utf8').strip() + title = scrapedtitle + + title_subs = "" + item_local.language = "" + if "[subs" in title or "[Subs" in title or "[VOS" in title or "[VOSE" in title or "(V.O.S.E" in title: + item_local.language = "VOS" + title = title.replace(" [Subs. integrados]", "").replace(" [subs. 
Integrados]", "").replace(" [VOSE", "").replace(" (V.O.S.E)", "") + if "latino" in title or "Latino" in title or "rgentina" in title: + item_local.language = "LAT" + title = title.replace(" Latino", "").replace(" latino", "").replace(" Argentina", "").replace(" argentina", "") + title = title.replace("Castellano", "").replace("castellano", "").replace("inglés", "").replace("ingles", "").replace("Inglés", "").replace("Ingles", "") + + if "3d" in title or "3D" in title: #Reservamos info de subtítulos para después de TMDB + title_subs = "[3D]" + title = title.replace(" [3d]", "").replace(" 3d", "").replace(" [3D]", "").replace(" 3D", "") + if "Temp" in title or "temp" in title: #Reservamos info de Temporada para después de TMDB + title_subs = "[Temp.]" + if "Audio" in title or "audio" in title: #Reservamos info de subtítulos para después de TMDB + title_subs = '[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])') + title = re.sub(r'\[[a|A]udio.*?\]', '', title) + if "[Dual" in title or "[dual" in title: + title_subs = "[Dual]" + title = title = re.sub(r'\[D|dual.*?\]', '', title) + + if title.endswith('.'): + title = title[:-1] + title = title.replace("á", "a", 1).replace("é", "e", 1).replace("í", "i", 1).replace("ó", "o", 1).replace("ú", "u", 1).replace("ü", "u", 1) + if not title: + title = "dummy" + title = scrapertools.remove_htmltags(title) + + if item.extra == "novedades" and ("/serie-" in scrapedurl or "/doc-" in scrapedurl): + item_local.quality = scrapertools.find_single_match(scrapedtitle, r'.*?\[(.*?)\]') + else: + item_local.quality = scrapertools.remove_htmltags(scrapedinfo).decode('iso-8859-1').encode('utf8') + item_local.quality = item_local.quality.replace("(", "").replace(")", "").replace("[", "").replace("]", "").replace("Documental", "").replace("documental", "") + + item_local.url = urlparse.urljoin(item.url, scrapedurl) + + #Preparamos la información básica para TMDB + if "/serie-" in scrapedurl or "/doc-" in scrapedurl: + item_local.action = "episodios" + if "/serie-" in scrapedurl: + item_local.extra = "series" + else: + item_local.extra = "documentales" + item_local.contentType = "season" + + title = re.sub(r'\[\d+.*?\]', '', title) # Quitar la calidad del título + item_local.contentSerieName = scrapertools.find_single_match(title, '(.*?) 
- \d.*?') + if not item_local.contentSerieName: + item_local.contentSerieName = title + if item_local.contentSerieName.endswith(' '): + item_local.contentSerieName = item_local.contentSerieName[:-1] + title = item_local.contentSerieName + item_local.title = title + item_local.infoLabels['tvshowtitle'] = item_local.contentSerieName + if not item_local.contentSerieName: + item_local.contentSerieName = "dummy" + item_local.contentSeason = scrapertools.find_single_match(scrapedurl, '.*?-(\d{1,2})-Temp.*?\.html') + if not item_local.contentSeason: + item_local.contentSeason = 1 + + if "(HDRip" in title or "(BR" in title or "(HDRip" in title or "(VHSRip" in title or "(DVDRip" in title or "(FullB" in title or "(fullb" in title or "(Blu" in title or "(4K" in title or "(4k" in title or "(HEVC" in title or "(IMAX" in title or "Extendida" in title or "[720p]" in title or "[1080p]" in title: + if not item_local.quality: + item_local.quality = scrapertools.find_single_match(title, r'\(.*?\)?\(.*?\)') + if not item_local.quality: + item_local.quality = scrapertools.find_single_match(title, r'[\[|\(](.*?)[\)|\]]') + title = re.sub(r'\(.*?\)?\(.*?\)', '', title) + title = re.sub(r'[\[|\(].*?[\)|\]]', '', title) + if not item_local.quality: + if "FullBluRay" in title or "fullbluray" in title: + item_local.quality = "FullBluRay" + title = title.replace("FullBluRay", "").replace("fullbluray", "") + if "4K" in title or "4k" in title or "HDR" in title or "hdr" in title: + item_local.quality = "4K" + title = title.replace("4k-hdr", "").replace("4K-HDR", "").replace("hdr", "").replace("HDR", "").replace("4k", "").replace("4K", "") + title = title.replace("(", "").replace(")", "").replace("[", "").replace("]", "") + if title.endswith(' '): + title = title[:-1] + item_local.title = title + + if "/peli-" in scrapedurl: + item_local.action = "findvideos" + item_local.extra = "peliculas" + item_local.contentType = "movie" + item_local.contentTitle = title + + if "Saga" in item_local.contentTitle or "Saga" in item_local.contentSerieName: + item_local.contentTitle = item_local.contentTitle.replace("Saga ", "").replace("Saga", "") + item_local.contentSerieName = item_local.contentSerieName.replace("Saga ", "").replace("Saga", "") + title_subs = "[Saga]" + if "Colecc" in item_local.contentTitle or "Colecc" in item_local.contentSerieName: + item_local.contentTitle = item_local.contentTitle.replace("Coleccion ", "").replace("Coleccion", "") + item_local.contentSerieName = item_local.contentSerieName.replace("Coleccion ", "").replace("Coleccion", "") + title_subs = "[Coleccion]" + + # Guardamos temporalmente info de subtítulos, si lo hay + item_local.extra = item_local.extra + title_subs + + itemlist.append(item_local.clone()) + + #logger.debug(item_local) + + #Llamamos a TMDB para que complete InfoLabels desde itemlist. 
Mejor desde itemlist porque envía las queries en paralelo + tmdb.set_infoLabels(itemlist, seekTmdb = True) + + # Pasada para maqullaje de los títulos obtenidos desde TMDB + for item_local in itemlist: + title = item_local.title + title_subs = "" + temporada = "" + title_subs = scrapertools.find_single_match(item_local.extra, r'(\[.*?\])') + if "[Temp.]" in item_local.extra: + temporada = "[Temp.]" + title_subs = "" + if "Audio" in item_local.extra or "audio" in item_local.extra: + title_subs = '[%s]' % scrapertools.find_single_match(item_local.extra, r'\[[a|A]udio (.*?)\]') + item_local.extra = re.sub(r'\[.*?\]', '', item_local.extra) + + # Si TMDB no ha encontrado el vídeo limpiamos el año + if item_local.infoLabels['year'] == "-": + item_local.infoLabels['year'] = '' + item_local.infoLabels['aired'] = '' + + # Ahora maquillamos un poco los titulos dependiendo de si se han seleccionado títulos inteleigentes o no + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: + if item_local.contentType == "season" or item_local.contentType == "tvshow": + if item_local.extra == "series" or temporada == "[Temp.]": + title = '%s - Temporada %s [%s][%s][%s]' % (item_local.contentSerieName, str(item_local.contentSeason), scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'), item_local.quality, item_local.language) + else: + title = '%s [%s][%s][%s]' % (item_local.contentSerieName, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'), item_local.quality, item_local.language) + + elif item_local.contentType == "movie": + title = '%s [%s][%s][%s]' % (title, str(item_local.infoLabels['year']), item_local.quality, item_local.language) + + if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados: + if item_local.contentType == "season" or item_local.contentType == "tvshow": + if item_local.extra == "series" or temporada == "[Temp.]": + title = '%s - Temporada %s -%s-' % (item_local.contentSerieName, item_local.contentSeason, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})')) + else: + title = '%s -%s-' % (item_local.contentSerieName, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})')) + title_subs = title_subs.replace("[", "-").replace("]", "-") + + title = title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "") + item_local.title = title + title_subs + item_local.contentTitle += title_subs #añadimos info adicional para display + + #logger.debug("title=[" + item_local.title + "], url=[" + item_local.url + "], calidad=[" + item_local.quality + "]") + #logger.debug(item_local) + + if url_next_page: + itemlist.append( + Item(channel=item.channel, action="listado_busqueda", title="[COLOR gold][B]Pagina siguiente >>[/B][/COLOR]", url=url_next_page, next_page=next_page, cnt_pag=cnt_pag, pag=pag, modo=modo, extra=item.extra, tipo=item.tipo)) + + logger.debug(url_next_page + " / " + next_page + " / " + str(matches_cnt) + " / " + str(cnt_pag) + " / " + str(pag) + " / " + modo + " / " + item.extra + " / " + str(item.tipo)) + + return itemlist + + +def findvideos(item): + #import xbmc + logger.info() + itemlist = [] + + # Saber si estamos en una ventana emergente lanzada desde una viñeta del menú principal, + # con la función "play_from_library" + #unify_status = False + #if xbmc.getCondVisibility('Window.IsMedia') == 1: + # unify_status = config.get_setting("unify") + unify_status = config.get_setting("unify") + + # 
Obtener la información actualizada del Episodio, si no la hay + if not item.infoLabels['tmdb_id']: + tmdb.set_infoLabels(item, True) + + if item.post: #Puede traer datos para una llamada "post". De momento usado para documentales, pero podrían ser series + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url, post=item.post).data) + data = data.replace('"', "'") + patron = ">Pincha.*?)", "", httptools.downloadpage(item.url).data) + patron = ")", "", httptools.downloadpage(url).data) + #logger.debug(torrent_data) + link = scrapertools.get_match(torrent_data, ">Pincha.*?)", "", httptools.downloadpage(item.url).data) + + #Datos para crear el Post. Usado para documentales + total_capis = scrapertools.find_single_match(data, "") + tabla = scrapertools.find_single_match(data, "") + titulo_post = scrapertools.find_single_match(data, "") + + # Selecciona en tramo que nos interesa + data = scrapertools.find_single_match(data, + "(
.*?)
") + + # Prepara el patrón de búsqueda de: URL, título, fechas y dos valores mas sin uso + if item.extra == 'series': + patron = ".*?]+>
?([^>]+)<\/a><\/td>.*?" + else: + patron = "
.*?]+>(.*?)<\/td>.*?" + patron += "]+>Fecha: ([^<]+)<\/div><\/td>.*?" + patron += " 1: + #temporada = re.sub("\D", "", epi[0]) + capitulo = re.search("\d+", epi[1]) + if capitulo: + item_local.contentEpisodeNumber = capitulo.group() + else: + item_local.contentEpisodeNumber = 1 + + else: #Se prepara el Post para documentales + item_local.contentEpisodeNumber = 1 + item_local.url = host + "/secciones.php?sec=descargas&ap=contar_varios" + item_local.post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo_post}) + + item_local.title = scrapedtitle + + itemlist.append(item_local.clone()) + + # Llamamos a TMDB para que complete el episodio en InfoLabels + tmdb.set_infoLabels(itemlist, seekTmdb = True) + + # Pasada para maqullaje de los títulos obtenidos desde TMDB + for item_local in itemlist: + title = item_local.title + + # Si no hay datos de TMDB, pongo los datos locales que conozco + if item_local.infoLabels['aired']: + year = scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})') + else: + year = scrapertools.find_single_match(fecha, r'(\d{4})') + if not item_local.infoLabels['year']: + item_local.infoLabels['year'] = year + + # Si son episodios múltiples, se toman los datos locales para nombre de episodio + if scrapertools.find_single_match(title, r'\d+x\d+.*?(\w+.*?\d+x\d+)'): + item_local.infoLabels['episodio_titulo'] = scrapertools.find_single_match(title, r'\d+x\d+.*?(\w+.*?\d+x\d+)') + + #Preparamos el título para que sea compatible con Añadir Serie a Videoteca + if item_local.infoLabels['episodio_titulo']: + item_local.title = '%sx%s %s, %s [%s]' % (str(item_local.contentSeason), item_local.contentEpisodeNumber, item_local.infoLabels['episodio_titulo'], item_local.contentSerieName, year) + item_local.infoLabels['episodio_titulo'] = '%s [%s]' % (item_local.infoLabels['episodio_titulo'], year) + else: + item_local.title = '%sx%s %s [%s]' % (str(item_local.contentSeason), item_local.contentEpisodeNumber, item_local.contentSerieName, year) + + #Ahora maquillamos un poco los titulos dependiendo de si se han seleccionado títulos inteleigentes o no + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: añadir calidad, lenguaje + item_local.title = '%s [%s][%s]' % (item_local.title, item_local.quality, item_local.language) + + #Quitamos campos vacíos + if item_local.infoLabels['episodio_titulo']: + item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace("[]", "") + item_local.title = item_local.title.replace("[]", "") + + #logger.debug("title=[" + item_local.title + "], url=[" + item_local.url + "], item=[" + str(item_local) + "]") + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(item.clone(title="[COLOR yelow]Añadir esta serie a la videoteca[/COLOR]", action="add_serie_to_library", extra="episodios")) + + return itemlist def search(item, texto): - logger.info() + itemlist = [] + logger.info("search:" + texto) texto = texto.replace(" ", "+") item.url = host + "/secciones.php?sec=buscador&valor=%s" % (texto) + try: - return buscador(item) + itemlist = listado_busqueda(item) + return itemlist # Se captura la excepción, para no interrumpir al buscador global si un canal falla except: @@ -77,327 +918,28 @@ def search(item, texto): logger.error("%s" % line) return [] - -def buscador(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = "]+>(.*?)" - patron += ".*?([^']+)" - patron_enlace = 
"/serie-descargar-torrents-\d+-\d+-(.*?)\.html" - - matches = scrapertools.find_multiple_matches(data, patron) - - for scrapedurl, scrapedtitle, scrapedinfo in matches: - title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode( - 'utf8') + ' ' + scrapedinfo.decode('iso-8859-1').encode('utf8') - url = urlparse.urljoin(item.url, scrapedurl) - logger.debug("title=[" + title + "], url=[" + url + "]") - - itemlist.append( - Item(channel=item.channel, action="episodios", title=title, url=url, folder=True, extra="series", - viewmode="movie_with_plot")) - - # busca pelis - patron = "]+>(.*?)" - patron_enlace = "/peli-descargar-torrent-\d+(.*?)\.html" - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle in matches: - title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode('utf-8') - url = urlparse.urljoin(item.url, scrapedurl) - logger.debug("title=[" + title + "], url=[" + url + "]") - itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, folder=False, extra="pelicula")) - - # busca docu - patron = " -1: - patron = '[^<]+' - patron += '' - patron_enlace = "/peli-descargar-torrent-\d+(.*?)\.html" - patron_title = '([^<]+)(\s*([^>]+))?' - action = "show_movie_info" - folder = True - extra = "" - elif item.url.find("series-letra") > -1: - patron = "()" - patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html" - patron_title = '([^<]+)(\s*([^>]+))?' - action = "episodios" - folder = True - extra = "series" - elif item.url.find("series") > -1: - patron = '[^<]+' - patron += '' - patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html" - patron_title = '([^<]+)(\s*([^>]+))?' - action = "episodios" - folder = True - extra = "series" - else: - patron = '[^<]+' - patron += '' - patron_enlace = "/doc-descargar-torrent-\d+-\d+-(.*?)\.html" - patron_title = '([^<]+)(\s*([^>]+))?' 
- action = "episodios" - folder = True - extra = "docus" - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedthumbnail in matches: - title = scrapertools.get_match(scrapedurl, patron_enlace) - title = title.replace("-", " ") - url = urlparse.urljoin(item.url, scrapedurl) - thumbnail = host + urllib.quote(scrapedthumbnail) - plot = "" - itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, plot=plot, - folder=folder, extra=extra)) - - matches = re.compile(patron_title, re.DOTALL).findall(data) - - cnt = 0 - for scrapedtitle, notused, scrapedinfo in matches: - title = re.sub('\r\n', '', scrapedtitle).decode('iso-8859-1').encode('utf8').strip() - if title.endswith('.'): - title = title[:-1] - - info = scrapedinfo.decode('iso-8859-1').encode('utf8') - if info != "": - title = '{0} {1}'.format(title, info) - - itemlist[cnt].title = title - cnt += 1 - if cnt == len(itemlist) - 1: - break - - if len(itemlist) == 0: - itemlist.append(Item(channel=item.channel, action="mainlist", title="No se ha podido cargar el listado")) - else: - # Extrae el paginador - patronvideos = " Siguiente >>" - matches = re.compile(patronvideos, re.DOTALL).findall(data) - - if len(matches) > 0: - scrapedurl = urlparse.urljoin(item.url, matches[0]) - itemlist.append( - Item(channel=item.channel, action="getlist", title="Pagina siguiente >>", url=scrapedurl, folder=True)) - - return itemlist - - -def episodios(item): - #import web_pdb; web_pdb.set_trace() - logger.info() - itemlist = [] - - # Descarga la página - data = httptools.downloadpage(item.url).data - - total_capis = scrapertools.get_match(data, "") - tabla = scrapertools.get_match(data, "") - titulo = scrapertools.get_match(data, "") - - item.thumbnail = scrapertools.find_single_match(data, - "src='http://www\.mejortorrent\.com(/uploads/imagenes/" + tabla + "/[a-zA-Z0-9_ ]+.jpg)'") - item.thumbnail = host + urllib.quote(item.thumbnail) - - # - data = scrapertools.get_match(data, - "(.*?)") - if item.extra == "series": - patron = "]+>]+>([^>]+)
[^<]+" - else: - patron = "]+>([^>]+)[^<]+" - - patron += "]+>Fecha: ([^<]+)
[^<]+" - patron += " 1: - temporada = re.sub("\D", "", epi[0]) - capitulo = re.search("\d+", epi[1]) - if capitulo: - capitulo = capitulo.group() - else: - capitulo = 1 - - epi_data = oTmdb.get_episodio(temporada, capitulo) - logger.debug("epi_data=" + str(epi_data)) - - if epi_data: - item.thumbnail = epi_data["temporada_poster"] - item.fanart = epi_data["episodio_imagen"] - item.plot = epi_data["episodio_sinopsis"] - epi_title = epi_data["episodio_titulo"] - if epi_title != "": - title = scrapedtitle + " " + epi_title + " (" + fecha + ")" - else: - try: - item.fanart = oTmdb.get_backdrop() - except: - pass - - item.plot = oTmdb.get_sinopsis() - - logger.debug("title=[" + title + "], url=[" + url + "], item=[" + str(item) + "]") - - itemlist.append( - Item(channel=item.channel, action="play", title=title, url=url, thumbnail=item.thumbnail, plot=item.plot, - fanart=item.fanart, extra=post, folder=False, id=value)) - - return itemlist - - -def show_movie_info(item): - logger.info() - - itemlist = [] - - tmdb_title = re.sub(r'\(.*\)|\[.*\]', '', item.title).strip() - logger.debug('tmdb_title=' + tmdb_title) - - try: - oTmdb = Tmdb(texto_buscado=tmdb_title, idioma_busqueda="es") - item.fanart = oTmdb.get_backdrop() - item.plot = oTmdb.get_sinopsis() - except: - pass - - data = httptools.downloadpage(item.url).data - - patron = ""): - link = scrapertools.get_match(torrent_data, "") - else: - link = scrapertools.get_match(torrent_data, "") - link = urlparse.urljoin(url, link) - logger.debug("link=" + link) - itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link, - thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False, extra="pelicula")) - - return itemlist - - -def play(item): - #import web_pdb; web_pdb.set_trace() - logger.info() - itemlist = [] - - if item.extra == "pelicula": - itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=item.url, - thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False)) - #data = httptools.downloadpage(item.url).data - #logger.debug("data=" + data) - #url http://www.mejortorrent.com/peli-descargar-torrent-16443-Thor-Ragnarok.html - #patron = host + "/peli-descargar-torrent-((.*?))-" - #newid = scrapertools.find_single_match(item.url, patron) - - - - #params = dict(urlparse.parse_qsl(item.extra)) - #patron = host + "/secciones.php?sec=descargas&ap=contar&tabla=peliculas&id=" + newid[0] + "&link_bajar=1" - #http://www.mejortorrent.com/secciones.php?sec=descargas&ap=contar&tabla=peliculas&id=16443&link_bajar=1 - #link=scrapertools.find_single_match(data,patron) - #data = httptools.downloadpage(link).data - - - #data = httptools.downloadpage(patron).data - #patron = "Pincha " - #link = host + scrapertools.find_single_match(data, patron) - #logger.info("link=" + link) - #itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link, - # thumbnail=item.thumbnail, plot=item.plot, folder=False)) - - else: - #data = httptools.downloadpage(item.url, post=item.extra).data - data = httptools.downloadpage(item.url).data - #logger.debug("data=" + data) - - params = dict(urlparse.parse_qsl(item.extra)) - patron = host + "/secciones.php?sec=descargas&ap=contar&tabla=" + params["tabla"] + "&id=" + item.id - #link=scrapertools.find_single_match(data,patron) - #data = httptools.downloadpage(link).data - - - data = httptools.downloadpage(patron).data - patron = "Pincha " - link = 
scrapertools.find_single_match(data, patron) - if not host in link: - link = host + link - logger.info("link=" + link) - itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link, - thumbnail=item.thumbnail, plot=item.plot, folder=False)) - return itemlist - def newest(categoria): logger.info() itemlist = [] item = Item() try: if categoria == 'torrent': - item.url = host + "/torrents-de-peliculas.html" - - itemlist = getlist(item) - if itemlist[-1].title == "Pagina siguiente >>": - itemlist.pop() - item.url = host + "/torrents-de-series.html" - itemlist.extend(getlist(item)) - if itemlist[-1].title == "Pagina siguiente >>": + item.url = host + "/secciones.php?sec=ultimos_torrents" + item.extra = "novedades" + item.channel = "mejortorrent" + item.tipo = False + itemlist = listado_busqueda(item) + if "Pagina siguiente >>" in itemlist[-1].title: itemlist.pop() + if categoria == 'documentales': + item.url = host + "/torrents-de-documentales.html" + item.extra = "documentales" + item.channel = "mejortorrent" + item.tipo = False + itemlist = listado(item) + if "Pagina siguiente >>" in itemlist[-1].title: + itemlist.pop() # Se captura la excepción, para no interrumpir al canal novedades si un canal falla except: import sys diff --git a/plugin.video.alfa/channels/plusdede.py b/plugin.video.alfa/channels/plusdede.py index 729b16a3..b345e7b0 100644 --- a/plugin.video.alfa/channels/plusdede.py +++ b/plugin.video.alfa/channels/plusdede.py @@ -29,8 +29,7 @@ def login(): data = httptools.downloadpage(url_origen).data except: data = httptools.downloadpage(url_origen, follow_redirects=False).data - - if re.search(r'(?i)%s' % config.get_setting("plusdedeuser", "plusdede"), data): + if '' in data: return True token = scrapertools.find_single_match(data, 'Siguiente<\/a>') + next_page = scrapertools.find_single_match(data, '') if next_page != '': itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png')) @@ -222,7 +222,7 @@ def findvideos(item): for video_url in matches: logger.debug('video_url: %s' % video_url) - if 'stream' in video_url: + if 'stream' in video_url and 'streamango' not in video_url: data = httptools.downloadpage('https:'+video_url).data logger.debug(data) if not 'iframe' in video_url: diff --git a/plugin.video.alfa/core/tmdb.py b/plugin.video.alfa/core/tmdb.py index a318cc7d..1ba25685 100755 --- a/plugin.video.alfa/core/tmdb.py +++ b/plugin.video.alfa/core/tmdb.py @@ -404,7 +404,8 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None): otmdb = Tmdb(external_id=item.infoLabels['tvrage_id'], external_source="tvrage_id", tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda) - if otmdb is None: + #if otmdb is None: + if not item.infoLabels['tmdb_id'] and not item.infoLabels['imdb_id'] and not item.infoLabels['tvdb_id'] and not item.infoLabels['freebase_mid'] and not item.infoLabels['freebase_id'] and not item.infoLabels['tvrage_id']: # No se ha podido buscar por ID... # hacerlo por titulo if tipo_busqueda == 'tv': diff --git a/plugin.video.alfa/platformcode/launcher.py b/plugin.video.alfa/platformcode/launcher.py index 581246d6..cc62f933 100644 --- a/plugin.video.alfa/platformcode/launcher.py +++ b/plugin.video.alfa/platformcode/launcher.py @@ -24,7 +24,7 @@ def start(): funciones que deseamos que se ejecuten nada mas abrir el plugin. 
""" logger.info() - config.set_setting('show_once', False) + #config.set_setting('show_once', True) # Test if all the required directories are created config.verify_directories_created() @@ -51,10 +51,10 @@ def run(item=None): item.start = True; else: item = Item(channel="channelselector", action="getmainlist", viewmode="movie") - if config.get_setting('show_once'): + if not config.get_setting('show_once'): platformtools.dialog_ok('Alfa', 'Alfa recomienda para mejorar tu experiencia:', 'Palomitas, relajate y disfruta.') - config.set_setting('show_once', False) + config.set_setting('show_once', True) logger.info(item.tostring()) diff --git a/plugin.video.alfa/servers/flashx.py b/plugin.video.alfa/servers/flashx.py index e657ab05..800d2ad1 100644 --- a/plugin.video.alfa/servers/flashx.py +++ b/plugin.video.alfa/servers/flashx.py @@ -21,11 +21,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= pfxfx = "" data = httptools.downloadpage(page_url, cookies=False).data data = data.replace("\n","") - cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.bz/counter.cgi.*?[^(?:'|")]+)""") + cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.cc/counter.cgi.*?[^(?:'|")]+)""") cgi_counter = cgi_counter.replace("%0A","").replace("%22","") - playnow = scrapertools.find_single_match(data, 'https://www.flashx.bz/dl[^"]+') + playnow = scrapertools.find_single_match(data, 'https://www.flashx.cc/dl[^"]+') # Para obtener el f y el fxfx - js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.bz/js\w+/c\w+.*?[^(?:'|")]+)""") + js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.cc/js\w+/c\w+.*?[^(?:'|")]+)""") data_fxfx = httptools.downloadpage(js_fxfx).data mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","") matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)') @@ -35,7 +35,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= logger.info("mfxfxfx2= %s" %pfxfx) if pfxfx == "": pfxfx = "ss=yes&f=fail&fxfx=6" - coding_url = 'https://www.flashx.bz/flashx.php?%s' %pfxfx + coding_url = 'https://www.flashx.cc/flashx.php?%s' %pfxfx # {f: 'y', fxfx: '6'} bloque = scrapertools.find_single_match(data, '(?s)Form method="POST" action(.*?)span') flashx_id = scrapertools.find_single_match(bloque, 'name="id" value="([^"]+)"')