From 8e7140f4aa06d8b26ff5bd74968c015b0d349e59 Mon Sep 17 00:00:00 2001
From: Intel1
Date: Mon, 23 Jul 2018 17:46:38 -0500
Subject: [PATCH] Channel updates

bajui: removed, the website no longer exists
divxtotal: removed, the website no longer exists
guaridavalencianista: removed, the website no longer exists
lacajita: removed, the website no longer exists
doomtv: pagination fix
locopelis: fix
retroseriestv: new channel
veseriesonline: json added
hdfull: fix so that infoplus appears in findvideos
---
Short sketches of the reworked doomtv pagination and of the hdfull
__csrf_magic login flow follow the diff.

 plugin.video.alfa/channels/bajui.json          |  26 -
 plugin.video.alfa/channels/bajui.py            | 247 ---------
 plugin.video.alfa/channels/divxtotal.json      |  39 --
 plugin.video.alfa/channels/divxtotal.py        | 500 ------------------
 plugin.video.alfa/channels/doomtv.py           |  62 +--
 .../channels/guaridavalencianista.json         |  12 -
 .../channels/guaridavalencianista.py           | 177 -------
 plugin.video.alfa/channels/hdfull.py           | 152 +-----
 plugin.video.alfa/channels/lacajita.json       |  69 ---
 plugin.video.alfa/channels/lacajita.py         | 297 -----------
 plugin.video.alfa/channels/locopelis.py        |  49 +-
 plugin.video.alfa/channels/retroseriestv.json  |  22 +
 plugin.video.alfa/channels/retroseriestv.py    | 214 ++++++++
 .../channels/veseriesonline.json               |  37 ++
 plugin.video.alfa/channels/veseriesonline.pyo  | Bin 8550 -> 0 bytes
 15 files changed, 340 insertions(+), 1563 deletions(-)
 delete mode 100644 plugin.video.alfa/channels/bajui.json
 delete mode 100644 plugin.video.alfa/channels/bajui.py
 delete mode 100755 plugin.video.alfa/channels/divxtotal.json
 delete mode 100644 plugin.video.alfa/channels/divxtotal.py
 delete mode 100755 plugin.video.alfa/channels/guaridavalencianista.json
 delete mode 100755 plugin.video.alfa/channels/guaridavalencianista.py
 delete mode 100755 plugin.video.alfa/channels/lacajita.json
 delete mode 100644 plugin.video.alfa/channels/lacajita.py
 create mode 100644 plugin.video.alfa/channels/retroseriestv.json
 create mode 100644 plugin.video.alfa/channels/retroseriestv.py
 create mode 100644 plugin.video.alfa/channels/veseriesonline.json
 delete mode 100644 plugin.video.alfa/channels/veseriesonline.pyo

diff --git a/plugin.video.alfa/channels/bajui.json b/plugin.video.alfa/channels/bajui.json
deleted file mode 100644
index 98d9be1f..00000000
--- a/plugin.video.alfa/channels/bajui.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "id": "bajui",
-  "name": "Bajui",
-  "active": false,
-  "adult": false,
-  "language": ["cast"],
-  "thumbnail": "bajui.png",
-  "banner": "bajui.png",
-  "fanart": "bajui.png",
-  "categories": [
-    "movie",
-    "tvshow",
-    "documentary",
-    "vos"
-  ],
-  "settings": [
-    {
-      "id": "include_in_global_search",
-      "type": "bool",
-      "label": "Incluir en busqueda global",
-      "default": false,
-      "enabled": true,
-      "visible": true
-    }
-  ]
-}
diff --git a/plugin.video.alfa/channels/bajui.py b/plugin.video.alfa/channels/bajui.py
deleted file mode 100644
index fde86422..00000000
--- a/plugin.video.alfa/channels/bajui.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import re
-import urlparse
-
-from core import httptools
-from core import scrapertools
-from core import servertools
-from core.item import Item
-from platformcode import logger
-from channelselector import get_thumb
-
-
-def mainlist(item):
-    logger.info()
-    itemlist = []
-    itemlist.append(Item(channel=item.channel, title="Películas", action="menupeliculas",
-                         url="http://www.bajui.org/descargas/categoria/2/peliculas",
-                         fanart=item.fanart, thumbnail=get_thumb('movies', auto=True)))
-    itemlist.append(Item(channel=item.channel, title="Series", action="menuseries",
-                         fanart=item.fanart, thumbnail=get_thumb('tvshows', auto=True)))
-    itemlist.append(Item(channel=item.channel, title="Documentales", action="menudocumentales",
-                         fanart=item.fanart, thumbnail=get_thumb('documentaries', auto=True)))
-    itemlist.append(Item(channel=item.channel, title="Buscar", action="search",
-                         fanart=item.fanart, thumbnail=get_thumb('search', auto=True)))
-    return itemlist
-
-
-def menupeliculas(item):
-    logger.info()
-    itemlist = []
-    itemlist.append(Item(channel=item.channel, title="Películas - Novedades", action="peliculas", url=item.url,
-                         fanart=item.fanart, viewmode="movie_with_plot"))
-    itemlist.append(
-        Item(channel=item.channel, title="Películas - A-Z", action="peliculas", url=item.url + "/orden:nombre",
-             fanart=item.fanart, viewmode="movie_with_plot"))
-
-    data = httptools.downloadpage(item.url).data
-    data = scrapertools.get_match(data, '')
-    patron = '([^<]+)'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    for url, title in matches:
-        scrapedurl = urlparse.urljoin(item.url, url)
-        itemlist.append(Item(channel=item.channel, title="Películas en " + title, action="peliculas", url=scrapedurl,
-                             fanart=item.fanart, viewmode="movie_with_plot"))
-
-    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="", fanart=item.fanart))
-    return itemlist
-
-
-def menuseries(item):
-    logger.info()
-    itemlist = []
-    itemlist.append(Item(channel=item.channel, title="Series - Novedades", action="peliculas",
-                         url="http://www.bajui.org/descargas/categoria/3/series",
-                         fanart=item.fanart, viewmode="movie_with_plot"))
-    itemlist.append(Item(channel=item.channel, title="Series - A-Z", action="peliculas",
-                         url="http://www.bajui.org/descargas/categoria/3/series/orden:nombre",
-                         fanart=item.fanart, viewmode="movie_with_plot"))
-    itemlist.append(Item(channel=item.channel, title="Series - HD", action="peliculas",
-                         url="http://www.bajui.org/descargas/subcategoria/11/hd/orden:nombre",
-                         fanart=item.fanart, viewmode="movie_with_plot"))
-    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
-                         fanart=item.fanart))
-    return itemlist
-
-
-def menudocumentales(item):
-    logger.info()
-    itemlist = []
-    itemlist.append(Item(channel=item.channel, title="Documentales - Novedades", action="peliculas",
-                         url="http://www.bajui.org/descargas/categoria/7/docus-y-tv",
-                         fanart=item.fanart, viewmode="movie_with_plot"))
-    itemlist.append(Item(channel=item.channel, title="Documentales - A-Z", action="peliculas",
-                         url="http://www.bajui.org/descargas/categoria/7/docus-y-tv/orden:nombre",
-                         fanart=item.fanart, viewmode="movie_with_plot"))
-    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
-                         fanart=item.fanart))
-    return itemlist
-
-
-def search(item, texto, categoria=""):
-    logger.info(item.url + " search " + texto)
-    itemlist = []
-    url = item.url
-    texto = texto.replace(" ", "+")
-    logger.info("categoria: " + categoria + " url: " + url)
-    try:
-        item.url = "http://www.bajui.org/descargas/busqueda/%s"
-        item.url = item.url % texto
-        itemlist.extend(peliculas(item))
-        return itemlist
-    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
-
-
-def peliculas(item, paginacion=True):
-    logger.info()
-    url = item.url
-    data = httptools.downloadpage(url).data
-    patron = '
-    if len(matches) > 0:
-        scrapedurl = urlparse.urljoin("http://www.bajui.org/", matches[0])
-        pagitem = Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=scrapedurl,
-                       fanart=item.fanart, viewmode="movie_with_plot")
-        if not paginacion:
-            itemlist.extend(peliculas(pagitem))
-        else:
-            itemlist.append(pagitem)
-
-    return itemlist
-
-
-def clean_plot(scrapedplot):
-    scrapedplot = scrapedplot.replace("\n", "").replace("\r", "")
-    scrapedplot = re.compile("TÍTULO ORIGINAL[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("AÑO[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("Año[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("DURACIÓN[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("Duración[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("PAIS[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("PAÍS[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("Pais[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("País[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("DIRECTOR[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("DIRECCIÓN[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("Dirección[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("REPARTO[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("Reparto[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("Interpretación[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("GUIÓN[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("Guión[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("MÚSICA[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("Música[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("FOTOGRAFÍA[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("Fotografía[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("PRODUCTORA[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("Producción[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("Montaje[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("Vestuario[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("GÉNERO[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("GENERO[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("Genero[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("Género[^<]+<br/>", re.DOTALL).sub("", scrapedplot)
-    scrapedplot = re.compile("PREMIOS[^<]+<br/>
    ", re.DOTALL).sub("", scrapedplot) - - scrapedplot = re.compile("SINOPSIS", re.DOTALL).sub("", scrapedplot) - scrapedplot = re.compile("Sinopsis", re.DOTALL).sub("", scrapedplot) - scrapedplot = scrapertools.htmlclean(scrapedplot) - return scrapedplot - - -def enlaces(item): - logger.info() - itemlist = [] - - data = httptools.downloadpage(item.url).data - - try: - item.plot = scrapertools.get_match(data, '(.*?)') - item.plot = clean_plot(item.plot) - except: - pass - - try: - item.thumbnail = scrapertools.get_match(data, '
    ]+>Mostrar enlaces
    [^<]+' - patron += '
    (.*?)
    ' - - matches = re.compile(patron, re.DOTALL).findall(data) - scrapertools.printMatches(matches) - - for thumbnail, usuario, fecha, id, id2, servidores in matches: - patronservidores = '.*?\(current\).*?href='([^']+)'") - if len(next) > 0: - url = next - itemlist.append(item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", action="buscador", url=url)) - try: - from core import tmdb - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - for item in itemlist: - if not "Siguiente >>" in item.title: - if "0." in str(item.infoLabels['rating']): - item.infoLabels['rating'] = "[COLOR indianred]Sin puntuacíon[/COLOR]" - else: - item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]" - item.title = item.title + " " + str(item.infoLabels['rating']) - except: - pass - return itemlist - - -def scraper(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url, headers=header, cookies=False).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - if item.contentType == "movie": - patron = '' - matches = scrapertools.find_multiple_matches(data, patron) - for url, title, year in matches: - titulo = re.sub(r"\d+\d+\d+\d+|\(.*?\).*", "", title) - title = re.sub(r"!|¡|HD|\d+\d+\d+\d+|\(.*?\).*", "", title) - title = title.replace("Autosia", "Autopsia") - title = re.sub(r"’|PRE-Estreno", "'", title) - new_item = item.clone(action="findvideos", title="[COLOR orange]" + titulo + "[/COLOR]", url=url, - fulltitle=title, contentTitle=title, contentType="movie", extra=year, library=True) - new_item.infoLabels['year'] = get_year(url) - itemlist.append(new_item) - else: - patron = '(?s)

    .*?\(current\).*?href='([^']+)'") - if len(next) > 0: - url = next - - itemlist.append( - item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", thumbnail="http://imgur.com/a7lQAld.png", - url=url)) - try: - from core import tmdb - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - for item in itemlist: - if not "Siguiente >>" in item.title: - if "0." in str(item.infoLabels['rating']): - item.infoLabels['rating'] = "[COLOR indianred]Sin puntuacíon[/COLOR]" - else: - item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]" - item.title = item.title + " " + str(item.infoLabels['rating']) - - except: - pass - return itemlist - - -def findtemporadas(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - if len(item.extra.split("|")): - if len(item.extra.split("|")) >= 4: - fanart = item.extra.split("|")[2] - extra = item.extra.split("|")[3] - try: - fanart_extra = item.extra.split("|")[4] - except: - fanart_extra = item.extra.split("|")[3] - try: - fanart_info = item.extra.split("|")[5] - except: - fanart_extra = item.extra.split("|")[3] - elif len(item.extra.split("|")) == 3: - fanart = item.extra.split("|")[2] - extra = item.extra.split("|")[0] - fanart_extra = item.extra.split("|")[0] - fanart_info = item.extra.split("|")[1] - elif len(item.extra.split("|")) == 2: - fanart = item.extra.split("|")[1] - extra = item.extra.split("|")[0] - fanart_extra = item.extra.split("|")[0] - fanart_info = item.extra.split("|")[1] - else: - extra = item.extra - fanart_extra = item.extra - fanart_info = item.extra - try: - logger.info(fanart_extra) - logger.info(fanart_info) - except: - fanart_extra = item.fanart - fanart_info = item.fanart - bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada (\d+).*?<\/a>(.*?)<\/table>') - for temporada, bloque_epis in bloque_episodios: - item.infoLabels = item.InfoLabels - item.infoLabels['season'] = temporada - itemlist.append(item.clone(action="epis", - title="[COLOR saddlebrown][B]Temporada [/B][/COLOR]" + "[COLOR sandybrown][B]" + temporada + "[/B][/COLOR]", - url=bloque_epis, contentType=item.contentType, contentTitle=item.contentTitle, - show=item.show, extra=item.extra, fanart_extra=fanart_extra, fanart_info=fanart_info, - datalibrary=data, folder=True)) - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - for item in itemlist: - item.fanart = fanart - item.extra = extra - if config.get_videolibrary_support() and itemlist: - if len(bloque_episodios) == 1: - extra = "epis" - else: - extra = "epis###serie_add" - - infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'tvdb_id': item.infoLabels['tvdb_id'], - 'imdb_id': item.infoLabels['imdb_id']} - itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="0xFFe5ffcc", - action="add_serie_to_library", extra=extra, url=item.url, - contentSerieName=item.fulltitle, infoLabels=infoLabels, - thumbnail='http://imgur.com/xQNTqqy.png', datalibrary=data)) - return itemlist - - -def epis(item): - logger.info() - itemlist = [] - if item.extra == "serie_add": - item.url = item.datalibrary - patron = '.*?(\d+x\d+).*?td>' - matches = scrapertools.find_multiple_matches(item.url, patron) - for idioma, url, epi in matches: - episodio = scrapertools.find_single_match(epi, '\d+x(\d+)') - item.infoLabels['episode'] = episodio - itemlist.append( - item.clone(title="[COLOR orange]" + epi + "[/COLOR]" + "[COLOR sandybrown] " + idioma + 
"[/COLOR]", url=url, - action="findvideos", show=item.show, fanart=item.extra, extra=item.extra, - fanart_extra=item.fanart_extra, fanart_info=item.fanart_info, folder=True)) - if item.extra != "serie_add": - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - for item in itemlist: - item.fanart = item.extra - if item.infoLabels['title']: title = "[COLOR burlywood]" + item.infoLabels['title'] + "[/COLOR]" - item.title = item.title + " -- \"" + title + "\"" - return itemlist - - -def findvideos(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - if item.contentType != "movie": - if not item.infoLabels['episode']: - capitulo = scrapertools.find_single_match(item.title, '(\d+x\d+)') - patron = '= 2: - extra = item.extra - else: - extra = item.fanart - else: - capitulo = item.title - url_capitulo = item.url - - ext_v, size = ext_size(url_capitulo) - try: - fanart = item.fanart_extra - except: - fanart = item.extra.split("|")[0] - itemlist.append(Item(channel=item.channel, - title="[COLOR chocolate][B]Ver capítulo " + capitulo + "[/B][/COLOR]" + "-" + "[COLOR khaki] ( Video" + "[/COLOR]" + " " + "[COLOR khaki]" + ext_v + "[/COLOR]" + " " + "[COLOR khaki] " + size + " )" + "[/COLOR]", - url=url_capitulo, action="play", server="torrent", fanart=fanart, thumbnail=item.thumbnail, - extra=item.extra, fulltitle=item.fulltitle, folder=False)) - if item.infoLabels['episode'] and item.library: - thumbnail = scrapertools.find_single_match(item.extra, 'http://assets.fanart.tv/.*jpg') - if thumbnail == "": - thumbnail = item.thumbnail - if not "assets.fanart" in item.fanart_info: - fanart = item.fanart_info - else: - fanart = item.fanart - itemlist.append(Item(channel=item.channel, title="[COLOR darksalmon][B] info[/B][/COLOR]", - action="info_capitulos", fanart=fanart, thumbnail=item.thumb_art, - thumb_info=item.thumb_info, extra=item.extra, show=item.show, - InfoLabels=item.infoLabels, folder=False)) - if not item.infoLabels['episode']: - itemlist.append( - Item(channel=item.channel, title="[COLOR moccasin][B]Todos los episodios[/B][/COLOR]", url=item.url, - action="findtemporadas", server="torrent", - thumbnail=item.thumbnail, extra=item.extra + "|" + item.thumbnail, contentType=item.contentType, - contentTitle=item.contentTitle, InfoLabels=item.infoLabels, thumb_art=item.thumb_art, - thumb_info=item.thumbnail, fulltitle=item.fulltitle, library=item.library, folder=True)) - else: - url = scrapertools.find_single_match(data, '

    .*?href="([^"]+)"') - item.infoLabels['year'] = None - ext_v, size = ext_size(url) - itemlist.append(Item(channel=item.channel, - title="[COLOR saddlebrown][B]Torrent [/B][/COLOR]" + "-" + "[COLOR khaki] ( Video" + "[/COLOR]" + " " + "[COLOR khaki]" + ext_v + "[/COLOR]" + " " + "[COLOR khaki] " + size + " )" + "[/COLOR]", - url=url, action="play", server="torrent", fanart=item.fanart, thumbnail=item.thumbnail, - extra=item.extra, InfoLabels=item.infoLabels, folder=False)) - - if item.library and config.get_videolibrary_support() and len(itemlist) > 0: - infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], - 'title': item.infoLabels['title']} - itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca", - action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels, - text_color="0xFFe5ffcc", - thumbnail='http://imgur.com/xQNTqqy.png')) - return itemlist - - -def info_capitulos(item, images={}): - logger.info() - try: - url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + str(item.InfoLabels['tvdb_id']) + "/default/" + str( - item.InfoLabels['season']) + "/" + str(item.InfoLabels['episode']) + "/es.xml" - if "/0" in url: - url = url.replace("/0", "/") - from core import jsontools - data = httptools.downloadpage(url).data - if "episodes" in data: - image = scrapertools.find_single_match(data, '.*?(.*?)') - image = "http://thetvdb.com/banners/" + image - else: - try: - image = item.InfoLabels['episodio_imagen'] - except: - image = "http://imgur.com/ZiEAVOD.png" - - foto = item.thumb_info - if not ".png" in foto: - foto = "http://imgur.com/PRiEW1D.png" - try: - title = item.InfoLabels['episodio_titulo'] - except: - title = "" - title = "[COLOR red][B]" + title + "[/B][/COLOR]" - - try: - plot = "[COLOR peachpuff]" + str(item.InfoLabels['episodio_sinopsis']) + "[/COLOR]" - except: - plot = scrapertools.find_single_match(data, '(.*?)') - if plot == "": - plot = "Sin información todavia" - try: - rating = item.InfoLabels['episodio_vote_average'] - except: - rating = 0 - try: - if rating >= 5 and rating < 8: - rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR springgreen][B]" + str(rating) + "[/B][/COLOR]" - elif rating >= 8 and rating < 10: - rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR yellow][B]" + str(rating) + "[/B][/COLOR]" - elif rating == 10: - rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR orangered][B]" + str(rating) + "[/B][/COLOR]" - else: - rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]" - except: - rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]" - if "10." in rating: - rating = re.sub(r'10\.\d+', '10', rating) - except: - title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" - plot = "Este capitulo no tiene informacion..." 
- plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]" - image = "http://s6.postimg.cc/ub7pb76c1/noinfo.png" - foto = "http://s6.postimg.cc/nm3gk1xox/noinfosup2.png" - rating = "" - ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating) - ventana.doModal() - - -def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match): - i = 0 - while i < len(text): - m = match(text, i) - s = m.group(m.lastindex) - i = m.end() - if m.lastindex == 2: - yield "s" - yield text[i:i + int(s)] - i = i + int(s) - else: - yield s - - -def decode_item(next, token): - if token == "i": - # integer: "i" value "e" - data = int(next()) - if next() != "e": - raise ValueError - elif token == "s": - # string: "s" value (virtual tokens) - data = next() - elif token == "l" or token == "d": - # container: "l" (or "d") values "e" - data = [] - tok = next() - while tok != "e": - data.append(decode_item(next, tok)) - tok = next() - if token == "d": - data = dict(zip(data[0::2], data[1::2])) - else: - raise ValueError - return data - - -def decode(text): - try: - src = tokenize(text) - data = decode_item(src.next, src.next()) - for token in src: # look for more tokens - raise SyntaxError("trailing junk") - except (AttributeError, ValueError, StopIteration): - try: - data = data - except: - data = src - return data - - -def convert_size(size): - import math - if (size == 0): - return '0B' - size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") - i = int(math.floor(math.log(size, 1024))) - p = math.pow(1024, i) - s = round(size / p, 2) - return '%s %s' % (s, size_name[i]) - - -def get_year(url): - data = httptools.downloadpage(url, headers=header, cookies=False).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - year = scrapertools.find_single_match(data, '

    .*?(\d+\d+\d+\d+)') - if year == "": - year = " " - return year - - -def ext_size(url): - torrents_path = config.get_videolibrary_path() + '/torrents' - if not os.path.exists(torrents_path): - os.mkdir(torrents_path) - try: - urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url, torrents_path + "/temp.torrent") - pepe = open(torrents_path + "/temp.torrent", "rb").read() - except: - pepe = "" - torrent = decode(pepe) - try: - name = torrent["info"]["name"] - sizet = torrent["info"]['length'] - sizet = convert_size(sizet) - except: - name = "no disponible" - try: - check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), "'length': (\d+)}") - size = max([int(i) for i in check_video]) - for file in torrent["info"]["files"]: - manolo = "%r - %d bytes" % ("/".join(file["path"]), file["length"]) - if str(size) in manolo: - video = manolo - size = convert_size(size) - ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\[.*?\]|\(.*?\)|.*?\.", "", video) - try: - os.remove(torrents_path + "/temp.torrent") - except: - pass - except: - try: - size = sizet - ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\.es.|.*?\[.*?\]|.*?\(.*?\)\.|.*?\.", "", name) - except: - size = "NO REPRODUCIBLE" - ext_v = "" - try: - os.remove(torrents_path + "/temp.torrent") - except: - pass - if "rar" in ext_v: - ext_v = ext_v + " -- No reproducible" - size = "" - return ext_v, size - - -def newest(categoria): - logger.info() - itemlist = [] - item = Item() - try: - if categoria == 'torrent': - item.url = host + '/peliculas/' - item.contentType="movie" - itemlist = scraper(item) - if itemlist[-1].title == "[COLOR springgreen][B]Siguiente >>[/B][/COLOR]": - itemlist.pop() - # Se captura la excepción, para no interrumpir al canal novedades si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - return itemlist diff --git a/plugin.video.alfa/channels/doomtv.py b/plugin.video.alfa/channels/doomtv.py index 4a9d5a38..d8c4907d 100644 --- a/plugin.video.alfa/channels/doomtv.py +++ b/plugin.video.alfa/channels/doomtv.py @@ -35,7 +35,9 @@ def mainlist(item): action="lista", thumbnail=get_thumb('all', auto=True), fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png', - url='%s%s'%(host,'peliculas/page/1') + url='%s%s'%(host,'peliculas/'), + first=0 + )) itemlist.append( @@ -43,7 +45,7 @@ def mainlist(item): action="seccion", thumbnail=get_thumb('genres', auto=True), fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png', - url='%s%s' % (host, 'peliculas/page/1'), + url='%s%s' % (host, 'peliculas/'), )) itemlist.append( @@ -51,7 +53,8 @@ def mainlist(item): action="lista", thumbnail=get_thumb('more watched', auto=True), fanart='https://s9.postimg.cc/wmhzu9d7z/vistas.png', - url='%s%s'%(host,'top-imdb/page/1'), + url='%s%s'%(host,'top-imdb/'), + first=0 )) itemlist.append( @@ -69,9 +72,7 @@ def lista(item): logger.info() itemlist = [] - max_items = 20 - next_page_url = '' - + next = False data = httptools.downloadpage(item.url).data data = re.sub(r'"|\n|\r|\t| |
    |\s{2,}', "", data) @@ -80,23 +81,13 @@ def lista(item): matches = re.compile(patron, re.DOTALL).findall(data) - if item.next_page != 'b': - if len(matches) > max_items: - next_page_url = item.url - matches = matches[:max_items] - next_page = 'b' - else: - matches = matches[max_items:] - next_page = 'a' - next_page_str = scrapertools.find_single_match(data,"
  • (\d+)") - next_page_num = int(next_page_str)+1 - page_base = re.sub(r'(page\/\d+)','', item.url) - next_page_url = '%s%s%s'%(page_base,'page/',next_page_num) + first = item.first + last = first + 19 + if last > len(matches): + last = len(matches) + next = True - if next_page_url: - next_page_url = next_page_url - - for scrapedurl, quality, scrapedthumbnail, scrapedtitle, plot in matches: + for scrapedurl, quality, scrapedthumbnail, scrapedtitle, plot in matches[first:last]: url = scrapedurl thumbnail = scrapedthumbnail @@ -118,17 +109,17 @@ def lista(item): contentTitle=title )) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) - # Paginacion - if next_page_url != '': - itemlist.append( - Item(channel=item.channel, - action="lista", - title='Siguiente >>>', - url=next_page_url, - thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png', - extra=item.extra, - next_page=next_page - )) + + if not next: + url_next_page = item.url + first = last + else: + url_next_page = scrapertools.find_single_match(data, "") + first = 0 + + if url_next_page: + itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', first=first)) + return itemlist @@ -153,7 +144,8 @@ def seccion(item): action='lista', title=title, url=url, - thumbnail=thumbnail + thumbnail=thumbnail, + first=0 )) return itemlist @@ -162,6 +154,7 @@ def search(item, texto): logger.info() texto = texto.replace(" ", "+") item.url = item.url + texto + item.first=0 if texto != '': return lista(item) @@ -178,6 +171,7 @@ def newest(categoria): item.url = host + 'categoria/animacion/' elif categoria == 'terror': item.url = host + '/categoria/terror/' + item.first=0 itemlist = lista(item) if itemlist[-1].title == 'Siguiente >>>': itemlist.pop() diff --git a/plugin.video.alfa/channels/guaridavalencianista.json b/plugin.video.alfa/channels/guaridavalencianista.json deleted file mode 100755 index 30e2e9d2..00000000 --- a/plugin.video.alfa/channels/guaridavalencianista.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "id": "guaridavalencianista", - "name": "La Guarida valencianista", - "active": false, - "adult": false, - "language": ["cast"], - "thumbnail": "guaridavalencianista.png", - "banner": "guaridavalencianista.png", - "categories": [ - "documentary" - ] -} diff --git a/plugin.video.alfa/channels/guaridavalencianista.py b/plugin.video.alfa/channels/guaridavalencianista.py deleted file mode 100755 index aa52d5da..00000000 --- a/plugin.video.alfa/channels/guaridavalencianista.py +++ /dev/null @@ -1,177 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import logger - - -def mainlist(item): - logger.info() - itemlist = [] - - itemlist.append(Item(channel=item.channel, title="Novedades", action="listvideos", - url="http://guaridavalencia.blogspot.com.es")) - # itemlist.append( Item(channel=item.channel, title="Documentales - Series Disponibles" , action="DocuSeries" , url="http://guaridavalencia.blogspot.com/")) - itemlist.append( - Item(channel=item.channel, title="Categorias", action="DocuTag", url="http://guaridavalencia.blogspot.com.es")) - itemlist.append(Item(channel=item.channel, title="Partidos de liga (Temporada 2014/2015)", action="listvideos", - url="http://guaridavalencia.blogspot.com.es/search/label/PARTIDOS%20DEL%20VCF%20%28TEMPORADA%202014-15%29")) - - return itemlist - - -def DocuSeries(item): - logger.info() - itemlist = [] - - # Descarga la página - data = 
scrapertools.cache_page(item.url) - - # Extrae las entradas (carpetas) - patronvideos = '
([^<]+)
  • ' - matches = re.compile(patronvideos, re.DOTALL).findall(data) - scrapertools.printMatches(matches) - - for match in matches: - scrapedurl = match[0] - scrapedtitle = match[1] - scrapedthumbnail = "" - scrapedplot = "" - logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") - itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) - - return itemlist - - -def DocuTag(item): - logger.info() - itemlist = [] - # Descarga la página - data = scrapertools.cache_page(item.url) - # ~ patronvideos = "([^<]+)[^<]+(.+?)" - patronvideos = "([^<]+)[^0-9]+([0-9]+)[^<]+(.+?)" - matches = re.compile(patronvideos, re.DOTALL).findall(data) - scrapertools.printMatches(matches) - - for match in matches: - scrapedurl = match[0] - # Se debe quitar saltos de linea en match[1] - scrapedtitle = match[1][1:-1] + " (" + match[2] + ")" - # ~ scrapedtitle = match[1] - scrapedthumbnail = "" - scrapedplot = "" - logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") - itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) - - return itemlist - - -def DocuARCHIVO(item): - logger.info() - itemlist = [] - - # Descarga la página - data = scrapertools.cache_page(item.url) - patronvideos = "([^<]+)[^<]+" - patronvideos += "(.+?)" - matches = re.compile(patronvideos, re.DOTALL).findall(data) - scrapertools.printMatches(matches) - - for match in matches: - scrapedurl = match[0] - scrapedtitle = match[1] + " " + match[2] - scrapedthumbnail = "" - scrapedplot = "" - logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") - itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) - - return itemlist - - -def listvideos(item): - logger.info() - itemlist = [] - - scrapedthumbnail = "" - scrapedplot = "" - - # Descarga la página - data = scrapertools.cache_page(item.url) - patronvideos = "

    ]+>", " ", scrapedtitle) - scrapedtitle = scrapertools.unescape(scrapedtitle)[1:-1] - scrapedurl = match[0] - regexp = re.compile(r'src="(http[^"]+)"') - - matchthumb = regexp.search(match[2]) - if matchthumb is not None: - scrapedthumbnail = matchthumb.group(1) - matchplot = re.compile('
    (
    ', re.DOTALL).findall(match[2]) - - if len(matchplot) > 0: - scrapedplot = matchplot[0] - # print matchplot - else: - scrapedplot = "" - - scrapedplot = re.sub("<[^>]+>", " ", scrapedplot) - scrapedplot = scrapertools.unescape(scrapedplot) - logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") - itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) - - # Extrae la marca de siguiente página - patronvideos = " 0: - scrapedtitle = "Página siguiente" - scrapedurl = urlparse.urljoin(item.url, matches[0]) - scrapedthumbnail = "" - scrapedplot = "" - itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) - - return itemlist - - # ~ return itemlist - - -def findvideos(item): - logger.info() - data = scrapertools.cachePage(item.url) - - # Busca los enlaces a los videos - - listavideos = servertools.findvideos(data) - - if item is None: - item = Item() - - itemlist = [] - for video in listavideos: - scrapedtitle = video[0].strip() + " - " + item.title.strip() - scrapedurl = video[1] - server = video[2] - - itemlist.append(Item(channel=item.channel, title=scrapedtitle, action="play", server=server, url=scrapedurl, - thumbnail=item.thumbnail, show=item.show, plot=item.plot, folder=False)) - - return itemlist diff --git a/plugin.video.alfa/channels/hdfull.py b/plugin.video.alfa/channels/hdfull.py index f02dff6f..a05cd914 100644 --- a/plugin.video.alfa/channels/hdfull.py +++ b/plugin.video.alfa/channels/hdfull.py @@ -28,16 +28,12 @@ def settingCanal(item): def login(): logger.info() - data = agrupa_datos(httptools.downloadpage(host).data) - patron = "" sid = scrapertools.find_single_match(data, patron) - post = urllib.urlencode({'__csrf_magic': sid}) + "&username=" + config.get_setting('hdfulluser', 'hdfull') + "&password=" + config.get_setting( 'hdfullpassword', 'hdfull') + "&action=login" - httptools.downloadpage(host, post=post) @@ -56,15 +52,12 @@ def mainlist(item): else: login() itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url="")) - return itemlist def menupeliculas(item): logger.info() - itemlist = [] - if account: itemlist.append(Item(channel=item.channel, action="items_usuario", title="[COLOR orange][B]Favoritos[/B][/COLOR]", @@ -72,7 +65,6 @@ def menupeliculas(item): itemlist.append(Item(channel=item.channel, action="items_usuario", title="[COLOR orange][B]Pendientes[/B][/COLOR]", url=host + "/a/my?target=movies&action=pending&start=-28&limit=28", folder=True)) - itemlist.append(Item(channel=item.channel, action="fichas", title="ABC", url=host + "/peliculas/abc", folder=True)) itemlist.append( Item(channel=item.channel, action="fichas", title="Últimas películas", url=host + "/peliculas", folder=True)) @@ -89,15 +81,12 @@ def menupeliculas(item): itemlist.append(Item(channel=item.channel, action="items_usuario", title="[COLOR orange][B]Vistas[/B][/COLOR]", url=host + "/a/my?target=movies&action=seen&start=-28&limit=28", folder=True)) - return itemlist def menuseries(item): logger.info() - itemlist = [] - if account: itemlist.append(Item(channel=item.channel, action="items_usuario", title="[COLOR orange][B]Siguiendo[/B][/COLOR]", @@ -105,9 +94,7 @@ def menuseries(item): itemlist.append(Item(channel=item.channel, action="items_usuario", title="[COLOR orange][B]Para 
Ver[/B][/COLOR]", url=host + "/a/my?target=shows&action=watch&start=-28&limit=28", folder=True)) - itemlist.append(Item(channel=item.channel, action="series_abc", title="A-Z", folder=True)) - itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos Emitidos", url=host + "/a/episodes?action=latest&start=-24&limit=24&elang=ALL", folder=True)) itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Episodios Estreno", @@ -132,20 +119,16 @@ def menuseries(item): itemlist.append(Item(channel=item.channel, action="items_usuario", title="[COLOR orange][B]Vistas[/B][/COLOR]", url=host + "/a/my?target=shows&action=seen&start=-28&limit=28", folder=True)) - return itemlist def search(item, texto): logger.info() - data = agrupa_datos(httptools.downloadpage(host).data) - sid = scrapertools.get_match(data, '.__csrf_magic. value="(sid:[^"]+)"') item.extra = urllib.urlencode({'__csrf_magic': sid}) + '&menu=search&query=' + texto item.title = "Buscar..." item.url = host + "/buscar" - try: return fichas(item) # Se captura la excepción, para no interrumpir al buscador global si un canal falla @@ -158,59 +141,44 @@ def search(item, texto): def series_abc(item): logger.info() - itemlist = [] - az = "ABCDEFGHIJKLMNOPQRSTUVWXYZ#" - for l in az: itemlist.append( Item(channel=item.channel, action='fichas', title=l, url=host + "/series/abc/" + l.replace('#', '9'))) - return itemlist def items_usuario(item): logger.info() - itemlist = [] ## Carga estados status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data) - ## Fichas usuario url = item.url.split("?")[0] post = item.url.split("?")[1] - old_start = scrapertools.get_match(post, 'start=([^&]+)&') limit = scrapertools.get_match(post, 'limit=(\d+)') start = "%s" % (int(old_start) + int(limit)) - post = post.replace("start=" + old_start, "start=" + start) next_page = url + "?" 
+ post - ## Carga las fichas de usuario data = httptools.downloadpage(url, post=post).data fichas_usuario = jsontools.load(data) - for ficha in fichas_usuario: - try: title = ficha['title']['es'].strip() except: title = ficha['title']['en'].strip() - try: title = title.encode('utf-8') except: pass - show = title - try: thumbnail = host + "/thumbs/" + ficha['thumbnail'] except: thumbnail = host + "/thumbs/" + ficha['thumb'] - try: url = urlparse.urljoin(host, '/serie/' + ficha['permalink']) + "###" + ficha['id'] + ";1" action = "episodios" @@ -237,37 +205,26 @@ def items_usuario(item): action = "findvideos" str = get_status(status, 'movies', ficha['id']) if str != "": title += str - - # try: title = title.encode('utf-8') - # except: pass - itemlist.append( Item(channel=item.channel, action=action, title=title, fulltitle=title, url=url, thumbnail=thumbnail, show=show, folder=True)) - if len(itemlist) == int(limit): itemlist.append( Item(channel=item.channel, action="items_usuario", title=">> Página siguiente", url=next_page, folder=True)) - return itemlist def listado_series(item): logger.info() - itemlist = [] - data = agrupa_datos(httptools.downloadpage(item.url).data) - patron = '' matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle in matches: url = scrapedurl + "###0;1" itemlist.append( Item(channel=item.channel, action="episodios", title=scrapedtitle, fulltitle=scrapedtitle, url=url, show=scrapedtitle, contentType="tvshow")) - return itemlist @@ -278,22 +235,19 @@ def fichas(item): infoLabels=dict() ## Carga estados status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data) - if item.title == "Buscar...": data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra).data) s_p = scrapertools.get_match(data, '

    (.*?)
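
Note (doomtv pagination): the lista() hunk above drops the old scheme of
scraping "page/N" numbers out of the markup. Instead it walks the already
scraped matches 19 at a time through an item.first index, reusing the same
URL until those entries run out and only then following the site's real
next-page link. A minimal standalone sketch of that scheme; PAGE_SIZE,
make_page and find_next_page_url are illustrative stand-ins, not the
plugin's real objects:

PAGE_SIZE = 19  # lista() shows matches[first:first + 19] per screen

def make_page(matches, url, first, find_next_page_url):
    # Slice one screen's worth of entries out of the scraped matches.
    last = min(first + PAGE_SIZE, len(matches))
    visible = matches[first:last]
    if last < len(matches):
        # Entries left over: the next screen reuses the same URL and just
        # advances the start index, so no extra HTTP request is needed.
        next_action = {"url": url, "first": last}
    else:
        # Current page exhausted: follow the site's own pagination link
        # and restart the index at 0 (None when there is no such link).
        next_url = find_next_page_url()
        next_action = {"url": next_url, "first": 0} if next_url else None
    return visible, next_action

For example, with 30 scraped matches the first call returns entries 0-18
plus {"url": url, "first": 19}; the second returns 19-29 plus the scraped
next-page URL with first reset to 0, which is why mainlist(), search() and
newest() in the diff all seed first=0.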
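
Note (hdfull login): login() and search() in the hdfull hunks both scrape
the per-session __csrf_magic token ("sid:...") out of the page and echo it
back as the first field of the POST body; without it the site ignores the
request. A standalone sketch of that token dance in the plugin's Python 2
style; build_login_post and page_html are hypothetical stand-ins (the real
channel reads credentials via config.get_setting and posts through
httptools.downloadpage):

import re
import urllib

def build_login_post(page_html, username, password):
    # The token is embedded in the form markup; the diff's search() scrapes
    # it with the loose pattern .__csrf_magic. value="(sid:[^"]+)" so the
    # surrounding quoting style does not matter.
    match = re.search(r'.__csrf_magic. value="(sid:[^"]+)"', page_html)
    if not match:
        return None  # no token: the POST would just bounce back to the form
    sid = match.group(1)
    # Token first, then the credentials, mirroring login() in the patch.
    return (urllib.urlencode({'__csrf_magic': sid})
            + "&username=" + username + "&password=" + password
            + "&action=login")

build_login_post(data, user, password) yields the body handed to the POST
call (httptools.downloadpage(host, post=post) in the channel), or None when
the form markup has changed and the token could not be found.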