From e24311b20e297031a8aefede0f85e37af11b77c2 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Fri, 26 Jan 2018 16:50:51 -0500
Subject: [PATCH] animeid: fix

---
 plugin.video.alfa/channels/animeid.py | 225 +++++++------------------
 1 file changed, 62 insertions(+), 163 deletions(-)

diff --git a/plugin.video.alfa/channels/animeid.py b/plugin.video.alfa/channels/animeid.py
index 520d8c3c..5a9400ca 100755
--- a/plugin.video.alfa/channels/animeid.py
+++ b/plugin.video.alfa/channels/animeid.py
@@ -4,25 +4,25 @@
 import re
 import urlparse
 
 from core import httptools
+from core import jsontools
 from core import scrapertools
+from core import servertools
 from core.item import Item
 from platformcode import config, logger
 
-CHANNEL_HOST = "http://animeid.tv/"
-
+CHANNEL_HOST = "https://www.animeid.tv/"
 
 def mainlist(item):
     logger.info()
-
     itemlist = list()
     itemlist.append(
-        Item(channel=item.channel, action="novedades_series", title="Últimas series", url="http://www.animeid.tv/"))
+        Item(channel=item.channel, action="novedades_series", title="Últimas series", url=CHANNEL_HOST))
     itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios",
-                         url="http://www.animeid.tv/", viewmode="movie_with_plot"))
+                         url=CHANNEL_HOST, viewmode="movie_with_plot"))
     itemlist.append(
-        Item(channel=item.channel, action="generos", title="Listado por genero", url="http://www.animeid.tv/"))
+        Item(channel=item.channel, action="generos", title="Listado por genero", url=CHANNEL_HOST))
     itemlist.append(
-        Item(channel=item.channel, action="letras", title="Listado alfabetico", url="http://www.animeid.tv/"))
+        Item(channel=item.channel, action="letras", title="Listado alfabetico", url=CHANNEL_HOST))
     itemlist.append(Item(channel=item.channel, action="search", title="Buscar..."))
     return itemlist
@@ -33,7 +33,7 @@ def newest(categoria):
     item = Item()
     try:
         if categoria == 'anime':
-            item.url = "http://animeid.tv/"
+            item.url = CHANNEL_HOST
             itemlist = novedades_episodios(item)
     # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
     except:
@@ -50,19 +50,17 @@ def search(item, texto):
     itemlist = []
     if item.url == "":
-        item.url = "http://www.animeid.tv/ajax/search?q="
+        item.url = CHANNEL_HOST + "ajax/search?q="
     texto = texto.replace(" ", "+")
     item.url = item.url + texto
     try:
         headers = []
         headers.append(
             ["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:19.0) Gecko/20100101 Firefox/19.0"])
-        headers.append(["Referer", "http://www.animeid.tv/"])
+        headers.append(["Referer", CHANNEL_HOST])
         headers.append(["X-Requested-With", "XMLHttpRequest"])
         data = scrapertools.cache_page(item.url, headers=headers)
         data = data.replace("\\", "")
-        logger.debug("data=" + data)
-
         patron = '{"id":"([^"]+)","text":"([^"]+)","date":"[^"]*","image":"([^"]+)","link":"([^"]+)"}'
         matches = re.compile(patron, re.DOTALL).findall(data)
@@ -71,8 +69,6 @@
             url = urlparse.urljoin(item.url, scrapedurl)
             thumbnail = scrapedthumbnail
             plot = ""
-            logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
-
             itemlist.append(
                 Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                      plot=plot, show=title, viewmode="movie_with_plot"))
@@ -89,240 +85,146 @@ def novedades_series(item):
     logger.info()
-
-    # Descarga la pagina
-    data = httptools.downloadpage(item.url).data
-    data = scrapertools.get_match(data, '<...>(.*?)<...>')
-    patronvideos = '<li><a href="([^"]+)"><...>([^<]+)<...>([^<]+)'
-    matches = re.compile(patronvideos, re.DOTALL).findall(data)
     itemlist = []
-
+    data = httptools.downloadpage(item.url).data
+    data = scrapertools.find_single_match(data, '<...>(.*?)<...>')
+    patronvideos = '(?s)<a href="([^"]+)">.*?tipo\d+">([^<]+)<...>.*?<...>([^<]+)'
+    matches = re.compile(patronvideos, re.DOTALL).findall(data)
     for url, tipo, title in matches:
         scrapedtitle = title + " (" + tipo + ")"
         scrapedurl = urlparse.urljoin(item.url, url)
-        scrapedthumbnail = ""
-        scrapedplot = ""
-        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-
         itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=scrapedplot, show=title, viewmode="movie_with_plot"))
-
+                             show=title, viewmode="movie_with_plot"))
     return itemlist
 
 
 def novedades_episodios(item):
     logger.info()
-
-    # Descarga la pagina
-    # <...>Uchuu Kyoudai #35<...>Uchuu Kyoudai<...>
-    # <...>Una noche en el año 2006, cuando eran jovenes, los dos hermanos Mutta (el mayor) y Hibito (el menor)
-    # vieron un OVNI que hiba en dirección hacia la luna. Esa misma noche decidieron que ellos se convertirian en
-    # astronautas y irian al espacio exterior. En el año 2050, Hibito se ha convertido en astronauta y que ademas
-    # está incluido en una misión que irá a la luna. En cambio Mutta siguió una carrera mas tradicional, y terminó
-    # trabajando en una compañia de fabricación de automoviles. Sin embargo, Mutta termina arruinando su carrera
-    # por ciertos problemas que tiene con su jefe. Ahora bien, no sólo perdió su trabajo si no que fue incluido en
-    # la lista negra de la industria laboral. Pueda ser que esta sea su unica oportunidad que tenga Mutta de volver
-    # a perseguir su sueño de la infancia y convertirse en astronauta, al igual que su perqueño hermano Hibito.<...>
-    # <...>(.*?)')
-
-    patronvideos = '<a href="([^"]+)">[^<]+<...>([^<]+)<...>[^<]+<img[^>]+src="([^"]+)"[\s\S]+?<...>(.+?)<...>'
+    data = httptools.downloadpage(item.url).data
+    data = scrapertools.find_single_match(data, '<...>(.*?)<...>')
+    patronvideos = '(?s)<a href="([^"]+)">[^<]+<...>([^<]+).*?src="([^"]+)"[\s\S]+?<...>(.+?)<...>'
     matches = re.compile(patronvideos, re.DOTALL).findall(data)
     itemlist = []
-
     for url, title, thumbnail, plot in matches:
         scrapedtitle = scrapertools.entityunescape(title)
         scrapedurl = urlparse.urljoin(item.url, url)
         scrapedthumbnail = thumbnail
         scrapedplot = plot
-        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-
-        episodio = scrapertools.get_match(scrapedtitle, '\s+#(.*?)$')
+        episodio = scrapertools.find_single_match(scrapedtitle, '\s+#(.*?)$')
         contentTitle = scrapedtitle.replace('#' + episodio, '')
-
         itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, plot=scrapedplot, contentSeason=1,
                              contentTitle=contentTitle))
-
     return itemlist
 
 
 def generos(item):
     logger.info()
-
-    # Descarga la pagina
-    data = httptools.downloadpage(item.url).data
-    data = scrapertools.get_match(data, '<...>(.*?)<...>')
-    patronvideos = '<li><a href="([^"]+)">([^<]+)'
-    matches = re.compile(patronvideos, re.DOTALL).findall(data)
     itemlist = []
-
+    data = httptools.downloadpage(item.url).data
+    data = scrapertools.find_single_match(data, '<...>(.*?)<...>')
+    patronvideos = '(?s)<a href="([^"]+)">([^<]+)'
+    matches = re.compile(patronvideos, re.DOTALL).findall(data)
     for url, title in matches:
         scrapedtitle = title
         scrapedurl = urlparse.urljoin(item.url, url)
         scrapedthumbnail = ""
         scrapedplot = ""
-        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-
         itemlist.append(
             Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                  plot=scrapedplot, show=title, viewmode="movie_with_plot"))
-
     return itemlist
 
 
 def letras(item):
     logger.info()
-
-    # Descarga la pagina
     data = httptools.downloadpage(item.url).data
-    data = scrapertools.get_match(data, '<...>(.*?)<...>')
+    data = scrapertools.find_single_match(data, '<...>(.*?)<...>')
     patronvideos = '<li><a href="([^"]+)">([^<]+)'
     matches = re.compile(patronvideos, re.DOTALL).findall(data)
     itemlist = []
-
     for url, title in matches:
         scrapedtitle = title
         scrapedurl = urlparse.urljoin(item.url, url)
-        scrapedthumbnail = ""
-        scrapedplot = ""
-        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-
         itemlist.append(
-            Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
-                 plot=scrapedplot, show=title, viewmode="movie_with_plot"))
+            Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl,
+                 show=title, viewmode="movie_with_plot"))
     return itemlist
 
 
 def series(item):
     logger.info()
-
-    # Descarga la pagina
-
     data = httptools.downloadpage(item.url).data
-    logger.debug("datito %s" % data)
-
-    '''
-    <...>
-        <...>Aoi Sekai no Chuushin de<...>
-        <...>
-        <...>
-        <...>
-        El Reino de Segua ha ido perdiendo la guerra contra el Imperio de Ninterdo pero la situación ha cambiado
-        con la aparición de un chico llamado Gear. Todos los personajes son parodias de protas de videojuegos de
-        Nintendo y Sega respectivamente, como lo son Sonic the Hedgehog, Super Mario Bros., The Legend of Zelda,
-        etc.
-        <...>
-    <...>
-    '''
-    patron = '<li><a href="([^"]+)">'
     itemlist = []
     try:
-        page_url = scrapertools.get_match(data, patron)
+        page_url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">><li>')
         itemlist.append(Item(channel=item.channel, action="series", title=">> Página siguiente",
                              url=urlparse.urljoin(item.url, page_url), viewmode="movie_with_plot", thumbnail="",
                              plot=""))
     except:
         pass
-
     return itemlist
 
 
 def episodios(item, final=True):
     logger.info()
-
-    # Descarga la pagina
-
-    body = httptools.downloadpage(item.url).data
-
-    try:
-        scrapedplot = scrapertools.get_match(body, '<...>(.*?)<...>')
-    patron = '<li><a href="([^"]+)">(.*?)<...><li>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
     itemlist = []
-
-    for url, title in matches:
-        scrapedtitle = scrapertools.htmlclean(title)
-
-        try:
-            episodio = scrapertools.get_match(scrapedtitle, "Capítulo\s+(\d+)")
-            titulo_limpio = re.compile("Capítulo\s+(\d+)\s+", re.DOTALL).sub("", scrapedtitle)
-            if len(episodio) == 1:
-                scrapedtitle = "1x0" + episodio + " - " + titulo_limpio
-            else:
-                scrapedtitle = "1x" + episodio + " - " + titulo_limpio
-        except:
-            pass
-
-        scrapedurl = urlparse.urljoin(item.url, url)
-        # scrapedthumbnail = ""
-        # scrapedplot = ""
-        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-
-        itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=scrapedplot, show=item.show))
-
-    try:
-        next_page = scrapertools.get_match(body, '<...>\>\;')
-        next_page = urlparse.urljoin(item.url, next_page)
-        item2 = Item(channel=item.channel, action="episodios", title=item.title, url=next_page,
-                     thumbnail=item.thumbnail, plot=item.plot, show=item.show, viewmode="movie_with_plot")
-        itemlist.extend(episodios(item2, final=False))
-    except:
-        import traceback
-        logger.error(traceback.format_exc())
-
-    if final and config.get_videolibrary_support():
+    data = httptools.downloadpage(item.url).data
+    data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)')
+    CHANNEL_HEADERS = [
+        ["Host", "m.animeid.tv"],
+        ["X-Requested-With", "XMLHttpRequest"]
+    ]
+    page = 0
+    while True:
+        page += 1
+        u = "https://m.animeid.tv/ajax/caps?id=%s&ord=DESC&pag=%s" %(data_id, page)
+        data = httptools.downloadpage(u, headers=CHANNEL_HEADERS).data
+        # Cuando ya no hay datos devuelve: "list":[]
+        if '"list":[]' in data:
+            break
+        dict_data = jsontools.load(data)
+        list = dict_data['list']
+        for dict in list:
+            itemlist.append(Item(action = "findvideos",
+                                 channel = item.channel,
+                                 title = "1x" + dict["numero"] + " - " + dict["date"],
+                                 url = CHANNEL_HOST + dict['href'],
+                                 thumbnail = item.thumbnail,
+                                 show = item.show,
+                                 viewmode = "movie_with_plot"
+                                 ))
+    if config.get_videolibrary_support():
         itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
                              action="add_serie_to_library", extra="episodios", show=item.show))
         itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url,
                              action="download_all_episodes", extra="episodios", show=item.show))
-
     return itemlist
 
 
 def findvideos(item):
     logger.info()
-
-    data = httptools.downloadpage(item.url).data
     itemlist = []
-
+    data = httptools.downloadpage(item.url).data
     url_anterior = scrapertools.find_single_match(data, '<li><a href="([^"]+)">« Capítulo anterior')
     url_siguiente = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Siguiente capítulo »')
-
-    data = scrapertools.find_single_match(data, '<...>(.*?)<...>')
-    data = data.replace("\\/", "/")
-    data = data.replace("%3A", ":")
-    data = data.replace("%2F", "/")
-    logger.info("data=" + data)
-
-    # http%3A%2F%2Fwww.animeid.moe%2Fstream%2F41TLmCj7_3q4BQLnfsban7%2F1440956023.mp4
-    # http://www.animeid.moe/stream/41TLmCj7_3q4BQLnfsban7/1440956023.mp4
-    # http://www.animeid.tv/stream/oiW0uG7yqBrg5TVM5Cm34n/1385370686.mp4
-    patron = '(http://www.animeid.tv/stream/[^/]+/\d+.[a-z0-9]+)'
-    matches = re.compile(patron, re.DOTALL).findall(data)
+    data = scrapertools.find_single_match(data, '<...>(.*?)<...>').decode("unicode-escape")
+    data = data.replace("\\/", "/").replace("%3A", ":").replace("%2F", "/")
+    patron = '(https://www.animeid.tv/stream/[^/]+/\d+.[a-z0-9]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
     encontrados = set()
     for url in matches:
         if url not in encontrados:
@@ -330,15 +232,12 @@
                 Item(channel=item.channel, action="play", title="[directo]", server="directo", url=url,
                      thumbnail="", plot="", show=item.show, folder=False))
             encontrados.add(url)
-
-    from core import servertools
     itemlist.extend(servertools.find_video_items(data=data))
     for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.title = "[" + videoitem.server + "]"
-
    if url_anterior:
        title_anterior = url_anterior.strip("/v/").replace('-', ' ').strip('.html')
        itemlist.append(Item(channel=item.channel, action="findvideos", title="Anterior: " + title_anterior,
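
The heart of this fix is the new episodios(): instead of scraping the episode
list out of the series page HTML, it pages through the site's mobile AJAX
endpoint until the endpoint answers with an empty list. Below is a minimal
standalone sketch of that loop, for reference outside the addon. Assumptions:
requests stands in for the addon's httptools wrapper, list_episodes and
serie_url are hypothetical names, and the endpoint, headers, and JSON fields
(data-id, numero, date, href) are the ones the patch itself relies on.

    # Sketch: paged fetch against m.animeid.tv/ajax/caps, as introduced above.
    import re
    import requests

    AJAX_CAPS = "https://m.animeid.tv/ajax/caps?id=%s&ord=DESC&pag=%s"
    HEADERS = {"Host": "m.animeid.tv", "X-Requested-With": "XMLHttpRequest"}

    def list_episodes(serie_url):
        # The series page exposes its numeric id in a data-id attribute.
        html = requests.get(serie_url).text
        serie_id = re.search(r'data-id="([^"]+)', html).group(1)
        episodes = []
        page = 0
        while True:
            page += 1
            payload = requests.get(AJAX_CAPS % (serie_id, page), headers=HEADERS).json()
            # An empty "list" array marks the end of the listing.
            if not payload["list"]:
                break
            for cap in payload["list"]:
                # Mirrors the patch: title "1x<numero> - <date>", url CHANNEL_HOST + href.
                episodes.append(("1x%s - %s" % (cap["numero"], cap["date"]),
                                 "https://www.animeid.tv/" + cap["href"]))
        return episodes

The sketch tests the decoded JSON for an empty list where the patch
short-circuits on the raw '"list":[]' substring before parsing; both express
the same stop condition, and working on the parsed payload also avoids the
patch's shadowing of the list and dict builtins in its inner loop.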