diff --git a/plugin.video.alfa/channels/animeyt.json b/plugin.video.alfa/channels/animeyt.json
deleted file mode 100644
index 54ead4c4..00000000
--- a/plugin.video.alfa/channels/animeyt.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
-    "id": "animeyt",
-    "name": "AnimeYT",
-    "active": true,
-    "adult": false,
-    "language": "cast, lat",
-    "thumbnail": "http://i.imgur.com/dHpupFk.png",
-    "categories": [
-        "anime",
-        "vos"
-    ],
-    "settings": [
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Incluir en busqueda global",
-            "default": false,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "modo_grafico",
-            "type": "bool",
-            "label": "información extra",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        }
-    ]
-}
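The `settings` block in the JSON above is read back by the Python module deleted below through alfa's `platformcode.config` helper (the module itself calls `config.get_setting('modo_grafico', 'animeyt')`). A minimal sketch of that round trip; the second lookup is an assumption, since the code that consults the global-search flag lives outside this diff:

```python
# Sketch only: how a channel reads the settings declared in its JSON.
from platformcode import config

# Returns the user's value for the 'modo_grafico' toggle, falling back to
# the JSON "default" (true) if it was never changed in the settings dialog.
modo_grafico = config.get_setting('modo_grafico', 'animeyt')

# Assumed lookup: alfa's global search would check this flag the same way.
include_global = config.get_setting('include_in_global_search', 'animeyt')
```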
", "", data) - - patron_novedades = '
[\s\S]+?

Comentarios

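Each `Item` in `mainlist` carries an `action` string; alfa's launcher resolves it to the function of the same name in this module and calls it with the item. A reduced sketch of that dispatch pattern, where `run_action` is a hypothetical stand-in, not alfa's actual launcher code:

```python
def run_action(module, item):
    # "novedades", "recientes", ... name functions defined in this module;
    # each handler receives the Item and returns the next list of Items.
    handler = getattr(module, item.action)
    return handler(item)
```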
-
-def novedades(item):
-    logger.info()
-    itemlist = list()
-    if not item.pagina:
-        item.pagina = 0
-
-    data = httptools.downloadpage(item.url).data
-    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-
-    patron_novedades = '<...>[\s\S]+?<...><...>Comentarios<...><...>'
-
-    data_novedades = scrapertools.find_single_match(data, patron_novedades)
-
-    patron = 'href="([^"]+)"[\s\S]+?src="([^"]+)"[^<]+alt="([^"]+) (\d+)([^"]+)'
-
-    matches = scrapertools.find_multiple_matches(data_novedades, patron)
-
-    for url, img, scrapedtitle, eps, info in matches[item.pagina:item.pagina + 20]:
-        title = scrapedtitle + " " + "1x" + eps + info
-        title = title.replace("Sub Español", "").replace("sub español", "")
-        infoLabels = {'filtro': {"original_language": "ja"}.items()}
-        itemlist.append(Item(channel=item.channel, title=title, url=url, thumb=img, action="findvideos", contentTitle=scrapedtitle, contentSerieName=scrapedtitle, infoLabels=infoLabels, contentType="tvshow"))
-    try:
-        from core import tmdb
-        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-        for it in itemlist:
-            it.thumbnail = it.thumb
-    except:
-        pass
-
-    if len(matches) > item.pagina + 20:
-        pagina = item.pagina + 20
-        itemlist.append(item.clone(channel=item.channel, action="novedades", url=item.url, title=">> Página Siguiente", pagina=pagina))
-
-    return itemlist
-
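`novedades` scrapes the whole list once and pages through it client-side: `item.pagina` is a slice offset that grows by 20, and the ">> Página Siguiente" entry is just a clone of the current item with the advanced offset. A self-contained sketch of the same technique (illustrative names, not alfa's API):

```python
PAGE_SIZE = 20  # the step the channel uses

def paginate(matches, offset):
    # One page of results plus the offset to request next (None = last page).
    page = matches[offset:offset + PAGE_SIZE]
    next_offset = offset + PAGE_SIZE if len(matches) > offset + PAGE_SIZE else None
    return page, next_offset

page, nxt = paginate(list(range(45)), 0)    # first 20 items, nxt == 20
page, nxt = paginate(list(range(45)), 40)   # last 5 items, nxt is None
```

The `infoLabels = {'filtro': {"original_language": "ja"}.items()}` line is a separate trick: it hands alfa's TMDb helper a filter so that ambiguous anime titles resolve to the Japanese-language original rather than a same-named western show.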
", "", data) - - - for letra in '0ABCDEFGHIJKLMNOPQRSTUVWXYZ': - titulo = letra - if letra == "0": - letra = "num" - itemlist.append(Item(channel=item.channel, action="recientes", title=titulo, - url=urlparse.urljoin(HOST, "animes?tipo=0&genero=0&anio=0&letra={letra}".format(letra=letra)))) - - - return itemlist - - -def search(item, texto): - logger.info() - - texto = texto.replace(" ","+") - item.url = item.url+texto - if texto!='': - return recientes(item) - - -def recientes(item): - logger.info() - itemlist = [] - - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |
", "", data) - - patron_recientes = '
[\s\S]+?' - - data_recientes = scrapertools.find_single_match(data, patron_recientes) - - patron = '(.*?)<.*?

(.*?)(.*?)' - - matches = scrapertools.find_multiple_matches(data_recientes, patron) - - for url, thumbnail, plot, title, cat in matches: - itemlist.append(item.clone(title=title, url=url, action="episodios", show=title, thumbnail=thumbnail, plot=plot, cat=cat, context=renumbertools.context(item))) - - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) - - paginacion = scrapertools.find_single_match(data, '", "", data) - - from collections import OrderedDict # cambiado dict por OrderedDict para mantener el mismo orden que en la web - - matches = scrapertools.find_multiple_matches(data, '
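`findvideos` joins two scrapes on the mirror number: one pass collects `(mirror, label)` pairs, the other `(mirror, iframe_url)` pairs, and `OrderedDict` keeps the page's own insertion order (a plain `dict` would not, on Python 2). This assumes the first pattern's stripped markup also captured the mirror number, since `OrderedDict(matches)` only works on key/value pairs. A reduced sketch with hypothetical values:

```python
from collections import OrderedDict

# Hypothetical scrape results: (mirror_number, label) and (mirror_number, url).
labels = [('1', 'Rakuten'), ('2', 'Mega'), ('3', 'YourUpload')]
frames = [('1', 'https://s2.animeyt.tv/rakuten.php?v=abc'),
          ('3', 'https://www.yourupload.com/id.php?v=xyz')]

d_links, d_frames = OrderedDict(labels), OrderedDict(frames)

# Join on the mirror number in page order; mirrors with no frame ('2') drop out.
for k in d_links:
    if k in d_frames and d_frames[k]:
        print('Opción %s (%s)' % (d_links[k], d_frames[k]))
```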
-
-
-def play(item):
-    logger.info()
-    itemlist = []
-
-    if item.url.startswith('https://www.dailymotion.com/'):
-        itemlist.append(item.clone(url=item.url, server='dailymotion'))
-
-    elif item.url.startswith('https://mega.nz/'):
-        itemlist.append(item.clone(url=item.url.replace('embed', ''), server='mega'))
-
-    elif item.url.startswith('https://s2.animeyt.tv/rakuten.php?'):
-        # 1 - Download the page and the session cookie
-        data, ck = gktools.get_data_and_cookie(item)
-
-        # 2 - Compute the request data
-        gsv = scrapertools.find_single_match(data, '