diff --git a/channels/animespace.json b/channels/animespace.json
deleted file mode 100644
index 54a542ae..00000000
--- a/channels/animespace.json
+++ /dev/null
@@ -1,51 +0,0 @@
-{
-    "id": "animespace",
-    "name": "AnimeSpace",
-    "active": false,
-    "language": [],
-    "thumbnail": "",
-    "banner": "",
-    "categories": [
-        "anime",
-        "vos"
-    ],
-    "settings": [
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Incluir en busqueda global",
-            "default": false,
-            "enabled": false,
-            "visible": false
-        },
-        {
-            "id": "filter_languages",
-            "type": "list",
-            "label": "Mostrar enlaces en idioma...",
-            "default": 0,
-            "enabled": true,
-            "visible": true,
-            "lvalues": [
-                "No filtrar",
-                "VOSE"
-            ]
-        },
-        {
-            "id": "checklinks_number",
-            "type": "list",
-            "label": "Número de enlaces a verificar",
-            "default": 1,
-            "enabled": true,
-            "visible": "eq(-1,true)",
-            "lvalues": [ "5", "10", "15", "20" ]
-        },
-        {
-            "id": "include_in_newest_anime",
-            "type": "bool",
-            "label": "Incluir en Novedades - Episodios de anime",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        }
-    ]
-}
diff --git a/channels/animespace.py b/channels/animespace.py
deleted file mode 100644
index 7f2e3eb1..00000000
--- a/channels/animespace.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# -*- coding: utf-8 -*-
-# -*- Channel AnimeSpace -*-
-# -*- Created for Alfa-addon -*-
-# -*- By the Alfa Develop Group -*-
-
-import re
-
-from channelselector import get_thumb
-from core import httptools
-from core import scrapertools
-from core import servertools
-from core import tmdb
-from core.item import Item
-from platformcode import logger, config
-from specials import autoplay
-from specials import renumbertools
-
-host = config.get_channel_url()
-
-checklinks = config.get_setting('checklinks', 'animespace')
-checklinks_number = config.get_setting('checklinks_number', 'animespace')
-
-IDIOMAS = {'VOSE': 'VOSE'}
-list_language = IDIOMAS.values()
-list_quality = []
-list_servers = ['directo', 'openload', 'streamango']
-
-
-def mainlist(item):
-    logger.info()
-
-    autoplay.init(item.channel, list_servers, list_quality)
-
-    itemlist = []
-
-    itemlist.append(Item(channel=item.channel, title="Nuevos Episodios",
-                         action="new_episodes",
-                         thumbnail=get_thumb('new_episodes', auto=True),
-                         url=host))
-
-    itemlist.append(Item(channel=item.channel, title="Ultimas",
-                         action="list_all",
-                         thumbnail=get_thumb('last', auto=True),
-                         url=host + '/emision'))
-
-    itemlist.append(Item(channel=item.channel, title="Todas",
-                         action="list_all",
-                         thumbnail=get_thumb('all', auto=True),
-                         url=host + '/animes'))
-
-    itemlist.append(Item(channel=item.channel, title="Anime",
-                         action="list_all",
-                         thumbnail=get_thumb('anime', auto=True),
-                         url=host + '/categoria/anime'))
-
-    itemlist.append(Item(channel=item.channel, title="Películas",
-                         action="list_all",
-                         thumbnail=get_thumb('movies', auto=True),
-                         url=host + '/categoria/pelicula'))
-
-    itemlist.append(Item(channel=item.channel, title="OVAs",
-                         action="list_all",
-                         thumbnail='',
-                         url=host + '/categoria/ova'))
-
-    itemlist.append(Item(channel=item.channel, title="ONAs",
-                         action="list_all",
-                         thumbnail='',
-                         url=host + '/categoria/ona'))
-
-
-    itemlist.append(Item(channel=item.channel, title="Especiales",
-                         action="list_all",
-                         thumbnail='',
-                         url=host + '/categoria/especial'))
-
-    itemlist.append(Item(channel=item.channel, title="Buscar",
-                         action="search",
-                         url=host + '/search?q=',
-                         thumbnail=get_thumb('search', auto=True),
-                         fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
-                         ))
-
-    autoplay.show_option(item.channel, itemlist)
-    itemlist = renumbertools.show_option(item.channel, itemlist)
-
-    return itemlist
-
-
-def get_source(url):
-    logger.info()
-    data = httptools.downloadpage(url).data
-    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-    return data
-
-
-def list_all(item):
-    logger.info()
-
-    itemlist = []
-
-    data = get_source(item.url)
-    patron = '.*?src="([^"]+)".*?'
-    patron += '([^<]+).*?"fecha">([^<]+)<.*?([^<]+)'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    for scrapedurl, scrapedthumbnail, scrapedtitle, year, type in matches:
-        type = type.strip().lower()
-        url = scrapedurl
-        thumbnail = scrapedthumbnail
-        lang = 'VOSE'
-        title = scrapedtitle
-        context = renumbertools.context(item)
-        context2 = autoplay.context
-        context.extend(context2)
-        new_item= Item(channel=item.channel,
-                       action='episodios',
-                       title=title,
-                       url=url,
-                       thumbnail=thumbnail,
-                       language = lang,
-                       infoLabels={'year':year}
-                       )
-        if type != 'anime':
-            new_item.contentTitle=title
-        else:
-            new_item.plot=type
-            new_item.contentSerieName=title
-        new_item.context = context
-        itemlist.append(new_item)
-
-    # Paginacion
-    next_page = scrapertools.find_single_match(data,
-                                               '"page-item active">.*?.*?')
-
-    if next_page != "":
-        actual_page = scrapertools.find_single_match(item.url, '([^\?]+)?')
-        itemlist.append(Item(channel=item.channel,
-                             action="list_all",
-                             title=">> Página siguiente",
-                             url=actual_page + next_page,
-                             thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
-                             ))
-    tmdb.set_infoLabels(itemlist, seekTmdb=True)
-    return itemlist
-
-def search(item, texto):
-    logger.info()
-    texto = texto.replace(" ", "+")
-    item.url = item.url + texto
-    try:
-        if texto != '':
-            return list_all(item)
-        else:
-            return []
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
-
-def new_episodes(item):
-    logger.info()
-
-    itemlist = []
-
-    full_data = get_source(item.url)
-    data = scrapertools.find_single_match(full_data, '.*?')
-    patron = '.*?src="([^"]+)".*?'
-    patron += '.*?([^<]+).*?([^<]+)'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    for scrapedurl, scrapedthumbnail, epi, scrapedtitle in matches:
-        url = scrapedurl
-        lang = 'VOSE'
-        title = '%s - %s' % (scrapedtitle, epi)
-        itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=scrapedthumbnail,
-                             action='findvideos', language=lang))
-
-    return itemlist
-
-def episodios(item):
-    logger.info()
-    itemlist = []
-
-    data = get_source(item.url)
-    patron = ''
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    infoLabels = item.infoLabels
-    for scrapedurl in matches:
-        episode = scrapertools.find_single_match(scrapedurl, '.*?capitulo-(\d+)')
-        lang = 'VOSE'
-        season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, int(episode))
-        title = "%sx%s - %s" % (season, str(episode).zfill(2),item.contentSerieName)
-        url = scrapedurl
-        infoLabels['season'] = season
-        infoLabels['episode'] = episode
-
-        itemlist.append(Item(channel=item.channel, title=title, contentSerieName=item.contentSerieName, url=url,
-                             action='findvideos', language=lang, infoLabels=infoLabels))
-
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-
-    itemlist = itemlist[::-1]
-    if item.contentSerieName != '' and config.get_videolibrary_support() and len(itemlist) > 0:
-        itemlist.append(
-            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
-                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName,
-                 extra1='library'))
-
-    return itemlist
-
-
-def findvideos(item):
-    import urllib
-    logger.info()
-
-    itemlist = []
-
-    data = get_source(item.url)
-    patron = 'id="Opt\d+">.*?src=(.*?) frameborder'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    for scrapedurl in matches:
-        server = ''
-        scrapedurl = scrapedurl.replace('"', '')
-        new_data = get_source(scrapedurl)
-
-        if "/stream/" in scrapedurl:
-            scrapedurl = scrapertools.find_single_match(new_data, '\s*]+>\s*]+>[^>]+>(?P[^<]+)<'
-    elif item.args == 'raccolta':
-        patron = r'<a (?:style="[^"]+" )?href="(?P<url>[^"]+)"[^>]+>(?:[^>]+><strong>)?(?P<title>[^<]+)(?:</a>)?</strong'
+        if 'class="panel"' in data:
+            item.args = 'raccolta'
+            patron = r'class="title-episodio">(?P<title>[^<]+)<(?P<url>.*?)<p'
+            # patron = r'<a (?:style="[^"]+" )?href="(?P<url>[^"]+)"[^>]+>(?:[^>]+><strong>)?(?P<title>[^<]+)(?:</a>)?</strong'
+        else:
+            patron = r'<div class="cover-racolta">\s*<a href="(?P<url>[^"]+)"[^>]+>\s*<img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)".*?<p class="title[^>]+>(?P<title>[^<]+)<'
     else:
         patron = r'<article[^>]+>[^>]+>[^>]+>(?:<img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)"[^>]+>)?.*?<a href="(?P<url>[^"]+)">\s*(?P<title>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<p>(?P<plot>[^<]+)<'
     patronNext = r'<a class="page-numbers next" href="([^"]+)">'
@@ -80,16 +86,23 @@ def peliculas(item):
             item.action = 'episodios'
             item.contentSerieName = title
             item.contentTitle = ''
-        elif 'collezione' in item.fulltitle.lower():
+        elif 'collezion' in item.fulltitle.lower() or \
+                'raccolt' in item.fulltitle.lower() or \
+                'filmografia' in item.fulltitle.lower():
             item.args = 'collection'
             item.action = 'peliculas'
             item.contentTitle = title
             item.contentSerieName = ''
-        elif 'raccolta' in item.fulltitle.lower():
-            item.args = 'raccolta'
-            item.action = 'peliculas'
-            item.contentTitle = title
-            item.contentSerieName = ''
+        # elif 'collezion' in item.fulltitle.lower():
+        #     item.args = 'collection'
+        #     item.action = 'peliculas'
+        #     item.contentTitle = title
+        #     item.contentSerieName = ''
+        # elif 'raccolta' in item.fulltitle.lower():
+        #     item.args = 'collection'
+        #     item.action = 'peliculas'
+        #     item.contentTitle = title
+        #     item.contentSerieName = ''
         else:
             item.contentTitle = title
             item.contentSerieName = ''