From e85ef540ffdc60aaf7244c75ab7f4d02d231194d Mon Sep 17 00:00:00 2001 From: 4l3x87 <50104109+4l3x87@users.noreply.github.com> Date: Sat, 1 Jun 2019 10:00:22 +0200 Subject: [PATCH] Various fixes (#42) * Improve the Guardaserie.click and Fastsubita channels and the support module * Refactor the TV-series channels onto the support helpers, with fixes * Refactor the series/anime channels onto the support helpers, with fixes; new server animeworld.biz * Fix the videolibrary update --- channels/animesaturn.py | 196 +++++++++---------------- channels/animeworld.py | 41 +----- channels/fastsubita.py | 14 +- channels/guardaserieclick.py | 14 +- channels/serietvsubita.py | 271 +++++++++++------------------ channels/serietvu.py | 176 ++++++++--------------- core/support.py | 2 +- servers/animeworld.json | 42 ++++++ servers/animeworld.py | 34 +++++ videolibrary_service.py | 2 +- 10 files changed, 308 insertions(+), 484 deletions(-) create mode 100644 servers/animeworld.json create mode 100644 servers/animeworld.py diff --git a/channels/animesaturn.py b/channels/animesaturn.py index 560ecedc..90edb519 100644 --- a/channels/animesaturn.py +++ b/channels/animesaturn.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ # Canale per AnimeSaturn -# Thanks to me +# Thanks to 4l3x87 # ---------------------------------------------------------- import re @@ -10,6 +10,7 @@ import urlparse import channelselector from core import httptools, tmdb, support, scrapertools, jsontools from core.item import Item +from core.support import log from platformcode import logger, config from specials import autoplay, autorenumber @@ -19,30 +20,19 @@ headers = [['Referer', host]] IDIOMAS = {'Italiano': 'IT'} list_language = IDIOMAS.values() -list_servers = ['openload','fembed'] -list_quality = ['default'] +list_servers = ['openload', 'fembed', 'animeworld'] +list_quality = ['default', '480p', '720p', '1080p'] def mainlist(item): - support.log(item.channel + 'mainlist') + log() itemlist = [] - support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host) - support.menu(itemlist, 'Novità submenu', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host,'tvshow') - support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', '%s/animelist?load_all=1' % host,args=['tvshow','alfabetico']) + support.menu(itemlist, 'Novità bold', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host, 'tvshow') + support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host) + support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', '%s/animelist?load_all=1' % host, args=['tvshow', 'alfabetico']) support.menu(itemlist, 'Cerca', 'search', host) - - - autoplay.init(item.channel, list_servers, list_quality) - autoplay.show_option(item.channel, itemlist) - - itemlist.append( - Item(channel='setting', - action="channel_config", - title=support.typo("Configurazione Canale color lime"), - config=item.channel, - folder=False, - thumbnail=channelselector.get_thumb('setting_0.png')) - ) + support.aplay(item, itemlist, list_servers, list_quality) + support.channel_config(item, itemlist) return itemlist @@ -50,7 +40,7 @@ def mainlist(item): # ---------------------------------------------------------------------------------------------------------------- def cleantitle(scrapedtitle): scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip()) - scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×','x') + scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×', 
'x').replace('"', "'") year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)') if year: scrapedtitle = scrapedtitle.replace('(' + year + ')', '') @@ -62,7 +52,7 @@ def cleantitle(scrapedtitle): # ---------------------------------------------------------------------------------------------------------------- def lista_anime(item): - support.log(item.channel + " lista_anime") + log() itemlist = [] PERPAGE = 15 @@ -78,37 +68,33 @@ def lista_anime(item): for i, serie in enumerate(series): matches.append(serie.split('||')) else: - # Carica la pagina - data = httptools.downloadpage(item.url).data - # Estrae i contenuti patron = r']*?>[^>]*?>(.+?)<' - matches = re.compile(patron, re.DOTALL).findall(data) - + matches = support.match(item, patron, headers=headers)[0] scrapedplot = "" scrapedthumbnail = "" for i, (scrapedurl, scrapedtitle) in enumerate(matches): if (p - 1) * PERPAGE > i: continue if i >= p * PERPAGE: break - title = cleantitle(scrapedtitle).replace('(ita)','(ITA)') + title = cleantitle(scrapedtitle).replace('(ita)', '(ITA)') movie = False showtitle = title if '(ITA)' in title: - title = title.replace('(ITA)','').strip() + title = title.replace('(ITA)', '').strip() showtitle = title - title += ' '+support.typo(' (ITA)') + else: + title += ' ' + support.typo('Sub-ITA', '_ [] color kod') infoLabels = {} if 'Akira' in title: movie = True - infoLabels['year']= 1988 + infoLabels['year'] = 1988 if 'Dragon Ball Super Movie' in title: movie = True infoLabels['year'] = 2019 - itemlist.append( Item(channel=item.channel, extra=item.extra, @@ -130,15 +116,7 @@ def lista_anime(item): # Paginazione if len(matches) >= p * PERPAGE: - scrapedurl = item.url + '{}' + str(p + 1) - itemlist.append( - Item(channel=item.channel, - action='lista_anime', - contentType=item.contentType, - title=support.typo(config.get_localized_string(30992), 'color kod bold'), - url=scrapedurl, - args=item.args, - thumbnail=support.thumb())) + support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1))) return itemlist @@ -148,17 +126,14 @@ def lista_anime(item): # ---------------------------------------------------------------------------------------------------------------- def episodios(item): - support.log(item.channel + " episodios") + log() itemlist = [] - data = httptools.downloadpage(item.url).data - + data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data anime_id = scrapertools.find_single_match(data, r'\?anime_id=(\d+)') - - #movie or series + # movie or series movie = scrapertools.find_single_match(data, r'\Episodi:\s(\d*)\sMovie') - data = httptools.downloadpage( host + "/loading_anime?anime_id=" + anime_id, headers={ @@ -167,7 +142,7 @@ def episodios(item): patron = r'(.+?)\s*' patron += r'0): + if len(episodes) > 0: item.url = episodes[0].url - itemlist = [] - data = httptools.downloadpage(item.url).data + data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data + data = re.sub(r'\n|\t|\s+', ' ', data) patron = r'
' url = scrapertools.find_single_match(data, patron) - - data = httptools.downloadpage(url).data - + data = httptools.downloadpage(url, headers=headers, ignore_response_code=True).data + data = re.sub(r'\n|\t|\s+', ' ', data) itemlist = support.server(item, data=data) - if item.contentType == 'movie': - support.videolibrary(itemlist, item, 'color kod') - # Controlla se i link sono validi - # if checklinks: - # itemlist = servertools.check_list_links(itemlist, checklinks_number) - # - # autoplay.start(itemlist, item) - return itemlist + # ================================================================================================================ # ---------------------------------------------------------------------------------------------------------------- def ultimiep(item): - logger.info(item.channel + "ultimiep") + log() itemlist = [] - post = "page=%s" % item.args['page'] if item.args and item.args['page'] else None + p = 1 + if '{}' in item.url: + item.url, p = item.url.split('{}') + p = int(p) + + post = "page=%s" % p if p > 1 else None data = httptools.downloadpage( item.url, post=post, headers={ @@ -259,14 +229,23 @@ def ultimiep(item): scrapedtitle2 = cleantitle(scrapedtitle2) scrapedtitle = scrapedtitle1 + ' - ' + scrapedtitle2 + '' + title = scrapedtitle + showtitle = scrapedtitle + if '(ITA)' in title: + title = title.replace('(ITA)', '').strip() + showtitle = title + else: + title += ' ' + support.typo('Sub-ITA', '_ [] color kod') + + itemlist.append( Item(channel=item.channel, - contentType="tvshow", + contentType="episode", action="findvideos", - title=scrapedtitle, + title=title, url=scrapedurl, fulltitle=scrapedtitle1, - show=scrapedtitle1, + show=showtitle, thumbnail=scrapedthumbnail)) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) @@ -275,27 +254,17 @@ def ultimiep(item): patronvideos = r'data-page="(\d+)" title="Next">Pagina Successiva' next_page = scrapertools.find_single_match(data, patronvideos) if next_page: - itemlist.append( - Item( - channel=item.channel, - action="ultimiep", - title=support.typo(config.get_localized_string(30992), 'color kod bold'), - url=item.url, - thumbnail= support.thumb(), - args={'page':next_page}, - folder=True)) + support.nextPage(itemlist, item, next_page=(item.url + '{}' + next_page)) return itemlist - # ================================================================================================================ - # ---------------------------------------------------------------------------------------------------------------- def newest(categoria): - logger.info(__channel__ + " newest" + categoria) + log(categoria) itemlist = [] item = Item() item.url = host @@ -323,42 +292,9 @@ def newest(categoria): # ---------------------------------------------------------------------------------------------------------------- def search_anime(item, texto): - logger.info(item.channel + " search_anime: "+texto) + log(texto) itemlist = [] - # data = httptools.downloadpage(host + "/animelist?load_all=1").data - # data = scrapertools.decodeHtmlentities(data) - # - # texto = texto.lower().split('+') - # - # patron = r']*?>[^>]*?>(.+?)<' - # matches = re.compile(patron, re.DOTALL).findall(data) - # - # for scrapedurl, scrapedtitle in [(scrapedurl, scrapedtitle) - # for scrapedurl, scrapedtitle in matches - # if all(t in scrapedtitle.lower() - # for t in texto)]: - # - # title = cleantitle(scrapedtitle).replace('(ita)','(ITA)') - # showtitle = title - # if '(ITA)' in title: - # title = title.replace('(ITA)','').strip() - # showtitle = 
title - # title += ' '+support.typo(' [ITA] color kod') - # - # itemlist.append( - # Item( - # channel=item.channel, - # contentType="episode", - # action="episodios", - # title=title, - # url=scrapedurl, - # fulltitle=title, - # show=showtitle, - # thumbnail="")) - # - # tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) - data = httptools.downloadpage(host + "/index.php?search=1&key=%s" % texto).data jsondata = jsontools.load(data) @@ -368,16 +304,15 @@ def search_anime(item, texto): if 'Anime non esistente' in data: continue else: - title = title.replace('(ita)','(ITA)') + title = title.replace('(ita)', '(ITA)') showtitle = title if '(ITA)' in title: title = title.replace('(ITA)', '').strip() showtitle = title - title += ' ' + support.typo(' (ITA)') + else: + title += ' ' + support.typo('Sub-ITA', '_ [] color kod') url = "%s/anime/%s" % (host, data) - logger.debug(title) - logger.debug(url) itemlist.append( Item( @@ -397,7 +332,7 @@ def search_anime(item, texto): # ---------------------------------------------------------------------------------------------------------------- def search(item, texto): - logger.info(item.channel + " search") + log(texto) itemlist = [] try: @@ -416,23 +351,20 @@ def search(item, texto): def list_az(item): - support.log(item.channel+" list_az") + log() itemlist = [] alphabet = dict() - # Scarico la pagina - data = httptools.downloadpage(item.url).data - # Articoli patron = r']*?>[^>]*?>(.+?)<' - matches = re.compile(patron, re.DOTALL).findall(data) + matches = support.match(item, patron, headers=headers)[0] for i, (scrapedurl, scrapedtitle) in enumerate(matches): letter = scrapedtitle[0].upper() if letter not in alphabet: alphabet[letter] = [] - alphabet[letter].append(scrapedurl+'||'+scrapedtitle) + alphabet[letter].append(scrapedurl + '||' + scrapedtitle) for letter in sorted(alphabet): itemlist.append( @@ -444,4 +376,4 @@ def list_az(item): return itemlist -# ================================================================================================================ \ No newline at end of file +# ================================================================================================================ diff --git a/channels/animeworld.py b/channels/animeworld.py index d5714602..fec6c29f 100644 --- a/channels/animeworld.py +++ b/channels/animeworld.py @@ -18,11 +18,9 @@ headers = [['Referer', host]] IDIOMAS = {'Italiano': 'Italiano'} list_language = IDIOMAS.values() -list_servers = ['diretto'] -list_quality = [] +list_servers = ['animeworld', 'verystream', 'streamango', 'openload', 'directo'] +list_quality = ['default', '480p', '720p', '1080p'] -checklinks = config.get_setting('checklinks', 'animeworld') -checklinks_number = config.get_setting('checklinks_number', 'animeworld') def mainlist(item): @@ -47,19 +45,10 @@ def mainlist(item): def generi(item): log() - itemlist = [] patron_block = r'\sGeneri\s*' patron = r'' - matches = support.match(item,patron, patron_block, headers)[0] - for scrapedurl, scrapedtitle in matches: - itemlist.append(Item( - channel=item.channel, - action="video", - title=scrapedtitle, - url="%s%s" % (host,scrapedurl))) - - return itemlist + return support.scrape(item, patron, ['url','title'], patron_block=patron_block, action='video') # Crea Menu Filtro ====================================================== @@ -183,7 +172,7 @@ def video(item): log() itemlist = [] - matches, data = support.match(item, r'(.*?)<\/a>') + matches, data = support.match(item, r'(.*?)<\/a>', headers=headers) for scrapedurl, scrapedthumb 
,scrapedinfo, scrapedoriginal, scrapedtitle in matches: # Cerca Info come anno o lingua nel Titolo @@ -231,6 +220,9 @@ def video(item): # Concatena le informazioni + + lang = support.typo('Sub-ITA', '_ [] color kod') if '(ita)' not in lang.lower() else '' + info = ep + lang + year + ova + ona + movie + special # Crea il title da visualizzare @@ -329,25 +321,6 @@ def findvideos(item): videoData +='\n'+json['grabber'] - if serverid == '33': - post = urllib.urlencode({'r': '', 'd': 'www.animeworld.biz'}) - dataJson = httptools.downloadpage(json['grabber'].replace('/v/','/api/source/'),headers=[['x-requested-with', 'XMLHttpRequest']],post=post).data - json = jsontools.load(dataJson) - log(json['data']) - if json['data']: - for file in json['data']: - itemlist.append( - Item( - channel=item.channel, - action="play", - title='diretto', - url=file['file'], - quality=file['label'], - server='directo', - show=item.show, - contentType=item.contentType, - folder=False)) - if serverid == '28': itemlist.append( Item( diff --git a/channels/fastsubita.py b/channels/fastsubita.py index 7c04e499..0dae9dbe 100644 --- a/channels/fastsubita.py +++ b/channels/fastsubita.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ -# Thanks Icarus crew & Alfa addon # Canale per fastsubita +# Thanks Icarus crew & Alfa addon & 4l3x87 # ------------------------------------------------------------ from core import scrapertools, httptools, tmdb, support @@ -17,7 +17,7 @@ list_servers = ['verystream', 'openload', 'speedvideo', 'wstream', 'flashx', 'vi list_quality = ['default'] headers = [ - ['Host', 'fastsubita.com'], + ['Host', host.split("//")[-1].split("/")[0]], ['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'], ['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'], ['Accept-Language', 'en-US,en;q=0.5'], @@ -104,7 +104,9 @@ def pelicuals_tv(item): else: scrapedurl = "http:" + scrapedurl - title = scraped_1 + " - " + infoLabels['season'] + "x" + infoLabels['episode'] + " Sub-ITA" + + serie = cleantitle(scraped_1) + title = serie + " - " + infoLabels['season'] + "x" + infoLabels['episode'] + " "+support.typo('Sub-ITA', '_ [] color kod') itemlist.append( Item(channel=item.channel, @@ -115,9 +117,9 @@ def pelicuals_tv(item): url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, - show=scraped_1, + show=serie, extra=item.extra, - contentSerieName=scraped_1, + contentSerieName=serie, contentLanguage='Sub-ITA', infoLabels=infoLabels, folder=True)) @@ -301,7 +303,7 @@ def episodios(item, itemlist=[]): infoLabels = {} infoLabels['season'] = season infoLabels['episode'] = episode[2] - title = infoLabels['season'] + 'x' + infoLabels['episode'] + " Sub-ITA" + title = infoLabels['season'] + 'x' + infoLabels['episode'] + " "+support.typo('Sub-ITA', '_ [] color kod') if "http:" not in scrapedurl: scrapedurl = "http:" + scrapedurl diff --git a/channels/guardaserieclick.py b/channels/guardaserieclick.py index eafae4cb..51ac9fba 100644 --- a/channels/guardaserieclick.py +++ b/channels/guardaserieclick.py @@ -1,18 +1,16 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ -# Canale per guardaserie.click -# Thanks to Icarus crew & Alfa addon +# Canale per Guardaserie.click +# Thanks to Icarus crew & Alfa addon & 4l3x87 # ------------------------------------------------------------ import re -import channelselector -from core import httptools, scrapertools, servertools, support 
+from core import httptools, scrapertools, support from core import tmdb from core.item import Item from core.support import log from platformcode import logger, config -from specials import autoplay __channel__ = 'guardaserieclick' host = config.get_setting("channel_host", __channel__) @@ -163,7 +161,7 @@ def serietvaggiornate(item): infoLabels['season'] = episode[0][0] title = str( - "%s - %sx%s %s" % (scrapedtitle, infoLabels['season'], infoLabels['episode'], contentlanguage)).strip() + "%s - %sx%s %s" % (scrapedtitle, infoLabels['season'], infoLabels['episode'], support.typo(contentlanguage, '_ [] color kod') if contentlanguage else '')).strip() itemlist.append( Item(channel=item.channel, @@ -247,7 +245,7 @@ def episodios(item): scrapedepisodetitle = cleantitle(scrapedepisodetitle) title = str("%sx%s %s" % (scrapedseason, scrapedepisode, scrapedepisodetitle)).strip() if 'SUB-ITA' in scrapedtitle: - title += " Sub-ITA" + title += " "+support.typo("Sub-ITA", '_ [] color kod') infoLabels = {} infoLabels['season'] = scrapedseason @@ -278,7 +276,7 @@ def episodios(item): # ---------------------------------------------------------------------------------------------------------------- def findepvideos(item): log() - data = httptools.downloadpage(item.url, headers=headers).data + data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data matches = scrapertools.find_multiple_matches(data, item.extra) data = "\r\n".join(matches[0]) item.contentType = 'movie' diff --git a/channels/serietvsubita.py b/channels/serietvsubita.py index 3ce72d6b..693e7487 100644 --- a/channels/serietvsubita.py +++ b/channels/serietvsubita.py @@ -1,16 +1,16 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ -# Canale per Serie Tv Sub ITA -# Thanks to Icarus crew & Alfa addon +# Canale per Serietvsubita +# Thanks to Icarus crew & Alfa addon & 4l3x87 # ---------------------------------------------------------- + import re import time -import channelselector from core import httptools, tmdb, scrapertools, support from core.item import Item +from core.support import log from platformcode import logger, config -from specials import autoplay __channel__ = "serietvsubita" host = config.get_setting("channel_host", __channel__) @@ -18,33 +18,19 @@ headers = [['Referer', host]] IDIOMAS = {'Italiano': 'IT'} list_language = IDIOMAS.values() -list_servers = ['gounlimited','verystream','streamango','openload'] +list_servers = ['gounlimited', 'verystream', 'streamango', 'openload'] list_quality = ['default'] -# checklinks = config.get_setting('checklinks', __channel__) -# checklinks_number = config.get_setting('checklinks_number', __channel__) - def mainlist(item): - support.log(item.channel + 'mainlist') + log() itemlist = [] - support.menu(itemlist, 'Serie TV bold', 'lista_serie', host,'tvshow') - support.menu(itemlist, 'Novità submenu', 'peliculas_tv', host,'tvshow') - support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow',args=['serie']) - support.menu(itemlist, 'Cerca', 'search', host,'tvshow') - - - autoplay.init(item.channel, list_servers, list_quality) - autoplay.show_option(item.channel, itemlist) - - itemlist.append( - Item(channel='setting', - action="channel_config", - title=support.typo("Configurazione Canale color lime"), - config=item.channel, - folder=False, - thumbnail=channelselector.get_thumb('setting_0.png')) - ) + support.menu(itemlist, 'Novità bold', 'peliculas_tv', host, 'tvshow') + support.menu(itemlist, 'Serie TV bold', 
'lista_serie', host, 'tvshow') + support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host, 'tvshow', args=['serie']) + support.menu(itemlist, 'Cerca', 'search', host, 'tvshow') + support.aplay(item, itemlist, list_servers, list_quality) + support.channel_config(item, itemlist) return itemlist @@ -52,20 +38,57 @@ def mainlist(item): # ---------------------------------------------------------------------------------------------------------------- def cleantitle(scrapedtitle): scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip()) - scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×','x').replace('Game of Thrones –','').replace('In The Dark 2019','In The Dark (2019)').strip() + scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×', 'x').replace('Game of Thrones –','')\ + .replace('In The Dark 2019', 'In The Dark (2019)').replace('"', "'").strip() year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)') if year: scrapedtitle = scrapedtitle.replace('(' + year + ')', '') - return scrapedtitle.strip() # ================================================================================================================ +# ---------------------------------------------------------------------------------------------------------------- +def findvideos(item): + log() + data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data + data = re.sub(r'\n|\t|\s+', ' ', data) + # recupero il blocco contenente i link + blocco = scrapertools.find_single_match(data, r'
([\s\S.]*?)
([^<]+)' + matches = support.match(item, patron, headers=headers)[0] for i, (scrapedurl, scrapedtitle) in enumerate(matches): scrapedplot = "" @@ -112,90 +132,58 @@ def lista_serie(item): # Paginazione if len(matches) >= p * PERPAGE: - scrapedurl = item.url + '{}' + str(p + 1) - itemlist.append( - Item(channel=item.channel, - action='lista_serie', - contentType=item.contentType, - title=support.typo(config.get_localized_string(30992), 'color kod bold'), - url=scrapedurl, - args=item.args, - thumbnail=support.thumb())) + support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1))) return itemlist + # ================================================================================================================ # ---------------------------------------------------------------------------------------------------------------- def episodios(item, itemlist=[]): - support.log(item.channel + " episodios") - # itemlist = [] + log() + patron = r'
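
Every channel touched by this patch converges on the same core.support layout. The sketch below condenses that pattern, using only helper calls that appear in the hunks above (support.menu, support.aplay, support.channel_config, support.match, support.nextPage, log); their exact signatures are assumed from usage in this diff, and the host URL, the regex, and the 'peliculas' action name are placeholders, not part of the patch:

# -*- coding: utf-8 -*-
# Minimal sketch of the refactored channel layout, assumed from this diff.
from core import support
from core.item import Item
from core.support import log

host = 'https://example.com'  # placeholder; real channels read config.get_setting("channel_host", __channel__)
headers = [['Referer', host]]
list_servers = ['openload']   # per-channel server whitelist
list_quality = ['default']


def mainlist(item):
    log()  # replaces support.log(item.channel + ' mainlist')
    itemlist = []
    support.menu(itemlist, 'Novità bold', 'peliculas', host, 'tvshow')
    support.menu(itemlist, 'Cerca', 'search', host, 'tvshow')
    # one call each instead of autoplay.init/show_option and the
    # hand-built Item(channel='setting', action='channel_config', ...)
    support.aplay(item, itemlist, list_servers, list_quality)
    support.channel_config(item, itemlist)
    return itemlist


def peliculas(item):
    log()
    itemlist = []
    patron = r'...'  # channel-specific regex with two groups: url, title (elided)
    # support.match fetches item.url and returns (matches, data)
    matches = support.match(item, patron, headers=headers)[0]
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(Item(channel=item.channel, action='findvideos',
                             title=scrapedtitle, url=scrapedurl))
    # replaces the hand-built "next page" Item; a real channel first splits
    # item.url on '{}' to recover the current page, as lista_anime does above
    support.nextPage(itemlist, item, next_page=item.url + '{}' + str(2))
    return itemlist

The item.url + '{}' + page idiom passed to support.nextPage mirrors the pagination in lista_anime and ultimiep: the next page number rides after a '{}' marker in the URL and is split back out at the top of the action on the following call.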
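
A second pattern this patch repeats in animesaturn.py, fastsubita.py, guardaserieclick.py and serietvsubita.py is the language tag: titles that do not carry the '(ITA)' dub marker get a colored Sub-ITA label. Condensed into a sketch (tag_language is a hypothetical helper name; the patch inlines this logic at each site):

def tag_language(title):
    # Dubbed entries carry '(ITA)' in the scraped title: drop the marker
    # and keep the bare title for show/showtitle.
    if '(ITA)' in title:
        return title.replace('(ITA)', '').strip()
    # Everything else is subtitled; judging by the format string,
    # '_ [] color kod' renders the label as ' [Sub-ITA]' in the skin's accent color.
    return title + ' ' + support.typo('Sub-ITA', '_ [] color kod')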