diff --git a/channels/animesaturn.py b/channels/animesaturn.py
index 560ecedc..90edb519 100644
--- a/channels/animesaturn.py
+++ b/channels/animesaturn.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 # ------------------------------------------------------------
 # Canale per AnimeSaturn
-# Thanks to me
+# Thanks to 4l3x87
 # ----------------------------------------------------------
 
 import re
@@ -10,6 +10,7 @@ import urlparse
 import channelselector
 from core import httptools, tmdb, support, scrapertools, jsontools
 from core.item import Item
+from core.support import log
 from platformcode import logger, config
 from specials import autoplay, autorenumber
 
@@ -19,30 +20,19 @@ headers = [['Referer', host]]
 IDIOMAS = {'Italiano': 'IT'}
 list_language = IDIOMAS.values()
 
-list_servers = ['openload','fembed']
-list_quality = ['default']
+list_servers = ['openload', 'fembed', 'animeworld']
+list_quality = ['default', '480p', '720p', '1080p']
 
 
 def mainlist(item):
-    support.log(item.channel + 'mainlist')
+    log()
     itemlist = []
-    support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host)
-    support.menu(itemlist, 'Novità submenu', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host,'tvshow')
-    support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', '%s/animelist?load_all=1' % host,args=['tvshow','alfabetico'])
+    support.menu(itemlist, 'Novità bold', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host, 'tvshow')
+    support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host)
+    support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', '%s/animelist?load_all=1' % host, args=['tvshow', 'alfabetico'])
     support.menu(itemlist, 'Cerca', 'search', host)
-
-
-    autoplay.init(item.channel, list_servers, list_quality)
-    autoplay.show_option(item.channel, itemlist)
-
-    itemlist.append(
-        Item(channel='setting',
-             action="channel_config",
-             title=support.typo("Configurazione Canale color lime"),
-             config=item.channel,
-             folder=False,
-             thumbnail=channelselector.get_thumb('setting_0.png'))
-    )
+    support.aplay(item, itemlist, list_servers, list_quality)
+    support.channel_config(item, itemlist)
 
     return itemlist
 
@@ -50,7 +40,7 @@ def mainlist(item):
 # ----------------------------------------------------------------------------------------------------------------
 def cleantitle(scrapedtitle):
     scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
-    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×','x')
+    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×', 'x').replace('"', "'")
     year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
     if year:
         scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
@@ -62,7 +52,7 @@ def cleantitle(scrapedtitle):
 # ----------------------------------------------------------------------------------------------------------------
 def lista_anime(item):
-    support.log(item.channel + " lista_anime")
+    log()
     itemlist = []
 
     PERPAGE = 15
@@ -78,37 +68,33 @@ def lista_anime(item):
         for i, serie in enumerate(series):
             matches.append(serie.split('||'))
     else:
-        # Carica la pagina
-        data = httptools.downloadpage(item.url).data
-        # Estrae i contenuti
         patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
-        matches = re.compile(patron, re.DOTALL).findall(data)
-
+        matches = support.match(item, patron, headers=headers)[0]
 
     scrapedplot = ""
     scrapedthumbnail = ""
     for i, (scrapedurl, scrapedtitle) in enumerate(matches):
         if (p - 1) * PERPAGE > i: continue
         if i >= p * PERPAGE: break
-        title = cleantitle(scrapedtitle).replace('(ita)','(ITA)')
+        title = cleantitle(scrapedtitle).replace('(ita)', '(ITA)')
         movie = False
         showtitle = title
         if '(ITA)' in title:
-            title = title.replace('(ITA)','').strip()
+            title = title.replace('(ITA)', '').strip()
             showtitle = title
-            title += ' '+support.typo(' (ITA)')
+        else:
+            title += ' ' + support.typo('Sub-ITA', '_ [] color kod')
 
         infoLabels = {}
         if 'Akira' in title:
             movie = True
-            infoLabels['year']= 1988
+            infoLabels['year'] = 1988
         if 'Dragon Ball Super Movie' in title:
             movie = True
             infoLabels['year'] = 2019
-
         itemlist.append(
             Item(channel=item.channel,
                  extra=item.extra,
@@ -130,15 +116,7 @@ def lista_anime(item):
 
     # Paginazione
     if len(matches) >= p * PERPAGE:
-        scrapedurl = item.url + '{}' + str(p + 1)
-        itemlist.append(
-            Item(channel=item.channel,
-                 action='lista_anime',
-                 contentType=item.contentType,
-                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
-                 url=scrapedurl,
-                 args=item.args,
-                 thumbnail=support.thumb()))
+        support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1)))
 
     return itemlist
 
@@ -148,17 +126,14 @@ def lista_anime(item):
 # ----------------------------------------------------------------------------------------------------------------
 def episodios(item):
-    support.log(item.channel + " episodios")
+    log()
     itemlist = []
-    data = httptools.downloadpage(item.url).data
-
+    data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
     anime_id = scrapertools.find_single_match(data, r'\?anime_id=(\d+)')
-
-    #movie or series
+    # movie or series
     movie = scrapertools.find_single_match(data, r'\Episodi:\s(\d*)\sMovie')
-
     data = httptools.downloadpage(
         host + "/loading_anime?anime_id=" + anime_id,
         headers={
@@ -167,7 +142,7 @@ def episodios(item):
     patron = r'(.+?)\s*'
     patron += r'0):
+    if len(episodes) > 0:
         item.url = episodes[0].url
-    itemlist = []
-    data = httptools.downloadpage(item.url).data
+    data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
+    data = re.sub(r'\n|\t|\s+', ' ', data)
     patron = r''
     url = scrapertools.find_single_match(data, patron)
-
-    data = httptools.downloadpage(url).data
-
+    data = httptools.downloadpage(url, headers=headers, ignore_response_code=True).data
+    data = re.sub(r'\n|\t|\s+', ' ', data)
     itemlist = support.server(item, data=data)
-    if item.contentType == 'movie':
-        support.videolibrary(itemlist, item, 'color kod')
-    # Controlla se i link sono validi
-    # if checklinks:
-    #     itemlist = servertools.check_list_links(itemlist, checklinks_number)
-    #
-    # autoplay.start(itemlist, item)
-    return itemlist
+    return itemlist
+
 # ================================================================================================================
 
 # ----------------------------------------------------------------------------------------------------------------
 def ultimiep(item):
-    logger.info(item.channel + "ultimiep")
+    log()
     itemlist = []
 
-    post = "page=%s" % item.args['page'] if item.args and item.args['page'] else None
+    p = 1
+    if '{}' in item.url:
+        item.url, p = item.url.split('{}')
+        p = int(p)
+
+    post = "page=%s" % p if p > 1 else None
     data = httptools.downloadpage(
         item.url,
         post=post,
         headers={
@@ -259,14 +229,23 @@ def ultimiep(item):
         scrapedtitle2 = cleantitle(scrapedtitle2)
         scrapedtitle = scrapedtitle1 + ' - ' + scrapedtitle2 + ''
 
+        title = scrapedtitle
+        showtitle = scrapedtitle
+        if '(ITA)' in title:
+            title = title.replace('(ITA)', '').strip()
+            showtitle = title
+        else:
+            title += ' ' + support.typo('Sub-ITA', '_ [] color kod')
+
+
         itemlist.append(
             Item(channel=item.channel,
-                 contentType="tvshow",
+                 contentType="episode",
                  action="findvideos",
-                 title=scrapedtitle,
+                 title=title,
                  url=scrapedurl,
                  fulltitle=scrapedtitle1,
-                 show=scrapedtitle1,
+                 show=showtitle,
                  thumbnail=scrapedthumbnail))
 
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -275,27 +254,17 @@ def ultimiep(item):
     patronvideos = r'data-page="(\d+)" title="Next">Pagina Successiva'
     next_page = scrapertools.find_single_match(data, patronvideos)
     if next_page:
-        itemlist.append(
-            Item(
-                channel=item.channel,
-                action="ultimiep",
-                title=support.typo(config.get_localized_string(30992), 'color kod bold'),
-                url=item.url,
-                thumbnail= support.thumb(),
-                args={'page':next_page},
-                folder=True))
+        support.nextPage(itemlist, item, next_page=(item.url + '{}' + next_page))
 
     return itemlist
 
-
 # ================================================================================================================
-
 # ----------------------------------------------------------------------------------------------------------------
 def newest(categoria):
-    logger.info(__channel__ + " newest" + categoria)
+    log(categoria)
     itemlist = []
     item = Item()
     item.url = host
@@ -323,42 +292,9 @@ def newest(categoria):
 # ----------------------------------------------------------------------------------------------------------------
 def search_anime(item, texto):
-    logger.info(item.channel + " search_anime: "+texto)
+    log(texto)
     itemlist = []
 
-    # data = httptools.downloadpage(host + "/animelist?load_all=1").data
-    # data = scrapertools.decodeHtmlentities(data)
-    #
-    # texto = texto.lower().split('+')
-    #
-    # patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
-    # matches = re.compile(patron, re.DOTALL).findall(data)
-    #
-    # for scrapedurl, scrapedtitle in [(scrapedurl, scrapedtitle)
-    #                                  for scrapedurl, scrapedtitle in matches
-    #                                  if all(t in scrapedtitle.lower()
-    #                                         for t in texto)]:
-    #
-    #     title = cleantitle(scrapedtitle).replace('(ita)','(ITA)')
-    #     showtitle = title
-    #     if '(ITA)' in title:
-    #         title = title.replace('(ITA)','').strip()
-    #         showtitle = title
-    #         title += ' '+support.typo(' [ITA] color kod')
-    #
-    #     itemlist.append(
-    #         Item(
-    #             channel=item.channel,
-    #             contentType="episode",
-    #             action="episodios",
-    #             title=title,
-    #             url=scrapedurl,
-    #             fulltitle=title,
-    #             show=showtitle,
-    #             thumbnail=""))
-    #
-    # tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-
     data = httptools.downloadpage(host + "/index.php?search=1&key=%s" % texto).data
     jsondata = jsontools.load(data)
 
@@ -368,16 +304,15 @@ def search_anime(item, texto):
         if 'Anime non esistente' in data:
             continue
         else:
-            title = title.replace('(ita)','(ITA)')
+            title = title.replace('(ita)', '(ITA)')
             showtitle = title
             if '(ITA)' in title:
                 title = title.replace('(ITA)', '').strip()
                 showtitle = title
-                title += ' ' + support.typo(' (ITA)')
+            else:
+                title += ' ' + support.typo('Sub-ITA', '_ [] color kod')
 
             url = "%s/anime/%s" % (host, data)
-            logger.debug(title)
-            logger.debug(url)
 
             itemlist.append(
                 Item(
@@ -397,7 +332,7 @@ def search_anime(item, texto):
 # ----------------------------------------------------------------------------------------------------------------
 def search(item, texto):
-    logger.info(item.channel + " search")
+    log(texto)
     itemlist = []
 
     try:
@@ -416,23 +351,20 @@ def search(item, texto):
 
 def list_az(item):
-    support.log(item.channel+" list_az")
+    log()
     itemlist = []
     alphabet = dict()
 
-    # Scarico la pagina
-    data = httptools.downloadpage(item.url).data
-    # Articoli
     patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
-    matches = re.compile(patron, re.DOTALL).findall(data)
+    matches = support.match(item, patron, headers=headers)[0]
 
     for i, (scrapedurl, scrapedtitle) in enumerate(matches):
         letter = scrapedtitle[0].upper()
         if letter not in alphabet:
             alphabet[letter] = []
-        alphabet[letter].append(scrapedurl+'||'+scrapedtitle)
+        alphabet[letter].append(scrapedurl + '||' + scrapedtitle)
 
     for letter in sorted(alphabet):
         itemlist.append(
@@ -444,4 +376,4 @@ def list_az(item):
 
     return itemlist
 
-# ================================================================================================================
\ No newline at end of file
+# ================================================================================================================
diff --git a/channels/animeworld.py b/channels/animeworld.py
index d5714602..f2ef7dd6 100644
--- a/channels/animeworld.py
+++ b/channels/animeworld.py
@@ -18,11 +18,9 @@ headers = [['Referer', host]]
 IDIOMAS = {'Italiano': 'Italiano'}
 list_language = IDIOMAS.values()
 
-list_servers = ['diretto']
-list_quality = []
+list_servers = ['animeworld', 'verystream', 'streamango', 'openload', 'directo']
+list_quality = ['default', '480p', '720p', '1080p']
 
-checklinks = config.get_setting('checklinks', 'animeworld')
-checklinks_number = config.get_setting('checklinks_number', 'animeworld')
 
 
 def mainlist(item):
@@ -30,7 +28,6 @@ def mainlist(item):
 
     itemlist =[]
 
-    support.menu(itemlist, 'Anime bold', 'lista_anime', host+'/az-list')
     support.menu(itemlist, 'ITA submenu', 'build_menu', host + '/filter?', args=["anime", 'language[]=1'])
     support.menu(itemlist, 'Sub-ITA submenu', 'build_menu', host + '/filter?', args=["anime", 'language[]=0'])
     support.menu(itemlist, 'Archivio A-Z submenu', 'alfabetico', host+'/az-list', args=["tvshow","a-z"])
@@ -47,19 +44,10 @@ def mainlist(item):
 
 def generi(item):
     log()
-    itemlist = []
 
     patron_block = r'\sGeneri\s*'
     patron = r''
-    matches = support.match(item,patron, patron_block, headers)[0]
 
-    for scrapedurl, scrapedtitle in matches:
-        itemlist.append(Item(
-            channel=item.channel,
-            action="video",
-            title=scrapedtitle,
url="%s%s" % (host,scrapedurl))) - - return itemlist + return support.scrape(item, patron, ['url','title'], patron_block=patron_block, action='video') # Crea Menu Filtro ====================================================== @@ -154,14 +142,14 @@ def lista_anime(item): title = scrapedtitle.replace(year,'').replace(lang,'').strip() original = scrapedoriginal.replace(year,'').replace(lang,'').strip() if lang: lang = support.typo(lang,'_ color kod') - title = '[B]' + title + '[/B]' + lang + original + longtitle = '[B]' + title + '[/B]' + lang + original itemlist.append( Item(channel=item.channel, extra=item.extra, contentType="episode", action="episodios", - title=title, + title=longtitle, url=scrapedurl, thumbnail=scrapedthumb, fulltitle=title, @@ -183,7 +171,7 @@ def video(item): log() itemlist = [] - matches, data = support.match(item, r'(.*?)<\/a>') + matches, data = support.match(item, r']+>(.*?)<\/a>', headers=headers) for scrapedurl, scrapedthumb ,scrapedinfo, scrapedoriginal, scrapedtitle in matches: # Cerca Info come anno o lingua nel Titolo @@ -231,6 +219,9 @@ def video(item): # Concatena le informazioni + + lang = support.typo('Sub-ITA', '_ [] color kod') if '(ita)' not in lang.lower() else '' + info = ep + lang + year + ova + ona + movie + special # Crea il title da visualizzare @@ -268,7 +259,6 @@ def episodios(item): itemlist = [] data = httptools.downloadpage(item.url).data.replace('\n', '') - data = re.sub(r'>\s*<', '><', data) block1 = scrapertoolsV2.find_single_match(data, r'
(.*?)
') @@ -305,7 +295,7 @@ def findvideos(item): log() itemlist = [] - episode = '1' + episode = '' if item.extra and item.extra['episode']: data = item.extra['data'] @@ -329,25 +319,6 @@ def findvideos(item): videoData +='\n'+json['grabber'] - if serverid == '33': - post = urllib.urlencode({'r': '', 'd': 'www.animeworld.biz'}) - dataJson = httptools.downloadpage(json['grabber'].replace('/v/','/api/source/'),headers=[['x-requested-with', 'XMLHttpRequest']],post=post).data - json = jsontools.load(dataJson) - log(json['data']) - if json['data']: - for file in json['data']: - itemlist.append( - Item( - channel=item.channel, - action="play", - title='diretto', - url=file['file'], - quality=file['label'], - server='directo', - show=item.show, - contentType=item.contentType, - folder=False)) - if serverid == '28': itemlist.append( Item( diff --git a/channels/cineblog01.py b/channels/cineblog01.py index f2c803cc..4a1c9871 100644 --- a/channels/cineblog01.py +++ b/channels/cineblog01.py @@ -122,7 +122,7 @@ def last(item): if item.contentType == 'episode': matches = support.match(item, r']+)".*?>([^(:(|[)]+)([^<]+)<\/a>', '
-        matches = re.compile(patron, re.DOTALL).findall(data)
+        matches, data = support.match(item, r'(.*?)<',
+                                      headers=headers)
 
     for scrapedurl, scrapedtitle in matches:
         scrapedplot = ""
 
@@ -123,7 +104,9 @@ def pelicuals_tv(item):
         else:
             scrapedurl = "http:" + scrapedurl
 
-        title = scraped_1+" - "+infoLabels['season']+"x"+infoLabels['episode']+" Sub-ITA"
+
+        serie = cleantitle(scraped_1)
+        title = serie + " - " + infoLabels['season'] + "x" + infoLabels['episode'] + " "+support.typo('Sub-ITA', '_ [] color kod')
 
         itemlist.append(
             Item(channel=item.channel,
@@ -134,45 +117,30 @@ def pelicuals_tv(item):
                  url=scrapedurl,
                  thumbnail=scrapedthumbnail,
                  plot=scrapedplot,
-                 show=scraped_1,
+                 show=serie,
                  extra=item.extra,
-                 contentSerieName=scraped_1,
+                 contentSerieName=serie,
                  contentLanguage='Sub-ITA',
                  infoLabels=infoLabels,
                  folder=True))
-
+    support.checkHost(item, itemlist)
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
 
     # Paginazione
-    support.nextPage(itemlist,item,data,'([^<]+)<\/a>'
-    # patron = r'([^<]+)<\/a>'
-    # matches = re.compile(patron, re.DOTALL).findall(block)
-    matches = re.compile(r'', re.DOTALL).findall(block)
+    matches = support.match(Item(), r'',
+                            r'(.*?)', headers,
+                            url="%s/" % host)[0]
     index = 0
-    # for scrapedurl, scrapedtitle in matches:
-    #     scrapedtitle = cleantitle(scrapedtitle)
-    #     if "http:" not in scrapedurl:
-    #         scrapedurl = "http:" + scrapedurl
-    #
-    #     if ('S' in scrapedtitle.strip().upper()[0] and len(scrapedtitle.strip()) == 3) or '02' == scrapedtitle:
-    #         # itemlist[index -1][0]+='{|}'+scrapedurl
-    #         continue
-    #
-    #     itemlist.append([scrapedurl,scrapedtitle])
-    #     index += 1
+
+    for level, cat, title in matches:
         title = cleantitle(title)
         url = '%s?cat=%s' % (host, cat)
@@ -183,12 +151,11 @@ def serietv():
         itemlist.append([url, title])
         index += 1
-
-    logger.debug(itemlist)
     return itemlist
 
 
+
 def lista_serie(item):
-    logger.info(item.channel+" lista_serie")
+    log()
     itemlist = []
 
     p = 1
     if '{}' in item.url:
         item.url, p = item.url.split('{}')
         p = int(p)
 
@@ -196,16 +163,6 @@ def lista_serie(item):
-    # logger.debug(p)
-    # Carica la pagina
-    # data = httptools.downloadpage(item.url, headers=headers).data
-    #
-    # block = scrapertools.find_single_match(data,r'(.*?)')
-    #
-    # # Estrae i contenuti
-    # # patron = r'([^<]+)<\/a>'
-    # patron = r'([^<]+)<\/a>'
-    # matches = re.compile(patron, re.DOTALL).findall(block)
 
     if '||' in item.url:
         series = item.url.split('\n\n')
         matches = []
         for i, serie in enumerate(series):
             matches.append(serie.split('||'))
                 contentType='episode',
                 originalUrl=scrapedurl,
                 folder=True))
-        # ii += 1
+    support.checkHost(item, itemlist)
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
 
     if len(series) >= p * PERPAGE:
-        scrapedurl = item.url + '{}' + str(p + 1)
-        itemlist.append(
-            Item(channel=item.channel,
-                 action='lista_serie',
-                 contentType=item.contentType,
-                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
-                 url=scrapedurl,
-                 args=item.args,
-                 extra=item.extra,
-                 thumbnail=support.thumb()))
+        next_page = item.url + '{}' + str(p + 1)
+        support.nextPage(itemlist, item, next_page=next_page)
 
     return itemlist
 
 
+
 def findvideos(item):
-    logger.info(item.channel+" findvideos")
+    log()
     itemlist = []
 
-    data = httptools.downloadpage(item.url, headers=headers).data
-    bloque = scrapertools.find_single_match(data, '(.*?)')
+    patron_block = r'(.*?)'
+    patron = r'\s[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)'
+    return support.scrape(item, patron, ['url', 'thumb', 'title'], patron_block=patron_block, action='episodios')
 
 # ================================================================================================================
 
 # ----------------------------------------------------------------------------------------------------------------
 def serietvaggiornate(item):
-    support.log(item.channel+" serietvaggiornate")
+    log()
     itemlist = []
 
-    patron_block = r'(.*?)]+> ]+>[^>]+>'
-    patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>'
+    patron_block = r'(.*?)<\/div><\/div>'
+    patron = r']+> ]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^(?:<|\()]+)(?:\(([^\)]+)\))?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>'
-    matches = support.match(item,patron, patron_block, headers)[0]
-
-    for scrapedurl, scrapedthumbnail, scrapedep, scrapedtitle in matches:
-        episode = re.compile(r'^(\d+)x(\d+)', re.DOTALL).findall(scrapedep)  # Prendo stagione ed episodioso
-        scrapedtitle = cleantitle(scrapedtitle)
-
-        contentlanguage = ""
-        if 'sub-ita' in scrapedep.strip().lower():
-            contentlanguage = 'Sub-ITA'
-
-        extra = r']*>' % (
-            episode[0][0], episode[0][1].lstrip("0"))
-
-        infoLabels = {}
-        infoLabels['episode'] = episode[0][1].lstrip("0")
-        infoLabels['season'] = episode[0][0]
-
-        title = str("%s - %sx%s %s" % (scrapedtitle,infoLabels['season'],infoLabels['episode'],contentlanguage)).strip()
-
-        itemlist.append(
-            Item(channel=item.channel,
-                 action="findepvideos",
-                 contentType="episode",
-                 title=title,
-                 show=scrapedtitle,
-                 fulltitle=scrapedtitle,
-                 url=scrapedurl,
-                 extra=extra,
-                 thumbnail=scrapedthumbnail,
-                 contentLanguage=contentlanguage,
-                 infoLabels=infoLabels,
-                 folder=True))
-
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-
-    return itemlist
+    return support.scrape(item, patron, ['url', 'thumb', 'episode', 'lang', 'title'], patron_block=patron_block, action='findvideos')
 
 # ================================================================================================================
 
 # ----------------------------------------------------------------------------------------------------------------
 def categorie(item):
-    support.log(item.channel+" categorie")
-    itemlist = []
-
-    matches = support.match(item, r'&#8226; \s*]+>([^<]+)&#8226; ', r'(.*?)', headers)[0]
-
-    for scrapedurl, scrapedtitle in matches:
-        itemlist.append(
-            Item(channel=item.channel,
-                 action="lista_serie",
-                 title=scrapedtitle,
-                 contentType="tvshow",
-                 url="".join([host, scrapedurl]),
-                 thumbnail=item.thumbnail,
-                 extra="tv",
-                 folder=True))
-
-    return itemlist
+    log()
+    return support.scrape(item, r'&#8226; \s]+>([^<]+)&#8226; ', ['url', 'title'], patron_block=r'(.*?)', headers=headers, action="lista_serie")
 
 # ================================================================================================================
 
 # ----------------------------------------------------------------------------------------------------------------
 def lista_serie(item):
-    support.log(item.channel+" lista_serie")
+    log()
     itemlist = []
 
-    # data = httptools.downloadpage(item.url, headers=headers).data
-    #
-    # patron = r'\s*[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)'
-    # blocco = scrapertools.find_single_match(data,
-    #                                         r'(.*?)')
-    # matches = re.compile(patron, re.DOTALL).findall(blocco)
+    patron_block = r'(.*?)'
+    patron = r'\s[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)'
-    patron_block = r'(.*?)'
-    patron = r'\s*[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)'
-
-    matches, data = support.match(item, patron, patron_block, headers)
-
-
-    for scrapedurl, scrapedimg, scrapedtitle in matches:
-        scrapedtitle = cleantitle(scrapedtitle)
-
-        if scrapedtitle not in ['DMCA','Contatti','Lista di tutte le serie tv']:
-            itemlist.append(
-                Item(channel=item.channel,
-                     action="episodios",
-                     contentType="episode",
-                     title=scrapedtitle,
-                     fulltitle=scrapedtitle,
-                     url=scrapedurl,
-                     thumbnail=scrapedimg,
-                     extra=item.extra,
-                     show=scrapedtitle,
-                     folder=True))
-
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-
-    support.nextPage(itemlist,item,data,r"(.*?)")
 
-    patron = r'\s*([^<]+)<\/div>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>]+>([^<]+)<[^>]+>[^>]+>[^>]+>'
-    patron += r'[^<]+[^"]+".*?serie="([^"]+)".*?stag="([0-9]*)".*?ep="([0-9]*)"\s*'
-    patron += r'.*?embed="([^"]+)"\s*.*?embed2="([^"]+)?"\s*.*?embed3="([^"]+)?"?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*'
-    patron += r'(?:]+>|]+>)?'
-    # matches = re.compile(patron, re.DOTALL).findall(data)
-
-    # logger.debug(matches)
+    patron = r'\s([^<]+)<\/div>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>]+>([^<]+)<[^>]+>[^>]+>[^>]+>'
+    patron += r'[^"]+".*?serie="([^"]+)".*?stag="([0-9]*)".*?ep="([0-9]*)"\s'
+    patron += r'.*?embed="([^"]+)"\s.*?embed2="([^"]+)?"\s.*?embed3="([^"]+)?"?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s?'
+    patron += r'(?:]+>|]+>)?'
     matches = support.match(item, patron, headers=headers)[0]
 
-    for scrapedtitle, scrapedepisodetitle, scrapedplot, scrapedserie, scrapedseason, scrapedepisode, scrapedurl, scrapedurl2,scrapedurl3,scrapedthumbnail,scrapedthumbnail2 in matches:
+    for scrapedtitle, scrapedepisodetitle, scrapedplot, scrapedserie, scrapedseason, scrapedepisode, scrapedurl, scrapedurl2, scrapedurl3, scrapedthumbnail, scrapedthumbnail2 in matches:
         scrapedtitle = cleantitle(scrapedtitle)
         scrapedepisode = scrapedepisode.zfill(2)
         scrapedepisodetitle = cleantitle(scrapedepisodetitle)
         title = str("%sx%s %s" % (scrapedseason, scrapedepisode, scrapedepisodetitle)).strip()
         if 'SUB-ITA' in scrapedtitle:
-            title +=" Sub-ITA"
+            title += " "+support.typo("Sub-ITA", '_ [] color kod')
 
         infoLabels = {}
         infoLabels['season'] = scrapedseason
         infoLabels['episode'] = scrapedepisode
 
         itemlist.append(
-            Item(channel=item.channel,
-                 action="findvideos",
-                 title=title,
-                 fulltitle=scrapedtitle,
-                 url=scrapedurl+"\r\n"+scrapedurl2+"\r\n"+scrapedurl3,
-                 contentType="episode",
-                 plot=scrapedplot,
-                 contentSerieName=scrapedserie,
-                 contentLanguage='Sub-ITA' if 'Sub-ITA' in title else '',
-                 infoLabels=infoLabels,
-                 thumbnail=scrapedthumbnail2 if scrapedthumbnail2 != '' else scrapedthumbnail,
-                 folder=True))
+            Item(channel=item.channel,
+                 action="findvideos",
+                 title=support.typo(title, 'bold'),
+                 fulltitle=scrapedtitle,
+                 url=scrapedurl + "\r\n" + scrapedurl2 + "\r\n" + scrapedurl3,
+                 contentType="episode",
+                 plot=scrapedplot,
+                 contentSerieName=scrapedserie,
+                 contentLanguage='Sub-ITA' if 'Sub-ITA' in title else '',
+                 infoLabels=infoLabels,
+                 thumbnail=scrapedthumbnail2 if scrapedthumbnail2 != '' else scrapedthumbnail,
+                 folder=True))
 
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
 
@@ -315,22 +197,24 @@ def episodios(item):
 
 # ----------------------------------------------------------------------------------------------------------------
 def findepvideos(item):
-    support.log(item.channel+" findepvideos")
-    data = httptools.downloadpage(item.url, headers=headers).data
+    log()
+    data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
     matches = scrapertools.find_multiple_matches(data, item.extra)
     data = "\r\n".join(matches[0])
     item.contentType = 'movie'
-    itemlist = support.server(item, data=data)
-
-    return itemlist
+    return support.server(item, data=data)
 
 # ================================================================================================================
 
 # ----------------------------------------------------------------------------------------------------------------
 def findvideos(item):
-    support.log(item.channel+" findvideos")
-    logger.debug(item.url)
-    itemlist = support.server(item, data=item.url)
-
-    return itemlist
+    log()
+    if item.contentType == 'tvshow':
+        data = httptools.downloadpage(item.url, headers=headers).data
+        matches = scrapertools.find_multiple_matches(data, item.extra)
+        data = "\r\n".join(matches[0])
+    else:
+        log(item.url)
+        data = item.url
+    return support.server(item, data)
diff --git a/channels/serietvsubita.py b/channels/serietvsubita.py
index 3ce72d6b..693e7487 100644
--- a/channels/serietvsubita.py
+++ b/channels/serietvsubita.py
@@ -1,16 +1,16 @@
 # -*- coding: utf-8 -*-
 # ------------------------------------------------------------
-# Canale per Serie Tv Sub ITA
-# Thanks to Icarus crew & Alfa addon
+# Canale per Serietvsubita
+# Thanks to Icarus crew & Alfa addon & 4l3x87
 # ----------------------------------------------------------
+
 import re
 import time
 
-import channelselector
 from core import httptools, tmdb, scrapertools, support
 from core.item import Item
+from core.support import log
 from platformcode import logger, config
-from specials import autoplay
 
 __channel__ = "serietvsubita"
 host = config.get_setting("channel_host", __channel__)
@@ -18,33 +18,19 @@ headers = [['Referer', host]]
 IDIOMAS = {'Italiano': 'IT'}
 list_language = IDIOMAS.values()
 
-list_servers = ['gounlimited','verystream','streamango','openload']
+list_servers = ['gounlimited', 'verystream', 'streamango', 'openload']
 list_quality = ['default']
 
-# checklinks = config.get_setting('checklinks', __channel__)
-# checklinks_number = config.get_setting('checklinks_number', __channel__)
-
 
 def mainlist(item):
-    support.log(item.channel + 'mainlist')
+    log()
     itemlist = []
 
-    support.menu(itemlist, 'Serie TV bold', 'lista_serie', host,'tvshow')
-    support.menu(itemlist, 'Novità submenu', 'peliculas_tv', host,'tvshow')
-    support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow',args=['serie'])
-    support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
-
-
-    autoplay.init(item.channel, list_servers, list_quality)
-    autoplay.show_option(item.channel, itemlist)
-
-    itemlist.append(
-        Item(channel='setting',
-             action="channel_config",
-             title=support.typo("Configurazione Canale color lime"),
-             config=item.channel,
-             folder=False,
-             thumbnail=channelselector.get_thumb('setting_0.png'))
-    )
+    support.menu(itemlist, 'Novità bold', 'peliculas_tv', host, 'tvshow')
+    support.menu(itemlist, 'Serie TV bold', 'lista_serie', host, 'tvshow')
+    support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host, 'tvshow', args=['serie'])
+    support.menu(itemlist, 'Cerca', 'search', host, 'tvshow')
+    support.aplay(item, itemlist, list_servers, list_quality)
+    support.channel_config(item, itemlist)
 
     return itemlist
 
@@ -52,20 +38,57 @@ def mainlist(item):
 # ----------------------------------------------------------------------------------------------------------------
 def cleantitle(scrapedtitle):
     scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
-    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×','x').replace('Game of Thrones –','').replace('In The Dark 2019','In The Dark (2019)').strip()
'\'').replace('×','x').replace('Game of Thrones –','').replace('In The Dark 2019','In The Dark (2019)').strip() + scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×', 'x').replace('Game of Thrones –','')\ + .replace('In The Dark 2019', 'In The Dark (2019)').replace('"', "'").strip() year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)') if year: scrapedtitle = scrapedtitle.replace('(' + year + ')', '') - return scrapedtitle.strip() # ================================================================================================================ +# ---------------------------------------------------------------------------------------------------------------- +def findvideos(item): + log() + data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data + data = re.sub(r'\n|\t|\s+', ' ', data) + # recupero il blocco contenente i link + blocco = scrapertools.find_single_match(data, r'
    ([\s\S.]*?)
    ([^<]+)' + matches = support.match(item, patron, headers=headers)[0] for i, (scrapedurl, scrapedtitle) in enumerate(matches): scrapedplot = "" @@ -112,90 +132,58 @@ def lista_serie(item): # Paginazione if len(matches) >= p * PERPAGE: - scrapedurl = item.url + '{}' + str(p + 1) - itemlist.append( - Item(channel=item.channel, - action='lista_serie', - contentType=item.contentType, - title=support.typo(config.get_localized_string(30992), 'color kod bold'), - url=scrapedurl, - args=item.args, - thumbnail=support.thumb())) + support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1))) return itemlist + # ================================================================================================================ # ---------------------------------------------------------------------------------------------------------------- def episodios(item, itemlist=[]): - support.log(item.channel + " episodios") - # itemlist = [] + log() + patron = r'