# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for Serietvsubita
# Thanks to Icarus crew & Alfa addon & 4l3x87
# ----------------------------------------------------------
import re
import time

from core import httptools, tmdb, scrapertools, support
from core.item import Item
from core.support import log
from platformcode import logger, config

# Base URL of the channel, resolved from the addon configuration.
host = config.get_channel_url()

headers = [['Referer', host]]

# Subtitle-language map used by the framework (Italian-only channel).
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['gounlimited', 'rapidgator', 'uploadedto', 'vidtome']
list_quality = ['default']


@support.menu
def mainlist(item):
    """Build the channel's root menu.

    The @support.menu decorator consumes the local variables returned by
    locals(): each (label, [url, action, args, content_type]) tuple becomes
    a menu entry.
    """
    log()
    itemlist = []

    tvshowSub = [
        ('Novità bold', ['', 'peliculas_tv', '', 'tvshow']),
        ('Serie TV bold', ['', 'lista_serie', '', 'tvshow']),
        ('Per Lettera', ['', 'list_az', 'serie', 'tvshow'])
    ]

    cerca = [(support.typo('Cerca...', 'bold'), ['', 'search', '', 'tvshow'])]

##    support.aplay(item, itemlist, list_servers, list_quality)
##    support.channel_config(item, itemlist)

    return locals()


# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
    """Normalize a scraped show title.

    Decodes HTML entities, strips site-specific noise ('[HD]', typographic
    quotes, '×' separators, known bad title variants) and removes a trailing
    '(YYYY)' year suffix if present.
    """
    scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×', 'x').replace('Game of Thrones –', '')\
        .replace('In The Dark 2019', 'In The Dark (2019)').replace('"', "'").strip()
    # Raw string for the regex literal (was a plain string with escapes).
    year = scrapertools.find_single_match(scrapedtitle, r'\((\d{4})\)')
    if year:
        scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
    return scrapedtitle.strip()
# ================================================================================================================


# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
    """Collect playable links for *item* (page download + whitespace squash)."""
    log()
    data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
    data = re.sub(r'\n|\t|\s+', ' ', data)
    # NOTE(review): the rest of this function — the pattern that extracts the
    # link block — was lost in this copy (the regex literals had their HTML
    # content stripped). Restore the extraction patterns from upstream
    # before shipping; do not guess them.
recupero il blocco contenente i link blocco = scrapertools.find_single_match(data, r'
([\s\S.]*?)
([^<]+)' matches = support.match(item, patron=patron, headers=headers).matches for i, (scrapedurl, scrapedtitle) in enumerate(matches): scrapedplot = "" scrapedthumbnail = "" if (p - 1) * PERPAGE > i: continue if i >= p * PERPAGE: break title = cleantitle(scrapedtitle) itemlist.append( Item(channel=item.channel, extra=item.extra, action="episodios", title=title, url=scrapedurl, thumbnail=scrapedthumbnail, fulltitle=title, show=title, plot=scrapedplot, contentType='episode', originalUrl=scrapedurl, folder=True)) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) # Paginazione if len(matches) >= p * PERPAGE: support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1))) return itemlist # ================================================================================================================ # ---------------------------------------------------------------------------------------------------------------- def episodios(item, itemlist=[]): log() patron = r'