# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for SerieHD
# ------------------------------------------------------------
from core import scrapertoolsV2, httptools, support
from core.item import Item

__channel__ = 'seriehd'
# host = support.config.get_channel_url(__channel__)
# host and headers are set dynamically by findhost()
host = ''
headers = ''


def findhost():
    # Resolve the channel's current domain from the SerieHD landing page.
    data = httptools.downloadpage('https://seriehd.nuovo.link/').data
    global host, headers
    # NOTE: the href-capture pattern below is a hypothetical placeholder; the
    # real pattern must match wherever the landing page links the current host.
    host = scrapertoolsV2.find_single_match(data, r'<a href="([^"]+)"')
    # Assumed: send the resolved host as Referer on subsequent requests.
    headers = [['Referer', host]]
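

# For reading findhost() without the KoD core at hand: a rough, plain-`re`
# approximation of how scrapertoolsV2.find_single_match is used here (return
# the first captured group, or '' when nothing matches). Illustrative sketch
# only; the real helper lives in core/scrapertoolsV2.
def _find_single_match_sketch(data, patron):
    import re
    match = re.search(patron, data, re.S)
    return match.group(1) if match else ''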


@support.scrape
def peliculas(item):
    # Series listing: each entry exposes title, thumbnail, detail URL, year
    # and quality badge; patronNext follows the pagination link.
    patron = r'<h2>(?P<title>.*?)</h2>\s*<img src="(?P<thumb>[^"]+)" alt="[^"]*" />\s*<A HREF="(?P<url>[^"]+)">.*?<span class="year">(?:(?P<year>[0-9]{4}))?.*?<span class="calidad">(?:(?P<quality>[A-Z]+))?.*?</span>'
    patronNext = r'<span class="current">\d+</span><a rel="nofollow" class="page larger" href="([^"]+)">\d+</a>'
    action = 'episodios'
    return locals()


@support.scrape
def episodios(item):
    data = ''
    # The season/episode picker is served from an embedded player iframe.
    url = support.match(item, patronBlock=r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">')[1]
    # Season links live in the STAGIONE list, episode links in the EPISODIO list.
    seasons = support.match(item, r'<a href="([^"]+)">(\d+)<', r'<h3>STAGIONE</h3><ul>(.*?)</ul>', headers, url)[0]
    for season_url, season in seasons:
        season_url = support.urlparse.urljoin(url, season_url)
        episodes = support.match(item, r'<a href="([^"]+)">(\d+)<', r'<h3>EPISODIO</h3><ul>(.*?)</ul>', headers, season_url)[0]
        for episode_url, episode in episodes:
            episode_url = support.urlparse.urljoin(url, episode_url)
            # Flatten to "1x01|episode_url" lines for the patron below.
            title = season + "x" + episode.zfill(2)
            data += title + '|' + episode_url + '\n'
    support.log('data =', data)

    patron = r'(?P<title>[^\|]+)\|(?P<url>[^\n]+)\n'
    action = 'findvideos'
    return locals()


def findvideos(item):
    support.log()
    # Resolve the actual hoster links through the shared HDPass helper.
    return support.hdpass_get_servers(item)
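

# A minimal, self-contained sketch (plain `re`, standard library only, made-up
# URLs) of the pack/parse round trip episodios() relies on: episodes are
# flattened into "SxEE|url" lines, then re-parsed with the same kind of
# named-group patron so @support.scrape can turn them into menu items.
def _episodios_roundtrip_demo():
    import re
    entries = [('1', '1', 'https://example.org/s1e1'),
               ('1', '2', 'https://example.org/s1e2')]
    # Pack: same "season x zero-padded episode | url" lines episodios() builds.
    data = ''
    for season, episode, url in entries:
        data += season + 'x' + episode.zfill(2) + '|' + url + '\n'
    # Parse: same named-group patron episodios() hands back to the decorator.
    patron = r'(?P<title>[^\|]+)\|(?P<url>[^\n]+)\n'
    # Returns [('1x01', 'https://example.org/s1e1'), ('1x02', 'https://example.org/s1e2')].
    return [(m.group('title'), m.group('url')) for m in re.finditer(patron, data)]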