+        # NOTE: the HTML tags in this selector were mangled in the diff; this is
+        # a best-effort reconstruction and must be checked against the page markup.
+        patron = r'<a href="([^"]+)"\s*[^>]+>(?:<[^>]+>|)%s(?:<[^>]+>[^>]+</[^>]+>|[^<]*|<[^>]+>)' % eptype
+        itm.title = support.color(title, 'azure').strip()
+        itm.action = "findvideos"
+        itm.url = url
+        itm.fulltitle = cleantitle
+        itm.extra = extra
+        itm.show = re.sub(r'Episodio\s*', '', title)
+        itm.thumbnail = item.thumbnail
+
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+    return itemlist
+
+# ================================================================================================================
+
+# ----------------------------------------------------------------------------------------------------------------
+def lista_anime(item, nextpage=True, show_lang=True):
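+    # Builds the anime list: pulls the list block out of the page, extracts
+    # (url, title) pairs, strips language/episode markers to get a clean show
+    # title for the TMDB lookup, and flags OAV entries as movies.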
+    logger.info()
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+    # NOTE: the tags in the selectors below were lost in the diff; both
+    # patterns are reconstructions and must be verified against the site markup.
+    blocco = scrapertools.find_single_match(data, r'<div class="entry">(.*?)</div>')
+    # patron = r'<a href="([^"]+)">\s*<img src="([^"]+)"[^>]+>'  # Pattern with thumbnails; Kodi does not fetch the images from the site
+    patron = r'<a href="([^"]+)"[^>]*>([^<]+)</a>'
+    matches = re.compile(patron, re.DOTALL).findall(blocco)
+
+    for scrapedurl, scrapedtitle in matches:
+        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
+        scrapedtitle = re.sub(r'\s+', ' ', scrapedtitle)
+        # Title clean-up
+        scrapedtitle = scrapedtitle.replace("Streaming", "").replace("&", "")
+        scrapedtitle = scrapedtitle.replace("Download", "")
+        lang = scrapertools.find_single_match(scrapedtitle, r"([Ss][Uu][Bb]\s*[Ii][Tt][Aa])")
+        scrapedtitle = scrapedtitle.replace("Sub Ita", "").strip()
+        eptype = scrapertools.find_single_match(scrapedtitle, r"(Episodio?|OAV)")
+
+        # Guard against empty matches: str.replace/re.sub with an empty
+        # pattern would mangle the title instead of cleaning it.
+        cleantitle = scrapedtitle
+        if eptype:
+            cleantitle = re.sub(r'%s\s*\d*\s*(?:\(\d+\)|)' % eptype, '', cleantitle)
+        if lang:
+            cleantitle = cleantitle.replace(lang, "")
+        cleantitle = cleantitle.strip()
+
+        if lang:
+            title = scrapedtitle.replace(lang, "(%s)" % support.color(lang, "red") if show_lang else "").strip()
+        else:
+            title = scrapedtitle
+
+        itemlist.append(
+            Item(channel=item.channel,
+                 action="episodi",
+                 contentType="tvshow" if 'oav' not in scrapedtitle.lower() else "movie",
+                 title=title,
+                 fulltitle=cleantitle,
+                 url=scrapedurl,
+                 show=cleantitle,
+                 folder=True))
+
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+    if nextpage:
+        # NOTE: reconstructed "next page" selector; verify against the site markup.
+        patronvideos = r'<a class="[^"]*next[^"]*" href="([^"]+)"'
+        matches = re.compile(patronvideos, re.DOTALL).findall(data)
+
+        if len(matches) > 0:
+            scrapedurl = matches[0]
+            itemlist.append(
+                Item(channel=item.channel,
+                     action="lista_anime",
+                     title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
+                     url=scrapedurl,
+                     thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
+                     folder=True))
+
+    return itemlist
+
+# ================================================================================================================
+
+# ----------------------------------------------------------------------------------------------------------------
+def lista_anime_completa(item):
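+    # The site serves the full A-Z catalogue on a single page, so pagination
+    # is emulated client-side: the current page number travels in the url
+    # after a '{}' marker and only PERPAGE entries are emitted per call.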
+    logger.info()
+    itemlist = []
+
+    p = 1
+    if '{}' in item.url:
+        item.url, p = item.url.split('{}')
+        p = int(p)
+
+    data = httptools.downloadpage(item.url).data
+    # NOTE: reconstructed selectors; the original tags were lost in the diff,
+    # so verify them against the site markup.
+    blocco = scrapertools.find_single_match(data, r'<ul class="lcp_catlist"[^>]*>(.*?)</ul>')
+    patron = r'<a href="([^"]+)"[^>]+>([^<]+)'
+    matches = re.compile(patron, re.DOTALL).findall(blocco)
+
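+    # Keep only the PERPAGE-sized slice that belongs to page p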
+    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
+        if (p - 1) * PERPAGE > i: continue
+        if i >= p * PERPAGE: break
+
+        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
+        cleantitle = scrapedtitle.replace("Sub Ita Streaming", "").replace("Ita Streaming", "")
+
+        itemlist.append(
+            Item(channel=item.channel,
+                 action="episodi",
+                 contentType="tvshow" if 'oav' not in scrapedtitle.lower() else "movie",
+                 title=support.color(scrapedtitle, 'azure'),
+                 fulltitle=cleantitle,
+                 show=cleantitle,
+                 url=scrapedurl,
+                 folder=True))
+
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+    if len(matches) >= p * PERPAGE:
+        # There are more entries left: queue the next client-side page
+        scrapedurl = item.url + '{}' + str(p + 1)
+        itemlist.append(
+            Item(channel=item.channel,
+                 extra=item.extra,
+                 action="lista_anime_completa",
+                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
+                 url=scrapedurl,
+                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
+                 folder=True))
+
+    return itemlist
+
+# ================================================================================================================
+
+# ----------------------------------------------------------------------------------------------------------------
+def episodi(item):
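+    # Lists the episodes of a show: scrapes the episode table, skips rows
+    # whose buttons mark them as unavailable, and rejects VVVVID links,
+    # which this channel cannot resolve.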
+    logger.info()
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+
+    # NOTE: reconstructed row selector (title, episode link, button image);
+    # the original tags were lost in the diff, so verify it against the page markup.
+    patron = r'<td[^>]*>\s*(.*?)\s*</td>\s*<td[^>]*>\s*<a href="([^"]+)"[^>]*>\s*<img[^>]*src="([^"]+)"'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedtitle, scrapedurl, scrapedimg in matches:
+        if 'nodownload' in scrapedimg or 'nostreaming' in scrapedimg:
+            continue
+        if 'vvvvid' in scrapedurl.lower():
+            itemlist.append(Item(title='I Video VVVVID Non sono supportati'))
+            continue
+
+        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
+        scrapedtitle = re.sub(r'<[^>]*?>', '', scrapedtitle)
+        scrapedtitle = '[COLOR azure][B]' + scrapedtitle + '[/B][/COLOR]'
+        itemlist.append(
+            Item(channel=item.channel,
+                 action="findvideos",
+                 contentType="episode",
+                 title=scrapedtitle,
+                 url=urlparse.urljoin(host, scrapedurl),
+                 fulltitle=item.title,
+                 show=scrapedtitle,
+                 plot=item.plot,
+                 fanart=item.thumbnail,
+                 thumbnail=item.thumbnail))
+
+    # Service commands
+    if config.get_videolibrary_support() and len(itemlist) != 0:
+        itemlist.append(
+            Item(channel=item.channel,
+                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
+                 url=item.url,
+                 action="add_serie_to_library",
+                 extra="episodios",
+                 show=item.show))
+
+    return itemlist
+
+# ================================================================================================================
+
+# ----------------------------------------------------------------------------------------------------------------
+def findvideos(item):
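+    # Resolves an episode entry into playable video items; when item.extra is
+    # set, the real episode url is first dug out of the matching block of the
+    # show page before the servers are probed.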
+    logger.info()
+    itemlist = []
+
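+    # Spoof a desktop Firefox User-Agent; the site may block default clients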
+    headers = {'Upgrade-Insecure-Requests': '1',
+               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'}
+
+    if item.extra:
+        data = httptools.downloadpage(item.url, headers=headers).data
+        # NOTE: reconstructed selectors; the original patterns were lost in
+        # the diff, so check them against the actual page markup.
+        blocco = scrapertools.find_single_match(data, r'%s(.*?)(?:</tr>|</table>)' % item.extra)
+        item.url = scrapertools.find_single_match(blocco, r'<a href="([^"]+)"')