diff --git a/plugin.video.alfa/channels/animejl.json b/plugin.video.alfa/channels/animejl.json
index aae006fd..9fa69fcd 100644
--- a/plugin.video.alfa/channels/animejl.json
+++ b/plugin.video.alfa/channels/animejl.json
@@ -4,7 +4,7 @@
     "active": true,
     "adult": false,
     "language": ["cast", "lat"],
-    "thumbnail": "https://www.animejl.net/img/Logo.png",
+    "thumbnail": "https://i.imgur.com/S6foTE9.png",
     "banner": "",
     "categories": [
         "anime"
@@ -27,4 +27,4 @@
             "visible": true
         }
     ]
-}
\ No newline at end of file
+}
diff --git a/plugin.video.alfa/channels/animeshd.py b/plugin.video.alfa/channels/animeshd.py
index 18d53b81..12c4778b 100644
--- a/plugin.video.alfa/channels/animeshd.py
+++ b/plugin.video.alfa/channels/animeshd.py
@@ -113,9 +113,10 @@ def lista(item):
     patron = 'class="anime">'
     patron +='.*?([^<]+)<\/h2>'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    context = renumbertools.context(item)
-    context2 = autoplay.context
-    context.extend(context2)
+    if item.extra != "next":
+        context = renumbertools.context(item)
+        context2 = autoplay.context
+        context.extend(context2)
     for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
         url = scrapedurl
         thumbnail = host + scrapedthumbnail
@@ -133,12 +134,12 @@ def lista(item):
     ' (.*?)[\s\S]+?[\s\S]+?', re.DOTALL).findall(data)
+    itemlist = []
+    for episode, url, thumbnail,season in matches:
+
+        if item.extra == "watch-series":
+            scrapedinfo = season.split(' - ')
+            scrapedtitle = scrapedinfo[0]
+            season = scrapertools.find_single_match(scrapedinfo[1], 'Season (\d+)')
+            episode = scrapertools.find_single_match(episode, 'Episode (\d+)')
+            title = scrapedtitle + " %sx%s" % (season, episode)
+        else:
+            scrapedtitle = season
+            title = scrapedtitle + ' - ' + episode
+        url = urlparse.urljoin(host, url)
+
+        new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
+                        contentSerieName=scrapedtitle,)
+        itemlist.append(new_item)
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='en')
+    if url_pagination:
+        url = urlparse.urljoin(host + item.extra, url_pagination)
+        title = ">> Pagina Siguiente"
+        itemlist.append(Item(channel=item.channel, action="novedades_episodios", title=title, url=url, extra=item.extra))
+    return itemlist
+
+
+def novedades_cine(item):
+    logger.info()
+    data = httptools.downloadpage(item.url).data
+    url_pagination = scrapertools.find_single_match(data, "