diff --git a/channels/toonitalia.json b/channels/toonitalia.json
index 1ba2b01d..a8cd2928 100644
--- a/channels/toonitalia.json
+++ b/channels/toonitalia.json
@@ -17,22 +17,6 @@
             "visible": true
         },
         {
-            "id": "include_in_newest_peliculas",
-            "type": "bool",
-            "label": "Includi in Novità - Film",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_newest_italiano",
-            "type": "bool",
-            "label": "Includi in Novità - Italiano",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
             "id": "checklinks",
             "type": "bool",
             "label": "Verifica se i link esistono",
diff --git a/channels/toonitalia.py b/channels/toonitalia.py
index 288591e8..ef630d7a 100644
--- a/channels/toonitalia.py
+++ b/channels/toonitalia.py
@@ -1,16 +1,12 @@
 # -*- coding: utf-8 -*-
 # ------------------------------------------------------------
-# Ringraziamo Icarus crew
 # Canale per ToonItalia
 # ------------------------------------------------------------

 import re

-from core import scrapertools, httptools, tmdb, support, servertools
-from core.item import Item
-from platformcode import logger
-from specials import autoplay
-from platformcode import config
+from core import httptools, support
+from platformcode import config, logger

 __channel__ = "toonitalia"
 host = config.get_channel_url(__channel__)
@@ -20,304 +16,25 @@ headers = [['Referer', host]]
 list_servers = ['wstream', 'openload', 'streamango']
 list_quality = ['HD', 'default']

+
+@support.menu
 def mainlist(item):
-    # Main options
-    itemlist = []
-    support.menu(itemlist, 'Ultimi episodi inseriti bold', 'insert', host, contentType='episode')
-    support.menu(itemlist, 'Ultime novità bold', 'updates', host, contentType='episode')
-    support.menu(itemlist, 'Episodi più visti bold', 'most_view', host, contentType='episode')
-    support.menu(itemlist, 'Anime', 'list', host + '/lista-anime-2/', contentType='episode')
 ('Sub-Ita ', ['/lista-anime-sub-ita/', 'list', ])
+    top = [('Novità',['', 'peliculas', 'new', 'tvshow']),
+           ('Aggiornamenti', ['', 'peliculas', 'last', 'tvshow']),
+           ('Popolari', ['', 'peliculas', 'most_view', 'tvshow'])]
+    tvshow = '/lista-serie-tv/'
-    support.menu(itemlist, 'Film Animazione bold', 'list', host + '/lista-film-animazione/', contentType="episode", args="film")
+    anime = '/lista-anime-2/'
+    animeSub =[('Sub-Ita',['/lista-anime-sub-ita/']),
+              ('Film Animati',['/lista-film-animazione/','peliculas', 'movie'])]
+    search = ''
-
-    autoplay.init(item.channel, list_servers, list_quality)
-    autoplay.show_option(item.channel, itemlist)
-
-    return itemlist
-
-#----------------------------------------------------------------------------------------------------------------------------------------------
-
-def insert(item):
-    logger.info("[toonitalia.py] insert")
-    itemlist = []
-    minpage = 14
-
-    p = 1
-    if '{}' in item.url:
-        item.url, p = item.url.split('{}')
-        p = int(p)
-
-    data = httptools.downloadpage(item.url, headers=headers).data
-
-    patron = r']+> (.*?)<\/p>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    for i, (scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot) in enumerate(matches):
-        if (p - 1) * minpage > i: continue
-        if i >= p * minpage: break
-        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
-
-        itemlist.append(
-            Item(channel=__channel__,
-                 action="episodios",
-                 contentType="episode",
-                 title=scrapedtitle,
-                 fulltitle=scrapedtitle,
-                 url=scrapedurl,
-                 show=scrapedtitle,
-                 thumbnail=scrapedthumbnail,
-                 plot=scrapedplot,
-                 folder=True))
-
-    if len(matches) >= p * minpage:
-        scrapedurl = item.url + '{}' + str(p + 1)
-        itemlist.append(
-            Item(channel=__channel__,
-                 args=item.args,
-                 action="insert",
-                 title="[COLOR blue][B]Successivo >[/B][/COLOR]",
-                 url=scrapedurl,
-                 thumbnail="thumb_next.png",
-                 folder=True))
-
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-
-    return itemlist
-
-#----------------------------------------------------------------------------------------------------------------------------------------------
-
-def updates(item):
-    logger.info("[toonitalia.py] updates")
-    itemlist = []
-
-    data = httptools.downloadpage(item.url, headers=headers).data
-
-    blocco = r'Aggiornamenti(.*?)'
-    matches = re.compile(blocco, re.DOTALL).findall(data)
-    for scrapedurl in matches:
-        blocco = scrapedurl
-
-    patron = r'(.*?)'
-    matches = re.compile(patron, re.DOTALL).findall(blocco)
-
-    for scrapedurl, scrapedtitle in matches:
-        scrapedplot = ""
-        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
-        itemlist.append(
-            Item(channel=__channel__,
-                 action="episodios",
-                 contentType="episode",
-                 title=scrapedtitle,
-                 fulltitle=scrapedtitle,
-                 url=scrapedurl,
-                 show=scrapedtitle,
-                 plot=scrapedplot))
-
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-    return itemlist
-
-#----------------------------------------------------------------------------------------------------------------------------------------------
-
-def most_view(item):
-    logger.info("[toonitalia.py] most_view")
-    itemlist = []
-
-    data = httptools.downloadpage(item.url, headers=headers).data
-
-    blocco = r'I piu visti(.*?)'
-    matches = re.compile(blocco, re.DOTALL).findall(data)
-    for scrapedurl in matches:
-        blocco = scrapedurl
-
-    patron = r'([^<]+)'
-    matches = re.compile(patron, re.DOTALL).findall(blocco)
-
-    for scrapedurl, scrapedtitle in matches:
-        scrapedplot = ""
-        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
-        itemlist.append(
-            Item(channel=__channel__,
-                 action="episodios",
-                 contentType="episode",
-                 title=scrapedtitle,
-                 fulltitle=scrapedtitle,
-                 url=scrapedurl,
-                 show=scrapedtitle,
-                 plot=scrapedplot))
-
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-    return itemlist
-
-#----------------------------------------------------------------------------------------------------------------------------------------------
-
-def list(item):
-    logger.info("[toonitalia.py] list")
-    itemlist = []
-    minpage = 14
-
-    p = 1
-    if '{}' in item.url:
-        item.url, p = item.url.split('{}')
-        p = int(p)
-
-    data = httptools.downloadpage(item.url, headers=headers).data
-
-    patron = r' ([^<]+) | )(?P]+src="([^"]+)" class[^>]+>.*?'
-    patron += r'([^<]+).*?]+>([^<]+)'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    if "https://vcrypt.net" in data:
-        patron = r'(?:)([^<]+) – (?P|