diff --git a/channels/animeleggendari.json b/channels/animeleggendari.json index 6371a381..2c7a4812 100644 --- a/channels/animeleggendari.json +++ b/channels/animeleggendari.json @@ -16,22 +16,6 @@ "enabled": false, "visible": false }, - { - "id": "include_in_newest_anime", - "type": "bool", - "label": "Includi in Novità - Anime", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_italiano", - "type": "bool", - "label": "Includi in Novità - Italiano", - "default": true, - "enabled": true, - "visible": true - }, { "id": "checklinks", "type": "bool", @@ -49,15 +33,6 @@ "visible": "eq(-1,true)", "lvalues": [ "1", "3", "5", "10" ] }, - { - "id": "filter_languages", - "type": "list", - "label": "Mostra link in lingua...", - "default": 0, - "enabled": true, - "visible": true, - "lvalues": ["Non filtrare", "IT"] - }, { "id": "autorenumber", "type": "bool", diff --git a/channels/animeleggendari.py b/channels/animeleggendari.py index eb276f46..0b58433b 100644 --- a/channels/animeleggendari.py +++ b/channels/animeleggendari.py @@ -2,20 +2,12 @@ # ------------------------------------------------------------ # Canale per animeleggendari # ------------------------------------------------------------ -""" - DA COMPLETARE - CONTROLLARE -""" -import re -from core import servertools, httptools, scrapertoolsV2, tmdb, support -from core.item import Item -from core.support import log, menu +from core import support from lib.js2py.host import jsfunctions -from platformcode import logger, config -from specials import autoplay, autorenumber __channel__ = "animeleggendari" -host = config.get_channel_url(__channel__) +host = support.config.get_channel_url(__channel__) headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], ['Referer', host]] @@ -23,20 +15,16 @@ headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/ list_servers = 
['verystream','openload','rapidvideo','streamango'] list_quality = ['default'] -checklinks = config.get_setting('checklinks', 'animeleggendari') -checklinks_number = config.get_setting('checklinks_number', 'animeleggendari') - @support.menu def mainlist(item): - anime = '' - animeSub = [ + anime = [ ('Leggendari', ['/category/anime-leggendari/', 'peliculas']), ('ITA', ['/category/anime-ita/', 'peliculas']), ('SUB-ITA', ['/category/anime-sub-ita/', 'peliculas']), ('Conclusi', ['/category/serie-anime-concluse/', 'peliculas']), - ('in Corso', ['/category/anime-in-corso/', 'peliculas']), + ('in Corso', ['/category/serie-anime-in-corso/', 'last_ep']), ('Genere', ['', 'genres']) ] @@ -44,7 +32,7 @@ def mainlist(item): def search(item, texto): - log(texto) + support.log(texto) item.url = host + "/?s=" + texto try: @@ -54,144 +42,60 @@ def search(item, texto): except: import sys for line in sys.exc_info(): - logger.error("%s" % line) + support.logger.error("%s" % line) return [] -@support.scrape -def last_ep(item): - log('ANIME PER TUTTI') - - action = 'findvideos' - patron = r'(?P[^<]+)<' - patron_block = r'<ul class="mh-tab-content-posts">(.*?)<\/ul>' - -def newest(categoria): - log('ANIME PER TUTTI') - log(categoria) - itemlist = [] - item = Item() - try: - if categoria == "anime": - item.url = host - item.action = "last_ep" - itemlist = last_ep(item) - - if itemlist[-1].action == "last_ep": - itemlist.pop() - # Continua la ricerca in caso di errore - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - - return itemlist @support.scrape def genres(item): - log() - - action = 'peliculas' blacklist = ['Contattaci','Privacy Policy', 'DMCA'] - patron = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)<' - patron_block = r'Generi.*?<ul.*?>(.*?)<\/ul>' + patronMenu = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)<' + patronBlock = r'Generi</a>\s*<ul[^>]+>(?P<block>.*?)<\/ul>' + action = 'peliculas' return locals() + +@support.scrape def 
peliculas(item): - log() - itemlist = [] - + anime = True blacklist = ['top 10 anime da vedere'] - matches, data = support.match(item, r'<a class="[^"]+" href="([^"]+)" title="([^"]+)"><img[^s]+src="([^"]+)"[^>]+') + if item.url != host: patronBlock = r'<div id="main-content(?P<block>.*?)<aside' + patron = r'<figure class="(?:mh-carousel-thumb|mh-posts-grid-thumb)"> <a class="[^"]+" href="(?P<url>[^"]+)" title="(?P<title>.*?)(?: \((?P<year>\d+)\))? (?:(?P<lang>SUB ITA|ITA))(?: (?P<title2>[Mm][Oo][Vv][Ii][Ee]))?[^"]*"><img[^s]+src="(?P<thumb>[^"]+)"[^>]+' + def itemHook(item): + if 'movie' in item.title.lower(): + item.title = support.re.sub(' - [Mm][Oo][Vv][Ii][Ee]|[Mm][Oo][Vv][Ii][Ee]','',item.title) + item.title += support.typo('Movie','_ () bold') + item.contentType = 'movie' + item.action = 'findvideos' + return item + patronNext = r'<a class="next page-numbers" href="([^"]+)">' + action = 'episodios' + return locals() - for url, title, thumb in matches: - title = scrapertoolsV2.decodeHtmlentities(title.strip()).replace("streaming", "") - lang = scrapertoolsV2.find_single_match(title, r"((?:SUB ITA|ITA))") - videoType = '' - if 'movie' in title.lower(): - videoType = ' - (MOVIE)' - if 'ova' in title.lower(): - videoType = ' - (OAV)' - - cleantitle = title.replace(lang, "").replace('(Streaming & Download)', '').replace('( Streaming & Download )', '').replace('OAV', '').replace('OVA', '').replace('MOVIE', '').strip() - - if not videoType : - contentType="tvshow" - action="episodios" - else: - contentType="movie" - action="findvideos" - - if not title.lower() in blacklist: - itemlist.append( - Item(channel=item.channel, - action=action, - contentType=contentType, - title=support.typo(cleantitle + videoType, 'bold') + support.typo(lang,'_ [] color kod'), - fulltitle=cleantitle, - show=cleantitle, - url=url, - thumbnail=thumb)) - - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) - autorenumber.renumber(itemlist) - support.nextPage(itemlist, item, data, r'<a 
class="next page-numbers" href="([^"]+)">') - - return itemlist +@support.scrape def episodios(item): - log() - itemlist = [] + url = item.url + anime = True + patronBlock = r'(?:<p style="text-align: left;">|<div class="pagination clearfix">\s*)(?P<block>.*?)</span></a></div>' + patron = r'(?:<a href="(?P<url>[^"]+)"[^>]+>)?<span class="pagelink">(?P<episode>\d+)</span>' + def itemHook(item): + if not item.url: + item.url = url + item.title = support.typo('Episodio ', 'bold') + item.title + return item + return locals() - data = httptools.downloadpage(item.url).data - block = scrapertoolsV2.find_single_match(data, r'(?:<p style="text-align: left;">|<div class="pagination clearfix">\s*)(.*?)</span></a></div>') - - itemlist.append( - Item(channel=item.channel, - action='findvideos', - contentType='episode', - title=support.typo('Episodio 1 bold'), - fulltitle=item.title, - url=item.url, - thumbnail=item.thumbnail)) - - if block: - matches = re.compile(r'<a href="([^"]+)".*?><span class="pagelink">(\d+)</span></a>', re.DOTALL).findall(data) - for url, number in matches: - itemlist.append( - Item(channel=item.channel, - action='findvideos', - contentType='episode', - title=support.typo('Episodio ' + number,'bold'), - fulltitle=item.title, - url=url, - thumbnail=item.thumbnail)) - - autorenumber.renumber(itemlist, item) - support.videolibrary - return itemlist def findvideos(item): - log() + support.log() data = '' matches = support.match(item, 'str="([^"]+)"')[0] if matches: for match in matches: - data += str(jsfunctions.unescape(re.sub('@|g','%', match))) + data += str(jsfunctions.unescape(support.re.sub('@|g','%', match))) data += str(match) - log('DATA',data) - if 'animepertutti' in data: - log('ANIMEPERTUTTI!') - else: data = '' - itemlist = support.server(item,data) - - if checklinks: - itemlist = servertools.check_list_links(itemlist, checklinks_number) - - # itemlist = filtertools.get_links(itemlist, item, list_language) - autoplay.start(itemlist, item) - - 
return itemlist + return support.server(item,data) diff --git a/channels/animesaturn.py b/channels/animesaturn.py index 50c265f4..54c2e4d7 100644 --- a/channels/animesaturn.py +++ b/channels/animesaturn.py @@ -19,7 +19,7 @@ list_quality = ['default', '480p', '720p', '1080p'] def mainlist(item): anime = ['/animelist?load_all=1', - ('Novità',['','newest', 'anime']), + ('Più Votati',['/toplist','menu', 'top']), ('In Corso',['/animeincorso','peliculas','incorso']), ('Ultimi Episodi',['/fetch_pages.php?request=episodes','peliculas','updated'])] @@ -53,16 +53,28 @@ def newest(categoria): return itemlist +@support.scrape +def menu(item): + patronMenu = r'u>(?P<title>[^<]+)<u>(?P<url>.*?)</div> </div>' + action = 'peliculas' + return locals() + @support.scrape def peliculas(item): anime = True + # debug = True if item.args == 'updated': post = "page=" + str(item.page if item.page else 1) if item.page > 1 else None page, data = support.match(item, r'data-page="(\d+)" title="Next">', post=post, headers=headers) patron = r'<img alt="[^"]+" src="(?P<thumb>[^"]+)" [^>]+></div></a>\s*<a href="(?P<url>[^"]+)"><div class="testo">(?P<title>[^\(<]+)(?:(?P<lang>\(([^\)]+)\)))?</div></a>\s*<a href="[^"]+"><div class="testo2">[^\d]+(?P<episode>\d+)</div></a>' if page: nextpage = page action = 'findvideos' + elif item.args == 'top': + # debug = True + data = item.url + patron = r'a href="(?P<url>[^"]+)">[^>]+>(?P<title>[^<\(]+)(?:\((?P<year>[^\)]+)\))?</div></a><div class="numero">(?P<title2>[^<]+)</div>.*?<img alt="[^"]+" src="(?P<thumb>[^"]+)"' + action = 'check' else: pagination = '' if item.args == 'incorso': patron = r'"slider_title" href="(?P<url>[^"]+)"><img src="(?P<thumb>[^"]+)"[^>]+>(?P<title>[^\(<]+)(?:\((?P<year>\d+)\))?</a>' diff --git a/channelselector.py b/channelselector.py index 72443fa7..8e7cc875 100644 --- a/channelselector.py +++ b/channelselector.py @@ -342,9 +342,9 @@ def thumb(itemlist=[], genre=False): 'news':['novità', "novita'", 'aggiornamenti'], 
'now_playing':['cinema', 'in sala'], 'channels_anime':['anime'], - 'genres':['genere', 'generi', 'categorie', 'categoria'], - 'channels_action':['azione', 'arti marziali'], + 'genres':['genere', 'generi', 'categorie', 'categoria'], 'channels_animation': ['animazione', 'cartoni', 'cartoon'], + 'channels_action':['azione', 'arti marziali'], 'channels_adventure': ['avventura'], 'channels_biographical':['biografico'], 'channels_comedy':['comico','commedia', 'demenziale'], @@ -363,7 +363,8 @@ def thumb(itemlist=[], genre=False): 'channels_noir':['noir'], 'popular' : ['popolari','popolare', 'più visti'], 'channels_thriller':['thriller'], - 'top_rated' : ['fortunato'], + 'top_rated' : ['fortunato', 'votati'], + 'on_the_air' : ['corso', 'onda'], 'channels_western':['western'], 'channels_vos':['sub','sub-ita'], 'channels_romance':['romantico','sentimentale'], diff --git a/core/support.py b/core/support.py index 941e84da..9d01bf85 100644 --- a/core/support.py +++ b/core/support.py @@ -334,6 +334,7 @@ def scrape(func): if not data: data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data.replace("'", '"') data = re.sub('\n|\t', ' ', data) + data = re.sub('>\s+<', '> <', data) # replace all ' with " and eliminate newline, so we don't need to worry about log('DATA =', data) diff --git a/servers/netutv.py b/servers/netutv.py index 116f5ce0..3204df2f 100644 --- a/servers/netutv.py +++ b/servers/netutv.py @@ -35,15 +35,17 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= page_url = page_url.replace('https://waaw.tv/', 'http://hqq.watch/') data = httptools.downloadpage(page_url).data - # ~ logger.debug(data) + logger.debug(data) - js_wise = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>") + # js_wise = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>") + js_wise = scrapertools.find_single_match(data, 
"<script>\s*;(eval.*?)\s*</script>") + logger.info('JS_WISE= '+ js_wise) data = jswise(js_wise).replace("\\", "") - # ~ logger.debug(data) + logger.debug(data) alea = str(random.random())[2:] data_ip = httptools.downloadpage('http://hqq.watch/player/ip.php?type=json&rand=%s' % alea).data - # ~ logger.debug(data_ip) + logger.debug(data_ip) json_data_ip = jsontools.load(data_ip) url = scrapertools.find_single_match(data, 'self\.location\.replace\("([^)]+)\)') @@ -51,14 +53,15 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= url = url.replace('"+data.ip+"', json_data_ip['ip']) url = url.replace('"+need_captcha+"', '0') #json_data_ip['need_captcha']) url = url.replace('"+token', '') - # ~ logger.debug(url) + # logger.info('URL= '+url) + # logger.debug(url) headers = { "User-Agent": 'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/533.4 (KHTML, like Gecko) Chrome/5.0.375.127 Large Screen Safari/533.4 GoogleTV/162671' } data = httptools.downloadpage('http://hqq.watch'+url, headers=headers).data - # ~ logger.debug(data) + # logger.debug(data) codigo_js = scrapertools.find_multiple_matches(data, '<script>document.write\(unescape\("([^"]+)') - # ~ logger.debug(codigo_js) + # logger.debug(codigo_js) js_aux = urllib.unquote(codigo_js[0]) at = scrapertools.find_single_match(js_aux, 'var at = "([^"]+)')