diff --git a/channels/0example.py.txt b/channels/0example.py.txt
index 5c520f7d..7420bf14 100644
--- a/channels/0example.py.txt
+++ b/channels/0example.py.txt
@@ -75,7 +75,6 @@ headers = [['Referer', host]]
 @support.menu
 def mainlist(item):
-    support.info(item)
 
     # Ordine delle voci
 
     # Voce FILM, puoi solo impostare l'url
@@ -146,7 +145,6 @@ def mainlist(item):
 # AVVERTENZE: Se il titolo è trovato nella ricerca TMDB/TVDB/Altro allora le locandine e altre info non saranno quelle recuperate nel sito.!!!!
 @support.scrape
 def peliculas(item):
-    support.info(item)
     #support.dbg() # decommentare per attivare web_pdb
     action = ''
@@ -161,7 +159,6 @@ def peliculas(item):
 
 @support.scrape
 def episodios(item):
-    support.info(item)
     #support.dbg()
     action = ''
@@ -178,7 +175,6 @@ def episodios(item):
 
 # per genere, per anno, per lettera, per qualità ecc ecc
 @support.scrape
 def genres(item):
-    support.info(item)
     #support.dbg()
     action = ''
@@ -198,7 +194,7 @@ def genres(item):
 # e la ricerca porta i titoli mischiati senza poterli distinguere tra loro
 # andranno modificate anche le def peliculas e episodios ove occorre
 def select(item):
-    support.info('select --->', item)
+    logger.debug()
     #support.dbg()
     data = httptools.downloadpage(item.url, headers=headers).data
     # pulizia di data, in caso commentare le prossime 2 righe
@@ -206,7 +202,7 @@ def select(item):
     data = re.sub(r'>\s+<', '> <', data)
     block = scrapertools.find_single_match(data, r'')
     if re.findall('', data, re.IGNORECASE):
-        support.info('select = ### è una serie ###')
+        logger.debug('select = ### è una serie ###')
         return episodios(Item(channel=item.channel,
                               title=item.title,
                               fulltitle=item.fulltitle,
@@ -219,7 +215,7 @@ def select(item):
 ############## Fondo Pagina
 # da adattare al canale
 def search(item, text):
-    support.info('search', item)
+    logger.debug(text)
     itemlist = []
     text = text.replace(' ', '+')
     item.url = host + '/index.php?do=search&story=%s&subaction=search' % (text)
@@ -232,7 +228,7 @@ def search(item, text):
     except:
         import sys
         for line in sys.exc_info():
-            info('search log:', line)
+            logger.error('search log:', line)
         return []
@@ -240,7 +236,7 @@ def search(item, text):
 # inserire newest solo se il sito ha la pagina con le ultime novità/aggiunte
 # altrimenti NON inserirlo
 def newest(categoria):
-    support.info('newest ->', categoria)
+    logger.debug(categoria)
     itemlist = []
     item = Item()
     try:
@@ -255,7 +251,7 @@ def newest(categoria):
     except:
         import sys
         for line in sys.exc_info():
-            support.info('newest log: ', {0}.format(line))
+            logger.error('newest log: {0}'.format(line))
         return []
     return itemlist
@@ -265,5 +261,5 @@ def newest(categoria):
 # sia per i siti con hdpass
 #support.server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True)
 def findvideos(item):
-    support.info('findvideos ->', item)
+    logger.debug()
     return support.server(item, headers=headers)
diff --git a/channels/accuradio.py b/channels/accuradio.py
index dec8ac88..9fc879cc 100644
--- a/channels/accuradio.py
+++ b/channels/accuradio.py
@@ -32,7 +32,7 @@ def mainlist(item):
 
 @support.scrape
 def peliculas(item):
-    disabletmdb = True
+    tmdbEnabled = False
     action = 'playradio'
     patron = r'data-id="(?P[^"]+)"\s*data-oldid="(?P[^"]+)".*?data-name="(?P[^"]+)(?:[^>]+>){5}<img class="[^"]+"\s*src="(?P<thumb>[^"]+)(?:[^>]+>){6}\s*(?P<plot>[^<]+)'
     return locals()
@@ -66,7 +66,7 @@ def playradio(item):
 
 def search(item, text):
-    support.info(text)
+    logger.debug(text)
     item.url = host + '/search/' + text
     itemlist = []
     try:
diff --git a/channels/altadefinizione01.py
b/channels/altadefinizione01.py index d35e23c6..024cb984 100644 --- a/channels/altadefinizione01.py +++ b/channels/altadefinizione01.py @@ -2,22 +2,11 @@ # ------------------------------------------------------------ # Canale per altadefinizione01 # ------------------------------------------------------------ -""" - - Eccezioni note che non superano il test del canale: - Avvisi: - - L'url si prende da questo file. - - è presente nelle novità-> Film. - - Ulteriori info: - -""" from core import scrapertools, httptools, support from core.item import Item from platformcode import config, logger - # def findhost(url): # data = httptools.downloadpage(url).data # host = scrapertools.find_single_match(data, '<div class="elementor-button-wrapper"> <a href="([^"]+)"') @@ -35,7 +24,7 @@ def mainlist(item): ('Al Cinema', ['/cinema/', 'peliculas', 'pellicola']), ('Ultimi Aggiornati-Aggiunti', ['','peliculas', 'update']), ('Generi', ['', 'genres', 'genres']), - ('Lettera', ['/catalog/a/', 'genres', 'orderalf']), + ('Lettera', ['/catalog/a/', 'genres', 'az']), ('Anni', ['', 'genres', 'years']), ('Sub-ITA', ['/sub-ita/', 'peliculas', 'pellicola']) ] @@ -45,35 +34,32 @@ def mainlist(item): @support.scrape def peliculas(item): - support.info('peliculas', item) -## deflang = 'ITA' action="findvideos" patron = r'<div class="cover boxcaption"> +<h2>\s*<a href="(?P<url>[^"]+)">(?P<title>[^<]+).*?src="(?P<thumb>[^"]+).*?<div class="trdublaj">(?P<quality>[^<]+).*?<span class="ml-label">(?P<year>[0-9]+).*?<span class="ml-label">(?P<duration>[^<]+).*?<p>(?P<plot>[^<]+)' if item.args == "search": patronBlock = r'</script> <div class="boxgrid caption">(?P<block>.*)<div id="right_bar">' - + elif item.args == 'update': patronBlock = r'<div class="widget-title">Ultimi Film Aggiunti/Aggiornati</div>(?P<block>.*?)<div id="alt_menu">' patron = r'style="background-image:url\((?P<thumb>[^\)]+).+?<p class="h4">(?P<title>.*?)</p>[^>]+> [^>]+> [^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+> [^>]+> [^>]+>[^>]+>(?P<year>\d{4})[^>]+>[^>]+> [^>]+>[^>]+>(?P<duration>\d+|N/A).+?>.*?(?:>Film (?P<lang>Sub ITA)</a></p> )?<p>(?P<plot>[^<]+)<.*?href="(?P<url>[^"]+)' - elif item.args == 'orderalf': + elif item.args == 'az': patron = r'<td class="mlnh-thumb"><a href="(?P<url>[^"]+)".*?src="(?P<thumb>[^"]+)"' \ '.+?[^>]+>[^>]+ [^>]+[^>]+ [^>]+>(?P<title>[^<]+).*?[^>]+>(?P<year>\d{4})<' \ '[^>]+>[^>]+>(?P<quality>[A-Z]+)[^>]+> <td class="mlnh-5">(?P<lang>.*?)</td>' else: patronBlock = r'<div class="cover_kapsul ml-mask">(?P<block>.*)<div class="page_nav">' - patronNext = '<a href="([^"]+)">»' + patronNext = r'<a href="([^"]+)">»' patronTotalPages = r'>(\d+)(?:[^>]+>){3}»' - # debugBlock = True + return locals() @support.scrape def genres(item): - support.info('genres',item) action = "peliculas" blacklist = ['Altadefinizione01'] @@ -83,16 +69,14 @@ def genres(item): elif item.args == 'years': patronBlock = r'<ul class="anno_list">(?P<block>.*?)</li> </ul> </div>' patronMenu = '<li><a href="(?P<url>[^"]+)">(?P<title>.*?)</a>' - elif item.args == 'orderalf': + elif item.args == 'az': patronBlock = r'<div class="movies-letter">(?P<block>.*?)<div class="clearfix">' patronMenu = '<a title=.*?href="(?P<url>[^"]+)"><span>(?P<title>.*?)</span>' - #debug = True return locals() @support.scrape -def orderalf(item): - support.info('orderalf',item) +def az(item): action = 'findvideos' patron = r'<td class="mlnh-thumb"><a href="(?P<url>[^"]+)".*?src="(?P<thumb>[^"]+)"'\ @@ -103,7 +87,7 @@ def orderalf(item): def search(item, text): - support.info(item, text) + 
logger.debug(text) itemlist = [] @@ -120,7 +104,7 @@ def search(item, text): return [] def newest(categoria): - support.info(categoria) + logger.debug(categoria) itemlist = [] item = Item() @@ -142,7 +126,7 @@ def newest(categoria): return itemlist def findvideos(item): - support.info('findvideos', item) + logger.debug() data = httptools.downloadpage(item.url).data iframe = support.match(data, patron='player-container[^>]+>\s*<iframe[^>]+src="([^"]+)').match if iframe: diff --git a/channels/altadefinizione01_link.py b/channels/altadefinizione01_link.py index 185ca179..17bac949 100644 --- a/channels/altadefinizione01_link.py +++ b/channels/altadefinizione01_link.py @@ -15,8 +15,6 @@ headers = [['Referer', host]] # =========== home menu =================== @support.menu def mainlist(item): - support.info('mainlist',item) - film = [ ('Al Cinema', ['/film-del-cinema', 'peliculas', '']), ('Generi', ['', 'genres', 'genres']), @@ -39,7 +37,6 @@ def peliculas(item): # =========== def pagina categorie ====================================== @support.scrape def genres(item): - support.info('genres',item) action = 'peliculas' if item.args == 'genres': @@ -57,7 +54,7 @@ def genres(item): # =========== def per cercare film/serietv ============= #host+/index.php?do=search&story=avatar&subaction=search def search(item, text): - support.info('search', item) + logger.debug(text) itemlist = [] text = text.replace(" ", "+") item.url = host+"/index.php?do=search&story=%s&subaction=search" % (text) @@ -73,7 +70,7 @@ def search(item, text): # =========== def per le novità nel menu principale ============= def newest(categoria): - support.info('newest', categoria) + logger.debug(categoria) itemlist = [] item = Item() try: @@ -95,5 +92,5 @@ def newest(categoria): return itemlist def findvideos(item): - support.info('findvideos', item) + logger.debug('findvideos', item) return support.server(item, support.match(item, patron='<ul class="playernav">.*?</ul>', headers=headers).match) diff --git a/channels/altadefinizioneclick.py b/channels/altadefinizioneclick.py index 5995d8d9..f964579a 100644 --- a/channels/altadefinizioneclick.py +++ b/channels/altadefinizioneclick.py @@ -82,22 +82,22 @@ def genres(item): return locals() -def search(item, texto): - support.info("search ", texto) +def search(item, text): + logger.debug(text) item.args = 'search' - item.url = host + "?s=" + texto + item.url = host + "?s=" + text try: return peliculas(item) # Continua la ricerca in caso di errore except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] def newest(categoria): - support.info(categoria) + logger.debug(categoria) itemlist = [] item = Item() try: diff --git a/channels/altadefinizionecommunity.py b/channels/altadefinizionecommunity.py index bc36707d..8fc7b217 100644 --- a/channels/altadefinizionecommunity.py +++ b/channels/altadefinizionecommunity.py @@ -132,7 +132,7 @@ def peliculas(item): json = {} if item.contentType == 'undefined': - # disabletmdb = True + # tmdbEnabled = False action = 'check' elif item.contentType == 'movie': action = 'findvideos' @@ -159,7 +159,7 @@ def peliculas(item): url = '='.join(spl[:-1]) page = str(int(spl[-1])+1) total_pages = json.get('lastPage', 0) - support.nextPage(itemlist, item, next_page='='.join((url, page)), function_or_level='peliculas', total_pages=total_pages) + support.nextPage(itemlist, item, 'peliculas', next_page='='.join((url, page)), total_pages=total_pages) return itemlist return locals() @@ -176,7 +176,7 @@ 
def search(item, texto):
     except:
         import sys
         for line in sys.exc_info():
-            support.logger.error("%s" % line)
+            logger.error("%s" % line)
         return []
diff --git a/channels/animealtadefinizione.py b/channels/animealtadefinizione.py
index 13287f5c..4c1f7fc4 100644
--- a/channels/animealtadefinizione.py
+++ b/channels/animealtadefinizione.py
@@ -3,7 +3,9 @@
 # Canale per animealtadefinizione
 # ----------------------------------------------------------
 
+from platformcode import platformtools
 from core import support
+from platformcode import logger
 
 host = support.config.get_channel_url()
 headers = [['Referer', host]]
@@ -33,21 +35,21 @@ def menu(item):
     return locals()
 
-def search(item, texto):
-    support.info(texto)
-    item.search = texto
+def search(item, text):
+    logger.debug(text)
+    item.search = text
     try:
         return peliculas(item)
     # Continua la ricerca in caso di errore
     except:
         import sys
         for line in sys.exc_info():
-            support.logger.error("%s" % line)
+            logger.error("%s" % line)
         return []
 
 def newest(categoria):
-    support.info(categoria)
+    logger.debug(categoria)
     item = support.Item()
     try:
         if categoria == "anime":
@@ -58,7 +60,7 @@ def newest(categoria):
     except:
         import sys
         for line in sys.exc_info():
-            support.logger.error("{0}".format(line))
+            logger.error("{0}".format(line))
         return []
@@ -79,10 +81,15 @@ def peliculas(item):
     else:
         query='category_name'
         searchtext = item.url.split('/')[-2]
-    if not item.pag: item.pag = 1
-    # debug = True
-    anime = True
-    data = support.match(host + '/wp-admin/admin-ajax.php', post='action=itajax-sort&loop=main+loop&location=&thumbnail=1&rating=1sorter=recent&columns=4&numarticles='+perpage+'&paginated='+str(item.pag)+'&currentquery%5B'+query+'%5D='+searchtext).data.replace('\\','')
+
+    page = 1 if not item.page else item.page
+
+    numerationEnabled = True
+    post = 'action=itajax-sort&loop=main+loop&location=&thumbnail=1&rating=1sorter=recent&columns=4&numarticles={}&paginated={}&currentquery%5B{}%5D={}'.format(perpage, page, query, searchtext)
+    res = support.match(host + '/wp-admin/admin-ajax.php', post=post, patron=r'"pages":(\d+)')
+    data = res.data.replace('\\','')
+    # item.total_pages = int(res.match)
+
     patron = r'<a href="(?P<url>[^"]+)"><img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)" class="[^"]+" alt="" title="(?P<title>[^"]+?)\s+(?P<type>Movie)?\s*(?P<lang>Sub Ita|Ita)?\s*[sS]treaming'
     typeContentDict = {'movie':['movie']}
     typeActionDict = {'findvideos':['movie']}
@@ -91,15 +98,14 @@
         if item.search: itemlist = [ it for it in itemlist if ' Episodio ' not in it.title ]
         if len(itemlist) == int(perpage):
-            item.pag += 1
-            itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), action='peliculas'))
+            support.nextPage(itemlist, item, function_or_level='peliculas', page=page + 1, total_pages=int(res.match))
         return itemlist
     return locals()
 
 @support.scrape
 def episodios(item):
-    anime = True
+    numerationEnabled = True
     pagination = int(perpage)
     patron = epPatron
     return locals()
diff --git a/channels/animeforce.py b/channels/animeforce.py
index 8f8c99bb..f5d7b274 100644
--- a/channels/animeforce.py
+++ b/channels/animeforce.py
@@ -4,6 +4,7 @@
 # ------------------------------------------------------------
 
 from core import support
+from platformcode import logger
 
 host = support.config.get_channel_url()
 headers = [['Referer', host]]
@@ -38,7 +39,7 @@ def submenu(item):
 
 def newest(categoria):
-    support.info(categoria)
+    logger.debug(categoria)
     itemlist = []
     item = support.Item()
     try:
@@ -51,13 +52,13 @@
def newest(categoria): except: import sys for line in sys.exc_info(): - support.logger.error("{0}".format(line)) + logger.error("{0}".format(line)) return [] return itemlist def search(item, text): - support.info('search',text) + logger.debug(text) item.search = text item.url = host + '/lista-anime/' item.contentType = 'tvshow' @@ -67,14 +68,14 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] @support.scrape def peliculas(item): search = item.search - anime = True + numerationEnabled = True if 'movie' in item.url: action = 'findvideos' else: @@ -108,7 +109,7 @@ def check(item): @support.scrape def episodios(item): - anime = True + numerationEnabled = True data = item.data if '<h6>Streaming</h6>' in data: @@ -116,7 +117,6 @@ def episodios(item): else: patron = r'<a\s*href="(?P<url>[^"]+)"\s*title="(?P<title>[^"]+)"\s*class="btn btn-dark mb-1">' def itemHook(item): - support.info(item) if item.url.startswith('//'): item.url= 'https:' + item.url elif item.url.startswith('/'): item.url= 'https:/' + item.url return item @@ -125,7 +125,7 @@ def episodios(item): def findvideos(item): - support.info(item) + logger.debug() itemlist = [] if 'adf.ly' in item.url: diff --git a/channels/animeleggendari.py b/channels/animeleggendari.py index bfd00a61..b24ac957 100644 --- a/channels/animeleggendari.py +++ b/channels/animeleggendari.py @@ -4,6 +4,7 @@ # ------------------------------------------------------------ from core import support +from platformcode import logger from lib.js2py.host import jsfunctions host = support.config.get_channel_url() @@ -30,10 +31,10 @@ def mainlist(item): return locals() -def search(item, texto): - support.info(texto) +def search(item, text): + logger.debug(text) - item.url = host + "/?s=" + texto + item.url = host + "/?s=" + text try: return peliculas(item) @@ -41,7 +42,7 @@ def search(item, texto): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] @@ -56,7 +57,7 @@ def genres(item): @support.scrape def peliculas(item): - anime = True + numerationEnabled = True blacklist = ['top 10 anime da vedere'] if item.url != host: patronBlock = r'<div id="main-content(?P<block>.*?)<aside' patron = r'<figure class="(?:mh-carousel-thumb|mh-posts-grid-thumb)">\s*<a (?:class="[^"]+" )?href="(?P<url>[^"]+)" title="(?P<title>.*?)(?: \((?P<year>\d+)\))? 
(?:(?P<lang>SUB ITA|ITA))(?: (?P<title2>[Mm][Oo][Vv][Ii][Ee]))?[^"]*"><img (?:class="[^"]+"|width="[^"]+" height="[^"]+") src="(?P<thumb>[^"]+)"[^>]+' @@ -83,7 +84,7 @@ def peliculas(item): def episodios(item): data = support.match(item, headers=headers, patronBlock=r'entry-content clearfix">(.*?)class="mh-widget mh-posts-2 widget_text').block if not 'pagination clearfix' in data: - support.info('NOT IN DATA') + logger.debug('NOT IN DATA') patron = r'<iframe.*?src="(?P<url>[^"]+)"' title = item.title def fullItemlistHook(itemlist): @@ -100,7 +101,7 @@ def episodios(item): return itemlist else: url = item.url - anime = True + numerationEnabled = True patronBlock = r'(?:<p style="text-align: left;">|<div class="pagination clearfix">\s*)(?P<block>.*?)</span></a></div>' patron = r'(?:<a href="(?P<url>[^"]+)"[^>]+>)?<span class="pagelink">(?P<episode>\d+)' def itemHook(item): @@ -124,7 +125,7 @@ def check(item): return data def findvideos(item): - support.info() + logger.debug() if item.data: data = item.data else: diff --git a/channels/animesaturn.py b/channels/animesaturn.py index 1adeca52..c52830f0 100644 --- a/channels/animesaturn.py +++ b/channels/animesaturn.py @@ -3,9 +3,8 @@ # Canale per AnimeSaturn # ---------------------------------------------------------- -from lib import js2py from core import support -from platformcode import config +from platformcode import logger host = support.config.get_channel_url() __channel__ = 'animesaturn' @@ -43,9 +42,9 @@ def mainlist(item): return locals() -def search(item, texto): - support.info(texto) - item.url = host + '/animelist?search=' + texto +def search(item, text): + logger.debug(text) + item.url = host + '/animelist?search=' + text item.contentType = 'tvshow' try: return peliculas(item) @@ -53,12 +52,12 @@ def search(item, texto): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] def newest(categoria): - support.info() + logger.debug(categoria) itemlist = [] item = support.Item() try: @@ -70,7 +69,7 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.logger.error("{0}".format(line)) + logger.error("{0}".format(line)) return [] return itemlist @@ -110,7 +109,7 @@ def menu(item): @support.scrape def peliculas(item): - anime = True + numerationEnabled = True deflang= 'Sub-ITA' action = 'check' @@ -169,13 +168,13 @@ def check(item): @support.scrape def episodios(item): - if item.contentType != 'movie': anime = True + if item.contentType != 'movie': numerationEnabled = True patron = r'episodi-link-button">\s*<a href="(?P<url>[^"]+)"[^>]+>\s*(?P<title>[^<]+)</a>' return locals() def findvideos(item): - support.info() + logger.debug() itemlist = [] links = [] # page_data = '' diff --git a/channels/animeunity.py b/channels/animeunity.py index fc0f997f..2176e700 100644 --- a/channels/animeunity.py +++ b/channels/animeunity.py @@ -4,11 +4,8 @@ # ------------------------------------------------------------ import cloudscraper, json, copy, inspect -from core import jsontools, support, httptools, filetools +from core import jsontools, support from platformcode import autorenumber, logger -import re -import xbmc - session = cloudscraper.create_scraper() @@ -54,7 +51,7 @@ def menu(item): def genres(item): - support.info() + logger.debug() # support.dbg() itemlist = [] @@ -66,7 +63,7 @@ def genres(item): return support.thumb(itemlist) def years(item): - support.info() + logger.debug() itemlist = [] from datetime import datetime @@ -80,7 +77,7 @@ def 
years(item): def search(item, text): - support.info('search', item) + logger.debug(text) if not item.args: item.args = {'title':text} else: @@ -93,12 +90,12 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.info('search log:', line) + logger.debug('search log:', line) return [] def newest(categoria): - support.info(categoria) + logger.debug(categoria) itemlist = [] item = support.Item() item.url = host @@ -112,13 +109,13 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.info(line) + logger.debug(line) return [] return itemlist def news(item): - support.info() + logger.debug() item.contentType = 'episode' itemlist = [] import cloudscraper @@ -143,7 +140,7 @@ def news(item): def peliculas(item): - support.info() + logger.debug() itemlist = [] page = item.page if item.page else 0 @@ -196,7 +193,7 @@ def peliculas(item): return itemlist def episodios(item): - support.info() + logger.debug() itemlist = [] title = 'Parte ' if item.type.lower() == 'movie' else 'Episodio ' for it in item.episodes: diff --git a/channels/animeuniverse.py b/channels/animeuniverse.py index 1716f062..f605c064 100644 --- a/channels/animeuniverse.py +++ b/channels/animeuniverse.py @@ -4,6 +4,7 @@ # ---------------------------------------------------------- from core import support +from platformcode import logger host = support.config.get_channel_url() headers = {} @@ -34,21 +35,21 @@ def menu(item): return locals() -def search(item, texto): - support.info(texto) - item.search = texto +def search(item, text): + logger.debug(text) + item.search = text try: return peliculas(item) # Continua la ricerca in caso di errore except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] def newest(categoria): - support.info(categoria) + logger.debug(categoria) item = support.Item() try: if categoria == "anime": @@ -59,7 +60,7 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.logger.error("{0}".format(line)) + logger.error("{0}".format(line)) return [] @@ -85,7 +86,7 @@ def peliculas(item): searchtext = item.url.split('/')[-2] if item.url != host else '' if not item.pag: item.pag = 1 - anime=True + numerationEnabled = False # blacklist=['Altri Hentai'] data = support.match(host + '/wp-content/themes/animeuniverse/functions/ajax.php', post='sorter=recent&location=&loop=main+loop&action=sort&numarticles='+perpage+'&paginated='+str(item.pag)+'¤tquery%5B'+query+'%5D='+searchtext+'&thumbnail=1').data.replace('\\','') patron=r'<a href="(?P<url>[^"]+)"><img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)" class="[^"]+" alt="" title="(?P<title>.*?)\s*(?P<lang>Sub ITA|ITA)?(?:"| \[)' @@ -102,7 +103,7 @@ def peliculas(item): @support.scrape def episodios(item): - anime = True + numerationEnabled = True pagination = int(perpage) patron = epPatron return locals() diff --git a/channels/animeworld.py b/channels/animeworld.py index 3660b6d2..5cb7fee4 100644 --- a/channels/animeworld.py +++ b/channels/animeworld.py @@ -5,6 +5,7 @@ # ---------------------------------------------------------- from core import httptools, support, jsontools +from platformcode import logger host = support.config.get_channel_url() __channel__ = 'animeworld' @@ -86,7 +87,7 @@ def submenu(item): def newest(categoria): - support.info(categoria) + logger.debug(categoria) item = support.Item() try: if categoria == "anime": @@ -97,12 +98,12 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - 
support.logger.error("{0}".format(line)) + logger.error("{0}".format(line)) return [] def search(item, texto): - support.info(texto) + logger.debug(texto) if item.search: item.url = host + '/filter?dub=' + item.args + '&keyword=' + texto + '&sort=' else: @@ -115,13 +116,13 @@ def search(item, texto): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] @support.scrape def peliculas(item): - anime = True + numerationEnabled = True # debug = True if item.args not in ['noorder', 'updated'] and not item.url[-1].isdigit(): item.url += order() # usa l'ordinamento di configura canale data = get_data(item) @@ -153,20 +154,20 @@ def peliculas(item): @support.scrape def episodios(item): data = get_data(item) - anime = True - pagination = 50 + numerationEnabled = True + # pagination = 50 patronBlock= r'<div class="server\s*active\s*"(?P<block>.*?)(?:<div class="server|<link)' patron = r'<li[^>]*>\s*<a.*?href="(?P<url>[^"]+)"[^>]*>(?P<episode>[^-<]+)(?:-(?P<episode2>[^<]+))?' - def itemHook(item): - item.title = item.fulltitle - return item + # def itemHook(item): + # item.title = item.fulltitle + # return item action='findvideos' return locals() def findvideos(item): import time - support.info(item) + logger.debug() itemlist = [] urls = [] # resp = support.match(get_data(item), headers=headers, patron=r'data-name="(\d+)">([^<]+)<') @@ -186,8 +187,7 @@ def findvideos(item): title = support.match(url, patron=r'http[s]?://(?:www.)?([^.]+)', string=True).match itemlist.append(item.clone(action="play", title=title, url=url, server='directo')) else: - dataJson = support.match(host + '/api/episode/info?id=' + epID + '&alt=0', headers=headers).data - json = jsontools.load(dataJson) + json = support.match(host + '/api/episode/info?id=' + epID + '&alt=0', headers=headers).response.json title = support.match(json['grabber'], patron=r'server\d+.([^.]+)', string=True).match if title: itemlist.append(item.clone(action="play", title=title, url=json['grabber'].split('=')[-1], server='directo')) else: urls.append(json['grabber']) diff --git a/channels/aniplay.py b/channels/aniplay.py index dd43e9dc..5f53c3f6 100644 --- a/channels/aniplay.py +++ b/channels/aniplay.py @@ -93,7 +93,7 @@ def submenu_top_of(item): def search(item, texto): - support.info(texto) + logger.debug(texto) item.url = host + '/api/anime/advanced-search' item.variable = '&query=' + texto @@ -103,12 +103,12 @@ def search(item, texto): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] def newest(categoria): - support.info(categoria) + logger.debug(categoria) item = support.Item() try: if categoria == "anime": @@ -117,7 +117,7 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.logger.error("{0}".format(line)) + logger.error("{0}".format(line)) return [] diff --git a/channels/casacinema.py b/channels/casacinema.py index f2eef748..a05b03d5 100644 --- a/channels/casacinema.py +++ b/channels/casacinema.py @@ -5,6 +5,7 @@ from core import support +from platformcode import logger host = support.config.get_channel_url() headers = [['Referer', host]] @@ -38,17 +39,17 @@ def genres(item): def select(item): item.data = support.match(item).data if 'continua con il video' in item.data.lower(): - support.info('select = ### è un film ###') + logger.debug('select = ### è un film ###') item.contentType = 'movie' return findvideos(item) else: - support.info('select = ### è una serie ###') + 
logger.debug('select = ### è una serie ###') item.contentType = 'tvshow' return episodios(item) def search(item, text): - support.info(text) + logger.debug(text) text = text.replace(' ', '+') item.url = host + '/?s=' + text item.args = 'search' @@ -58,7 +59,7 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.info('search log:', line) + logger.error(line) return [] @@ -86,7 +87,7 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.info("%s" % line) + logger.error(line) return [] return itemlist @@ -148,7 +149,7 @@ def findvideos(item): data = '' from lib.unshortenit import unshorten_only for link in links: - support.info('URL=',link) + logger.debug('URL=',link) url, c = unshorten_only(link.replace('#', 'speedvideo.net')) data += url + '\n' return support.server(item, data) diff --git a/channels/cb01anime.py b/channels/cb01anime.py index 79ed89d0..24dec2f2 100644 --- a/channels/cb01anime.py +++ b/channels/cb01anime.py @@ -5,6 +5,7 @@ # ------------------------------------------------------------ from core import support +from platformcode import logger host = support.config.get_channel_url() + '/cb01-anime-cartoon' @@ -37,19 +38,19 @@ def menu(item): def search(item, texto): - support.info(texto) + logger.debug(texto) item.url = host + "/search/" + texto try: return peliculas(item) except: import sys for line in sys.exc_info(): - support.info('search log:', line) + logger.error(line) return [] def newest(categoria): - support.info(categoria) + logger.debug(categoria) itemlist = [] item = support.Item() try: @@ -61,7 +62,7 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.logger.error("{0}".format(line)) + logger.error("{0}".format(line)) return [] return itemlist @@ -93,7 +94,7 @@ def check(item): @support.scrape def episodios(item): - support.info('EPISODIOS ', item.data) + logger.debug('EPISODIOS ', item.data) data = '' matches = item.data season = 1 diff --git a/channels/cineblog01.py b/channels/cineblog01.py index 07f00962..b605f54b 100644 --- a/channels/cineblog01.py +++ b/channels/cineblog01.py @@ -55,7 +55,7 @@ def menu(item): def newest(categoria): - support.info(categoria) + logger.debug(categoria) item = support.Item() try: @@ -133,60 +133,66 @@ def peliculas(item): return locals() -@support.scrape + def episodios(item): @support.scrape - def folder(item, data): - """ - Quando c'è un link ad una cartella contenente più stagioni - """ + def listed(item, data): actLike = 'episodios' - addVideolibrary = False - downloadEnabled = False + disableAll = True - folderUrl = scrapertools.find_single_match(data, r'TUTTA L[EA] \w+\s+(?:–|-)\s+<a href="?([^" ]+)') - data = httptools.downloadpage(folderUrl, disable_directIP=True).data - patron = r'<td>(?P<title>[^<]+)<td><a [^>]+href="(?P<url>[^"]+)[^>]+>' - sceneTitle = True - # debug = True + patronBlock = r'(?P<block>sp-head[^>]+>\s*(?:STAGION[EI]\s*(?:(?:DA)?\s*[0-9]+\s*A)?\s*[0-9]+|MINISSERIE)(?::\s*PARTE\s*[0-9]+)? 
- (?P<lang>[^-<]+)(?:- (?P<quality>[^-<]+))?.*?<\/div>.*?)spdiv[^>]*>' + patron = r'(?:/>|<p>|<strong>)(?P<other>.*?(?P<episode>[0-9]+(?:×|×)[0-9]+)\s*(?P<title2>.*?)?(?:\s*–|\s*-|\s*<).*?)(?:<\/p>|<br)' + + return locals() + + @support.scrape + def folder(item, data): + # Quando c'è un link ad una cartella contenente più stagioni + + actLike = 'episodios' + disableAll = True + sceneTitle = True + + folderUrl = scrapertools.find_single_match(data, r'TUTT[EA] L[EA] \w+\s+(?:–|-)\s+<a href="?([^" ]+)') + data = httptools.downloadpage(folderUrl, disable_directIP=True).data + patron = r'<td>(?P<title>[^<]+)<td><a [^>]+href="(?P<url>[^"]+)[^>]+>' - def itemHook(item): - item.serieFolder = True - return item return locals() - # debugBlock=True data = support.match(item.url, headers=headers).data - folderItemlist = folder(item, data) if '<p>TUTTA L' in data else [] + itemlist = listed(item, data) + if not item.itemlist: + itemlist.extend(folder(item, data) if 'TUTTE LE' in data or 'TUTTA LA' in data else []) - patronBlock = r'(?P<block>sp-head[^>]+>\s*(?:STAGION[EI]\s*(?:(?:DA)?\s*[0-9]+\s*A)?\s*[0-9]+|MINISSERIE)(?::\s*PARTE\s*[0-9]+)? - (?P<lang>[^-<]+)(?:- (?P<quality>[^-<]+))?.*?<\/div>.*?)spdiv[^>]*>' - patron = r'(?:/>|<p>|<strong>)(?P<other>.*?(?P<episode>[0-9]+(?:×|×)[0-9]+)\s*(?P<title2>.*?)?(?:\s*–|\s*-|\s*<).*?)(?:<\/p>|<br)' - def itemlistHook(itemlist): - title_dict = {} - itlist = [] - for i in itemlist: - i.url = item.url - i.title = re.sub(r'\.(\D)',' \\1', i.title) - match = support.match(i.title, patron=r'(\d+.\d+)').match.replace('x','') - i.order = match - if match not in title_dict: - title_dict[match] = i - elif match in title_dict and i.contentLanguage == title_dict[match].contentLanguage \ - or i.contentLanguage == 'ITA' and not title_dict[match].contentLanguage \ - or title_dict[match].contentLanguage == 'ITA' and not i.contentLanguage: - title_dict[match].url = i.url - else: - title_dict[match + '1'] = i + itemDict = {'ITA':{}, 'Sub-ITA':{}} + seasons = [] - for key, value in title_dict.items(): - itlist.append(value) + for it in itemlist: + if it.contentSeason and it.contentSeason not in seasons: + seasons.append(it.contentSeason) + itemDict['ITA'][it.contentSeason] = [] + itemDict['Sub-ITA'][it.contentSeason] = [] + if it.contentSeason: + itemDict[it.contentLanguage][it.contentSeason].append(it) - itlist = sorted(itlist, key=lambda it: (it.contentLanguage, int(it.order))) - itlist.extend(folderItemlist) + itlist = [] + for season in sorted(seasons): + itlist.extend(sorted(itemDict['ITA'].get(season, []), key=lambda it: (it.contentSeason, it.contentEpisodeNumber))) + itlist.extend(sorted(itemDict['Sub-ITA'].get(season, []), key=lambda it: (it.contentSeason, it.contentEpisodeNumber))) + itemlist = itlist - return itlist - return locals() + + import inspect + if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']: + if len(seasons) > 1: + itemlist = support.season_pagination(itemlist, item, [], 'episodios') + else: + itemlist = support.pagination(itemlist, item, 'episodios') + support.videolibrary(itemlist, item) + support.download(itemlist, item) + + return itemlist def findvideos(item): diff --git a/channels/cinemalibero.py b/channels/cinemalibero.py index 6bb06f10..57319ecc 100644 --- a/channels/cinemalibero.py +++ b/channels/cinemalibero.py @@ -7,7 +7,7 @@ import re from core import httptools, support, scrapertools from core.item import Item -from platformcode import config +from platformcode import config, logger # rimanda a .today 
che contiene tutti link a .plus @@ -90,13 +90,13 @@ def episodios(item): data=item.data # debug=True if item.args == 'anime': - support.info("Anime :", item) + logger.debug("Anime :", item) # blacklist = ['Clipwatching', 'Verystream', 'Easybytez', 'Flix555', 'Cloudvideo'] patron = r'<a target=(?P<url>[^>]+>(?P<title>Episodio\s(?P<episode>\d+))(?::)?(?:(?P<title2>[^<]+))?.*?(?:<br|</p))' patronBlock = r'(?:Stagione (?P<season>\d+))?(?:</span><br />|</span></p>|strong></p>)(?P<block>.*?)(?:<div style="margin-left|<span class="txt_dow">)' # item.contentType = 'tvshow' elif item.args == 'serie': - support.info("Serie :", item) + logger.debug("Serie :", item) patron = r'(?:>| )(?P<episode>\d+(?:x|×|×)\d+)[;]?[ ]?(?:(?P<title>[^<–-]+)(?P<data>.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br /|</a></p|$)' patronBlock = r'>(?:[^<]+[Ss]tagione\s|[Ss]tagione [Uu]nica)(?:(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?.*?</strong>(?P<block>.+?)(?:<strong|<div class="at-below)' # item.contentType = 'tvshow' @@ -122,7 +122,7 @@ def genres(item): def search(item, texto): - support.info(item.url,texto) + logger.debug(item.url,texto) texto = texto.replace(' ', '+') item.url = host + "/?s=" + texto # item.contentType = 'tv' @@ -133,11 +133,11 @@ def search(item, texto): except: import sys for line in sys.exc_info(): - support.info("%s" % line) + logger.error("%s" % line) return [] def newest(categoria): - support.info('newest ->', categoria) + logger.debug('newest ->', categoria) itemlist = [] item = Item() item.args = 'newest' @@ -151,14 +151,14 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.info('newest log: ', (line)) + logger.error('newest log: ', (line)) return [] return itemlist def check(item): - support.info() + logger.debug() data = support.match(item.url, headers=headers).data if data: ck = support.match(data, patron=r'Supportaci condividendo quest[oa] ([^:]+)').match.lower() @@ -192,6 +192,6 @@ def check(item): def findvideos(item): - support.info() + logger.debug() item.data = item.data.replace('http://rapidcrypt.net/verys/', '').replace('http://rapidcrypt.net/open/', '') #blocca la ricerca return support.server(item, data=item.data) diff --git a/channels/cinetecadibologna.py b/channels/cinetecadibologna.py index 7c46505f..0e1b8b5b 100644 --- a/channels/cinetecadibologna.py +++ b/channels/cinetecadibologna.py @@ -2,9 +2,9 @@ # ------------------------------------------------------------ # Canale per cinetecadibologna # ------------------------------------------------------------ -from core.item import Item from core import support +from platformcode import logger host = support.config.get_channel_url() @@ -34,7 +34,7 @@ def menu(item): def search(item, text): - support.info(text) + logger.debug(text) item.args = 'noorder' item.url = host + '/ricerca/type_ALL/ricerca_' + text item.contentType = 'movie' @@ -44,7 +44,7 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] @@ -62,7 +62,7 @@ def peliculas(item): def findvideos(item): - support.info() + logger.debug() itemlist = [] matches = support.match(item, patron=r'filename: "(.*?)"').matches diff --git a/channels/discoveryplus.py b/channels/discoveryplus.py index b7ebf85d..4a42b049 100644 --- a/channels/discoveryplus.py +++ b/channels/discoveryplus.py @@ -55,7 +55,7 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return itemlist diff --git 
a/channels/dreamsub.py b/channels/dreamsub.py index 915debdb..162839b8 100644 --- a/channels/dreamsub.py +++ b/channels/dreamsub.py @@ -4,14 +4,13 @@ # ------------------------------------------------------------ from core import support +from platformcode import logger host = support.config.get_channel_url() headers = [['Referer', host]] @support.menu def mainlist(item): - support.info(item) - anime = ['/search?typeY=tv', ('Movie', ['/search?typeY=movie', 'peliculas', '', 'movie']), ('OAV', ['/search?typeY=oav', 'peliculas', '', 'tvshow']), @@ -37,7 +36,6 @@ def menu(item): patronGenreMenu = patronMenu def itemHook(item): - support.info(item.type) for Type, ID in support.match(item.other, patron=r'data-type="([^"]+)" data-id="([^"]+)"').matches: item.url = host + '/search?' + Type + 'Y=' + ID return item @@ -45,7 +43,7 @@ def menu(item): def search(item, text): - support.info(text) + logger.debug(text) text = text.replace(' ', '+') item.url = host + '/search/' + text @@ -56,12 +54,12 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.info('search log:', line) + logger.error('search log:', line) return [] def newest(categoria): - support.info(categoria) + logger.debug(categoria) item = support.Item() try: if categoria == "anime": @@ -72,7 +70,7 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.logger.error("{0}".format(line)) + logger.error("{0}".format(line)) return [] @@ -80,7 +78,7 @@ def newest(categoria): @support.scrape def peliculas(item): # debug = True - anime = True + numerationEnabled = True if 'movie' in item.url: item.contentType = 'movie' action = 'findvideos' @@ -105,7 +103,7 @@ def peliculas(item): @support.scrape def episodios(item): - anime = True + numerationEnabled = True pagination = 100 if item.data: @@ -118,7 +116,7 @@ def episodios(item): def findvideos(item): itemlist = [] - support.info() + logger.debug() # support.dbg() matches = support.match(item, patron=r'href="([^"]+)"', patronBlock=r'<div style="white-space: (.*?)<div id="main-content"') @@ -131,7 +129,7 @@ def findvideos(item): if 'vvvvid' in matches.data: itemlist.append(item.clone(action="play", title='VVVVID', url=support.match(matches.data, patron=r'(http://www.vvvvid[^"]+)').match, server='vvvvid')) else: - support.info('VIDEO') + logger.debug('VIDEO') for url in matches.matches: lang = url.split('/')[-2] if 'ita' in lang.lower(): diff --git a/channels/dsda.py b/channels/dsda.py index f50a133d..e0d511ab 100644 --- a/channels/dsda.py +++ b/channels/dsda.py @@ -33,7 +33,7 @@ def menu(item): return locals() def newest(categoria): - support.info() + logger.debug() item = Item() try: if categoria == "documentales": @@ -45,12 +45,12 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.logger.error("{0}".format(line)) + logger.error("{0}".format(line)) return [] def search(item, texto): - support.info(texto) + logger.debug(texto) item.url = host + "/?s=" + texto try: return peliculas(item) @@ -133,7 +133,7 @@ def episodios(item): def findvideos(item): - support.info() + logger.debug() if item.args == 'raccolta' or item.contentType == 'episode': return support.server(item, item.url) else: diff --git a/channels/eurostreaming.py b/channels/eurostreaming.py index 22d7b8c6..a2cac7c5 100644 --- a/channels/eurostreaming.py +++ b/channels/eurostreaming.py @@ -4,8 +4,9 @@ # by Greko # ------------------------------------------------------------ -from core import httptools, support +from core import support from core.item 
import Item +from platformcode import logger # def findhost(url): # permUrl = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers @@ -17,7 +18,6 @@ headers = [['Referer', host]] @support.menu def mainlist(item): - support.info() tvshow = [] anime = ['/category/anime-cartoni-animati/'] mix = [('Aggiornamenti {bullet bold} {TV}', ['/aggiornamento-episodi/', 'peliculas', 'newest']), @@ -58,10 +58,10 @@ def episodios(item): return locals() -def search(item, texto): - support.info() +def search(item, text): + logger.debug(text) - item.url = "%s/?s=%s" % (host, texto) + item.url = "%s/?s=%s" % (host, text) item.contentType = 'tvshow' try: @@ -71,12 +71,12 @@ def search(item, texto): except: import sys for line in sys.exc_info(): - support.info(line) + logger.error(line) return [] def newest(categoria): - support.info() + logger.debug() itemlist = [] item = Item() @@ -90,12 +90,12 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.info("{0}".format(line)) + logger.error("{0}".format(line)) return [] return itemlist def findvideos(item): - support.info() + logger.debug() return support.server(item, item.data) diff --git a/channels/fastsubita.py b/channels/fastsubita.py index c3cfe75f..f8ef1b89 100644 --- a/channels/fastsubita.py +++ b/channels/fastsubita.py @@ -16,10 +16,9 @@ - SOLO SUB-ITA """ -from core import support, httptools, scrapertools +from core import support, httptools from core.item import Item -from core.support import info -from platformcode import config +from platformcode import config, logger host = config.get_channel_url() headers = [['Referer', host]] @@ -40,7 +39,6 @@ def mainlist(item): @support.scrape def peliculas(item): - support.info(item) # support.dbg() deflang = 'Sub-ITA' @@ -97,13 +95,12 @@ def episodios_args(item): @support.scrape def episodios(item): - support.info(item) return episodios_args(item) @support.scrape def genres(item): - support.info() + logger.debug() #support.dbg() action = 'peliculas' @@ -120,7 +117,7 @@ def genres(item): def search(item, text): - support.info('search', item) + logger.debug('search', text) text = text.replace(' ', '+') item.url = host + '?s=' + text try: @@ -131,12 +128,12 @@ def search(item, text): except: import sys for line in sys.exc_info(): - info('search log:', line) + logger.error('search log:', line) return [] def newest(categoria): - support.info('newest ->', categoria) + logger.debug('newest ->', categoria) itemlist = [] item = Item() if categoria == 'series': @@ -153,14 +150,14 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.info('newest log: ', line) + logger.error('newest log: ', line) return [] return itemlist def findvideos(item): - support.info('findvideos ->', item) + logger.debug('findvideos ->', item) patron = r'<a href="([^"]+)">' itemlist = [] diff --git a/channels/filmigratis.py b/channels/filmigratis.py index 22ec19da..beca156e 100644 --- a/channels/filmigratis.py +++ b/channels/filmigratis.py @@ -15,7 +15,7 @@ import re from core import httptools, support from core.item import Item -from platformcode import config +from platformcode import config, logger host = config.get_channel_url() @@ -41,7 +41,6 @@ def mainlist(item): @support.scrape def peliculas(item): - support.info() if item.args == 'search': action = '' @@ -88,8 +87,6 @@ def peliculas(item): @support.scrape def episodios(item): - support.info() - action = 'findvideos' patronBlock = r'<div class="row">(?P<block>.*?)<section class="main-content">' patron = 
r'href="(?P<url>.*?)">(?:.+?)?\s+S(?P<season>\d+)\s\-\sEP\s(?P<episode>\d+)[^<]+<'
@@ -98,8 +95,6 @@
 def genres(item):
-    support.info()
-
     if item.contentType == 'movie':
         action = 'peliculas'
         patron = r'<a href="(?P<url>.*?)">(?P<title>.*?)<'
@@ -115,7 +110,7 @@
 def search(item, text):
-    support.info('search', item)
+    logger.debug('search', text)
     text = text.replace(' ', '+')
     item.url = host + '/search/?s=' + text
@@ -126,11 +121,11 @@
     except:
         import sys
         for line in sys.exc_info():
-            support.info('search log:', line)
+            logger.error('search log:', line)
         return []
 
 def newest(categoria):
-    support.info('newest ->', categoria)
+    logger.debug('newest ->', categoria)
     itemlist = []
     item = Item()
     try:
@@ -146,11 +141,11 @@ def newest(categoria):
     except:
         import sys
         for line in sys.exc_info():
-            support.info({0}.format(line))
+            logger.error("{0}".format(line))
         return []
     return itemlist
 
 def findvideos(item):
-    support.info()
+    logger.debug()
     return support.server(item)
diff --git a/channels/guardaseriecam.py b/channels/guardaseriecam.py
index cdab2da3..b2c24273 100644
--- a/channels/guardaseriecam.py
+++ b/channels/guardaseriecam.py
@@ -11,7 +11,6 @@
 # possibilità di miglioramento: inserire menu per genere - lista serie tv e gestire le novità
 
 from core import support
-from core.support import info
 from platformcode import logger, config
 
 host = config.get_channel_url()
@@ -51,7 +50,7 @@ def episodios(item):
 
 def search(item, text):
-    support.info('search', text)
+    logger.debug('search', text)
     item.contentType = 'tvshow'
     itemlist = []
     text = text.replace(' ', '+')
@@ -61,7 +60,7 @@
     except:
         import sys
         for line in sys.exc_info():
-            info('search log:', line)
+            logger.error('search log:', line)
         return []
 
diff --git a/channels/guardaserieclick.py b/channels/guardaserieclick.py
index ac1cdace..486fc364 100644
--- a/channels/guardaserieclick.py
+++ b/channels/guardaserieclick.py
@@ -16,8 +16,7 @@
 
 from core import support
 from core.item import Item
-from platformcode import config
-from core.support import info
+from platformcode import config, logger
 
 host = config.get_channel_url()
 headers = [['Referer', host]]
@@ -38,7 +37,7 @@ def mainlist(item):
 ##@support.scrape
 ##def peliculas(item):
 ####    import web_pdb; web_pdb.set_trace()
-##    info('peliculas ->\n', item)
+##    logger.debug('peliculas ->\n', item)
 ##
 ##    action = 'episodios'
 ##    block = r'(?P<block>.*?)<div\s+class="btn btn-lg btn-default btn-load-other-series">'
@@ -75,7 +74,7 @@ def mainlist(item):
 @support.scrape
 def peliculas(item):
 ##    import web_pdb; web_pdb.set_trace()
-    info('peliculas ->\n', item)
+    logger.debug('peliculas ->\n', item)
 
     action = 'episodios'
     blacklist = ['DMCA']
@@ -120,7 +119,7 @@ def peliculas(item):
 @support.scrape
 def episodios(item):
-    info()
+    logger.debug()
     action = 'findvideos'
     patron = r'<div class="number-episodes-on-img">\s?\d+.\d+\s?(?:\((?P<lang>[a-zA-Z\-]+)\))?</div>.+?(?:<span class="pull-left bottom-year">(?P<title2>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<plot>[^<]+)<[^>]+>[^>]+>[^>]+>\s?)?<span(?: meta-nextep="[^"]+")?
class="[^"]+" meta-serie="(?P<title>[^"]+)" meta-stag="(?P<season>\d+)" meta-ep="(?P<episode>\d+)" meta-embed="(?P<url>[^>]+)">' @@ -136,7 +135,7 @@ def episodios(item): @support.scrape def genres(item): - info() + logger.debug() action = 'peliculas' patronMenu = r'<li>\s<a\shref="(?P<url>[^"]+)"[^>]+>(?P<title>[^<]+)</a></li>' @@ -146,7 +145,7 @@ def genres(item): def search(item, text): - info(text) + logger.debug(text) item.url = host + "/?s=" + text item.contentType = 'tvshow' item.args = 'search' @@ -156,12 +155,12 @@ def search(item, text): except: import sys for line in sys.exc_info(): - info("%s" % line) + logger.error("%s" % line) return [] def newest(categoria): - info() + logger.debug() itemlist = [] item = Item() item.contentType = 'tvshow' @@ -176,12 +175,12 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - info("{0}".format(line)) + logger.error("{0}".format(line)) return [] return itemlist def findvideos(item): - info('--->', item) + logger.debug('--->', item) return support.server(item, item.url) diff --git a/channels/guardaserieicu.py b/channels/guardaserieicu.py index 4d60f951..113916f9 100644 --- a/channels/guardaserieicu.py +++ b/channels/guardaserieicu.py @@ -10,9 +10,9 @@ # possibilità di miglioramento: gestire le novità (sezione Ultimi episodi sul sito) -from core.support import info + from core import support -from platformcode import config +from platformcode import config, logger host = config.get_channel_url() headers = [['Referer', host]] @@ -40,7 +40,7 @@ def episodios(item): return locals() def search(item, text): - info(text) + logger.debug(text) item.contentType = 'tvshow' item.url = host + "/?s=" + text try: @@ -49,11 +49,11 @@ def search(item, text): except: import sys for line in sys.exc_info(): - info("%s" % line) + logger.error("%s" % line) return [] def findvideos(item): - support.info('findvideos', item) + logger.debug('findvideos', item) data = support.match(item, headers=headers, patron=r'div class="movieplay">([^>]+)').matches return support.server(item, data=data ) \ No newline at end of file diff --git a/channels/hd4me.py b/channels/hd4me.py index 2f0e35e9..bb92ccfa 100644 --- a/channels/hd4me.py +++ b/channels/hd4me.py @@ -4,6 +4,7 @@ # ------------------------------------------------------------ from core import support +from platformcode import logger host = support.config.get_channel_url() @@ -43,7 +44,7 @@ def genre(item): def search(item, text): - support.info(text) + logger.debug(text) item.url = host + '/?s=' + text try: return peliculas(item) @@ -51,7 +52,7 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.logger.error("search except: %s" % line) + logger.error("search except: %s" % line) return [] diff --git a/channels/ilcorsaronero.py b/channels/ilcorsaronero.py index fcaab674..a198582e 100644 --- a/channels/ilcorsaronero.py +++ b/channels/ilcorsaronero.py @@ -4,6 +4,7 @@ # ------------------------------------------------------------ from core import support +from platformcode import logger # def findhost(url): # data = support.httptools.downloadpage(url).data @@ -11,7 +12,7 @@ from core import support # return url[:-1] if url.endswith('/') else url host = support.config.get_channel_url() -support.info('HOST',host) +logger.debug('HOST',host) # host = 'https://ilcorsaronero.xyz' headers = [['Referer', host]] @@ -65,7 +66,7 @@ def peliculas(item): def search(item, text): - support.info(item, text) + logger.debug( text) if 'all' in item.args: item.url += text else: @@ -76,7 +77,7 
@@ def search(item, text): except: import sys for line in sys.exc_info(): - support.logger.error("search except: %s" % line) + logger.error("search except: %s" % line) return [] diff --git a/channels/ilgeniodellostreaming.py b/channels/ilgeniodellostreaming.py index c17e962d..7520c5a2 100644 --- a/channels/ilgeniodellostreaming.py +++ b/channels/ilgeniodellostreaming.py @@ -6,16 +6,14 @@ from core import support -from core.support import info from core.item import Item -from platformcode import config +from platformcode import config, logger host = config.get_channel_url() headers = [['Referer', host]] @support.menu def mainlist(item): - support.info(item) film = ['/film/', ('Generi',['', 'genres', 'genres']), @@ -46,7 +44,7 @@ def mainlist(item): @support.scrape def peliculas(item): - info() + logger.debug() # debugBlock = True # debug=True @@ -96,7 +94,7 @@ def peliculas(item): @support.scrape def episodios(item): - info() + logger.debug() patronBlock = r'<h1>.*?[ ]?(?:\[(?P<lang>.+?\]))?</h1>.+?<div class="se-a" style="display:block">\s*<ul class="episodios">(?P<block>.*?)</ul>\s*</div>\s*</div>\s*</div>\s*</div>\s*</div>' patron = r'<a href="(?P<url>[^"]+)"><img src="(?P<thumb>[^"]+)">.*?'\ @@ -108,7 +106,7 @@ def episodios(item): @support.scrape def genres(item): - info(item) + logger.debug(item) action='peliculas' if item.args == 'genres': @@ -126,7 +124,7 @@ def genres(item): def search(item, text): - info(text) + logger.debug(text) import uuid text = text.replace(' ', '+') item.url = host + '/?' + uuid.uuid4().hex + '=' + uuid.uuid4().hex + '&s=' + text @@ -136,12 +134,12 @@ def search(item, text): except: import sys for line in sys.exc_info(): - info("%s" % line) + logger.error("%s" % line) return [] def newest(categoria): - info(categoria) + logger.debug(categoria) itemlist = [] item = Item() @@ -162,14 +160,14 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - info("{0}".format(line)) + logger.error("{0}".format(line)) return [] return itemlist def findvideos(item): - info() + logger.debug() matches = support.match(item, patron=[r'var ilinks\s?=\s?([^;]+)',r' href="#option-\d">([^\s]+)\s*([^\s]+)']).matches itemlist = [] list_url = [] diff --git a/channels/ilgeniodellostreaming_cam.py b/channels/ilgeniodellostreaming_cam.py index 6b2ef18f..f2561712 100644 --- a/channels/ilgeniodellostreaming_cam.py +++ b/channels/ilgeniodellostreaming_cam.py @@ -5,9 +5,8 @@ from core import support -from core.support import info from core.item import Item -from platformcode import config +from platformcode import config, logger host = config.get_channel_url() headers = [['Referer', host]] @@ -54,7 +53,7 @@ def genres(item): return locals() def search(item, text): - info(text) + logger.debug(text) text = text.replace(' ', '+') item.url = host + "/search/" + text try: @@ -62,12 +61,12 @@ def search(item, text): except: import sys for line in sys.exc_info(): - info("%s" % line) + logger.error("%s" % line) return [] def newest(categoria): - info(categoria) + logger.debug(categoria) itemlist = [] item = Item() @@ -81,14 +80,14 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - info("{0}".format(line)) + logger.error("{0}".format(line)) return [] return itemlist def findvideos(item): - info() + logger.debug() urls = [] data = support.match(item).data urls += support.match(data, patron=r'id="urlEmbed" value="([^"]+)').matches diff --git a/channels/italiaserie.py b/channels/italiaserie.py index 2b554c98..074472e5 100644 --- a/channels/italiaserie.py +++ 
b/channels/italiaserie.py @@ -71,9 +71,9 @@ def category(item): return locals() -def search(item, texto): - support.info("s=", texto) - item.url = host + "/?s=" + texto +def search(item, text): + logger.debug(text) + item.url = host + "/?s=" + text item.contentType = 'tvshow' try: return peliculas(item) @@ -81,12 +81,12 @@ def search(item, texto): except: import sys for line in sys.exc_info(): - support.info("%s" % line) + logger.error("%s" % line) return [] def newest(categoria): - support.info("newest", categoria) + logger.debug("newest", categoria) itemlist = [] item = Item() try: diff --git a/channels/la7.py b/channels/la7.py index e45ca172..253d8783 100644 --- a/channels/la7.py +++ b/channels/la7.py @@ -5,6 +5,7 @@ import requests from core import support +from platformcode import logger DRM = 'com.widevine.alpha' key_widevine = "https://la7.prod.conax.cloud/widevine/license" @@ -69,6 +70,7 @@ def replay(item): return locals() def search(item, text): + logger.debug(text) item.url = host + '/tutti-i-programmi' item.search = text try: @@ -76,15 +78,15 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.info('search log:', line) + logger.error('search log:', line) return [] @support.scrape def peliculas(item): search = item.search - disabletmdb = True - addVideolibrary = False + tmdbEnabled = False + videlibraryEnabled = False downloadEnabled = False action = 'episodios' patron = r'<a href="(?P<url>[^"]+)"[^>]+><div class="[^"]+" data-background-image="(?P<t>[^"]+)"></div><div class="titolo">\s*(?P<title>[^<]+)<' @@ -110,7 +112,7 @@ def episodios(item): patron = r'(?:<a href="(?P<url>[^"]+)">[^>]+><div class="[^"]+" data-background-image="(?P<t>[^"]*)">[^>]+>[^>]+>[^>]+>(?:[^>]+>)?(?:[^>]+>){6}?)\s*(?P<title>[^<]+)<(?:[^>]+>[^>]+>[^>]+><div class="data">(?P<date>[^<]+))?|class="heading">[^>]+>(?P<Title>[^<]+).*?window.shareUrl = "(?P<Url>[^"]+)".*?poster:\s*"(?P<Thumb>[^"]+)", title: "(?P<desc>[^"]+)"' patronNext = r'<a href="([^"]+)">›' - addVideolibrary = False + videlibraryEnabled = False downloadEnabled = False def itemHook(item): @@ -128,7 +130,7 @@ def episodios(item): def play(item): - support.info() + logger.debug() if item.livefilter: for it in live(item): if it.fulltitle == item.livefilter: diff --git a/channels/mediasetplay.py b/channels/mediasetplay.py index 4b2d7a50..6976b625 100644 --- a/channels/mediasetplay.py +++ b/channels/mediasetplay.py @@ -103,7 +103,7 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] diff --git a/channels/mediasetplay.py.old b/channels/mediasetplay.py.old index d52a75e2..1a0645ee 100644 --- a/channels/mediasetplay.py.old +++ b/channels/mediasetplay.py.old @@ -104,7 +104,7 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return itemlist diff --git a/channels/metalvideo.py b/channels/metalvideo.py index 0fd57117..7dba79c9 100644 --- a/channels/metalvideo.py +++ b/channels/metalvideo.py @@ -4,6 +4,7 @@ # ------------------------------------------------------------ from core import support, config +from platformcode import logger host = 'https://metalvideo.com' headers = {'X-Requested-With': 'XMLHttpRequest'} @@ -49,7 +50,7 @@ def findvideos(item): def search(item, text): - support.info(text) + logger.debug(text) item.url = host + '/search.php?keywords=' + text + '&video-id=' try: return peliculas(item) @@ -57,5 +58,5 @@ def search(item, 
text): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] diff --git a/channels/mondoserietv.py b/channels/mondoserietv.py index ab812c96..c3d5c750 100644 --- a/channels/mondoserietv.py +++ b/channels/mondoserietv.py @@ -4,6 +4,7 @@ # ---------------------------------------------------------- from core import support +from platformcode import logger host = support.config.get_channel_url() headers = {'Referer': host} @@ -30,7 +31,7 @@ def mainlist(item): def search(item, text): - support.info(text) + logger.debug(text) if item.contentType == 'movie' or item.extra == 'movie': action = 'findvideos' else: @@ -43,12 +44,12 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] def newest(categoria): - support.info(categoria) + logger.debug(categoria) item = support.Item() try: if categoria == "series": @@ -64,14 +65,14 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.logger.error("{0}".format(line)) + logger.error("{0}".format(line)) return [] @support.scrape def peliculas(item): pagination = '' - anime = True + numerationEnabled = True patronNext = r'href="([^"]+)" title="[^"]+" class="lcp_nextlink"' action = 'findvideos' # debug=True @@ -97,13 +98,13 @@ def peliculas(item): patron = r'<li\s*><a href="(?P<url>[^"]+)" title="(?P<title>.*?)(?:\s(?P<year>\d{4}))?"[^>]*>' if item.contentType == 'tvshow': action = 'episodios' - anime = True + numerationEnabled = True return locals() @support.scrape def episodios(item): - anime = True + numerationEnabled = True pagination = 50 patronBlock = r'<table>(?P<block>.*?)</table>' patron = r'<tr><td><b>(?P<title>(?:\d+)?.*?)\s*(?:(?P<episode>(?:\d+x\d+|\d+)))\s*(?P<title2>[^<]+)(?P<data>.*?)<tr>' diff --git a/channels/piratestreaming.py b/channels/piratestreaming.py index dba3d996..2cf9d96a 100644 --- a/channels/piratestreaming.py +++ b/channels/piratestreaming.py @@ -5,7 +5,8 @@ from core import support -from core.support import config, info +from core.support import config +from platformcode import logger host = config.get_channel_url() headers = [['Referer', host]] @@ -23,7 +24,7 @@ def mainlist(item): def search(item, texto): - info(texto) + logger.debug(texto) item.url = host + "/?s=" + texto try: return peliculas(item) @@ -31,12 +32,12 @@ def search(item, texto): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] def newest(categoria): - support.info(categoria) + logger.debug(categoria) itemlist = [] item = support.Item() try: @@ -56,7 +57,7 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.logger.error("{0}".format(line)) + logger.error("{0}".format(line)) return [] return itemlist diff --git a/channels/polpotv.py b/channels/polpotv.py index 6d1fbae5..ec815d97 100644 --- a/channels/polpotv.py +++ b/channels/polpotv.py @@ -6,7 +6,7 @@ from core import support, jsontools from core.item import Item -from platformcode import config +from platformcode import config, logger import datetime host = config.get_channel_url() @@ -34,7 +34,7 @@ def mainlist(item): return locals() def newest(categoria): - support.info() + logger.debug() item = Item() if categoria == 'peliculas': item.contentType = 'movie' @@ -45,7 +45,7 @@ def newest(categoria): return peliculas(item) def peliculas(item): - support.info() + logger.debug() itemlist = [] data = support.match(item.url, 
headers=headers).data @@ -67,7 +67,7 @@ def peliculas(item): return itemlist def episodios(item): - support.info() + logger.debug() itemlist = [] data = support.match(item.url, headers=headers).data json_object = jsontools.load(data) @@ -83,7 +83,7 @@ def episodios(item): return itemlist def get_season(item, seas_url, seasonNumber): - support.info() + logger.debug() itemlist = [] data = support.match(seas_url, headers=headers).data json_object = jsontools.load(data) @@ -97,7 +97,7 @@ def get_season(item, seas_url, seasonNumber): return itemlist[::-1] def search(item, texto): - support.info(item.url, "search", texto) + logger.debug(item.url, "search", texto) itemlist=[] try: item.url = host + "/api/movies?originalTitle="+texto+"&translations.name=" +texto @@ -118,12 +118,12 @@ def search(item, texto): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] def search_movie_by_genre(item): - support.info() + logger.debug() itemlist = [] data = support.match(item.url, headers=headers).data json_object = jsontools.load(data) @@ -137,7 +137,7 @@ def search_movie_by_genre(item): def search_movie_by_year(item): - support.info() + logger.debug() now = datetime.datetime.now() year = int(now.year) itemlist = [] @@ -153,7 +153,7 @@ def search_movie_by_year(item): def findvideos(item): - support.info() + logger.debug() itemlist = [] try: data = support.match(item.url, headers=headers).data @@ -175,7 +175,7 @@ def findvideos(item): def get_itemlist_element(element,item): - support.info() + logger.debug() contentSerieName = '' contentTitle ='' try: diff --git a/channels/pufimovies.py b/channels/pufimovies.py index 4795a360..bff19e0b 100644 --- a/channels/pufimovies.py +++ b/channels/pufimovies.py @@ -4,6 +4,7 @@ # ------------------------------------------------------------ from core import support +from platformcode import logger host = support.config.get_channel_url() @@ -38,7 +39,7 @@ def menu(item): def search(item, text): - support.info('search', item) + logger.debug('search', item) itemlist = [] text = text.replace(' ', '+') item.url = host + '/search/keyword/' + text @@ -52,12 +53,12 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.info('search log:', line) + logger.error('search log:', line) return [] def newest(categoria): - support.info(categoria) + logger.debug(categoria) itemlist = [] item = support.Item() item.url = host @@ -77,7 +78,7 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] return itemlist @@ -109,6 +110,6 @@ def episodios(item): def findvideos(item): - support.info() + logger.debug() # match = support.match(item, patron='wstream', debug=True) return support.server(item) diff --git a/channels/raiplay.py b/channels/raiplay.py index e969da05..4434d907 100644 --- a/channels/raiplay.py +++ b/channels/raiplay.py @@ -83,7 +83,7 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] diff --git a/channels/seriehd.py b/channels/seriehd.py index ce8c4954..7f0c5597 100644 --- a/channels/seriehd.py +++ b/channels/seriehd.py @@ -5,6 +5,7 @@ from core import support +from platformcode import logger # def findhost(url): # return support.match(url, patron=r'<h2[^>]+><a href="([^"]+)"').match @@ -27,7 +28,7 @@ def mainlist(item): def search(item, texto): - support.info(texto) + logger.debug(texto) 
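Aside (not part of the patch): taken together, the channel hunks above and below converge on one logging convention: trace output goes through logger.debug() and caught exceptions through logger.error(), both imported from platformcode. A minimal sketch of that pattern; the host value and the stub peliculas() are placeholders, not code from this repository:

from platformcode import logger

host = 'https://example-channel.tld'       # placeholder host, for illustration only


def peliculas(item):                       # stub; a real channel scrapes the page here
    return []


def search(item, text):
    logger.debug(text)                     # trace the incoming query
    item.url = host + '/?s=' + text
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)      # failures are logged as errors, not debug noise
        return []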
item.contentType = 'tvshow' @@ -38,12 +39,12 @@ def search(item, texto): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] def newest(categoria): - support.info(categoria) + logger.debug(categoria) itemlist = [] item = support.Item() @@ -57,7 +58,7 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.logger.error("{0}".format(line)) + logger.error("{0}".format(line)) return [] return itemlist @@ -142,7 +143,7 @@ def menu(item): def findvideos(item): item.url = item.url.replace('&', '&') - support.info(item) + logger.debug(item) if item.args == 'last': url = support.match(item, patron = r'<iframe id="iframeVid" width="[^"]+" height="[^"]+" src="([^"]+)" allowfullscreen').match matches = support.match(url,patron=r'<a href="([^"]+)">(\d+)<', patronBlock=r'<h3>EPISODIO</h3><ul>(.*?)</ul>').matches diff --git a/channels/serietvonline.py b/channels/serietvonline.py index b2b1e45b..923ff857 100644 --- a/channels/serietvonline.py +++ b/channels/serietvonline.py @@ -15,9 +15,9 @@ Altrimenti: - Prima fare la 'Rinumerazione' dal menu contestuale dal titolo della serie """ -import re -from core import support, httptools, scrapertools -from platformcode import config + +from core import support, scrapertools +from platformcode import config, logger from core.item import Item @@ -31,7 +31,7 @@ headers = [['Referer', host]] @support.menu def mainlist(item): - support.info() + logger.debug() film = ['/ultimi-film-aggiunti/', @@ -56,8 +56,8 @@ def mainlist(item): @support.scrape def peliculas(item): - support.info() - anime = True + logger.debug() + numerationEnabled = True blacklist = ['DMCA', 'Contatti', 'Attenzione NON FARTI OSCURARE', 'Lista Cartoni Animati e Anime'] patronBlock = r'<h1>.+?</h1>(?P<block>.*?)<div class="footer_c">' @@ -79,7 +79,7 @@ def peliculas(item): if not item.args and 'anime' not in item.url: patron = r'<div class="movie">[^>]+>.+?src="(?P<thumb>[^"]+)" alt="[^"]+".+? 
href="(?P<url>[^"]+)">.*?<h2>(?P<title>[^"]+)</h2>\s?(?:<span class="year">(?P<year>\d+|\-\d+))?<' else: - anime = True + numerationEnabled = True patron = r'(?:<td>)?<a href="(?P<url>[^"]+)"(?:[^>]+)?>\s?(?P<title>[^<]+)(?P<episode>[\d\-x]+)?(?P<title2>[^<]+)?<' else: # SEZIONE FILM @@ -106,8 +106,8 @@ def peliculas(item): @support.scrape def episodios(item): - support.info() - anime = True + logger.debug() + numerationEnabled = True action = 'findvideos' patronBlock = r'<table>(?P<block>.*)<\/table>' patron = r'<tr><td>(?P<title>.*?)?[ ](?:Parte)?(?P<episode>\d+x\d+|\d+)(?:|[ ]?(?P<title2>.+?)?(?:avi)?)<(?P<data>.*?)<\/td><tr>' @@ -121,7 +121,7 @@ def episodios(item): def search(item, text): - support.info("CERCA :" ,text, item) + logger.debug("CERCA :" ,text, item) item.url = "%s/?s=%s" % (host, text) @@ -132,11 +132,11 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.info("%s" % line) + logger.error("%s" % line) return [] def newest(categoria): - support.info(categoria) + logger.debug(categoria) itemlist = [] item = Item() @@ -155,13 +155,13 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.info("{0}".format(line)) + logger.error("{0}".format(line)) return [] return itemlist def findvideos(item): - support.info() + logger.debug() if item.contentType == 'movie': return support.server(item, headers=headers) else: diff --git a/channels/serietvsubita.py b/channels/serietvsubita.py index 80a99a45..14ff66db 100644 --- a/channels/serietvsubita.py +++ b/channels/serietvsubita.py @@ -9,7 +9,6 @@ import time from core import httptools, tmdb, scrapertools, support from core.item import Item -from core.support import info from platformcode import logger, config host = config.get_channel_url() @@ -21,7 +20,6 @@ list_language = IDIOMAS.values() @support.menu def mainlist(item): - info() itemlist = [] tvshowSub = [ ('Novità {bold}',[ '', 'peliculas_tv', '', 'tvshow']), @@ -52,7 +50,7 @@ def cleantitle(scrapedtitle): # ---------------------------------------------------------------------------------------------------------------- def findvideos(item): - info() + logger.debug() data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data data = re.sub(r'\n|\t|\s+', ' ', data) # recupero il blocco contenente i link @@ -66,8 +64,8 @@ def findvideos(item): episodio = item.infoLabels['episode'] patron = r'\.\.:: Episodio %s([\s\S]*?)(<div class="post|..:: Episodio)' % episodio - info(patron) - info(blocco) + logger.debug(patron) + logger.debug(blocco) matches = scrapertools.find_multiple_matches(blocco, patron) if len(matches): @@ -89,7 +87,7 @@ def findvideos(item): # ---------------------------------------------------------------------------------------------------------------- def lista_serie(item): - info() + logger.debug() itemlist = [] PERPAGE = 15 @@ -137,7 +135,7 @@ def lista_serie(item): # ---------------------------------------------------------------------------------------------------------------- def episodios(item, itemlist=[]): - info() + logger.debug() patron = r'<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>.*?' 
patron += r'<p><a href="([^"]+)">' @@ -212,7 +210,7 @@ def episodios(item, itemlist=[]): # ---------------------------------------------------------------------------------------------------------------- def peliculas_tv(item): - info() + logger.debug() itemlist = [] patron = r'<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>' @@ -265,7 +263,7 @@ def peliculas_tv(item): # ---------------------------------------------------------------------------------------------------------------- def newest(categoria): - info(categoria) + logger.debug(categoria) itemlist = [] item = Item() item.url = host @@ -289,7 +287,7 @@ def newest(categoria): # ---------------------------------------------------------------------------------------------------------------- def search(item, texto): - info(texto) + logger.debug(texto) itemlist = [] try: patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)"\s?>([^<]+)</a>' @@ -313,7 +311,7 @@ def search(item, texto): except: import sys for line in sys.exc_info(): - support.info('search log:', line) + logger.error('search log:', line) return [] return itemlist @@ -325,7 +323,7 @@ def search(item, texto): def list_az(item): - info() + logger.debug() itemlist = [] alphabet = dict() diff --git a/channels/serietvu.py b/channels/serietvu.py index e0c68d55..91702709 100644 --- a/channels/serietvu.py +++ b/channels/serietvu.py @@ -8,10 +8,9 @@ """ import re -from core import support, httptools, scrapertools +from core import support from core.item import Item -from core.support import info -from platformcode import config +from platformcode import config, logger host = config.get_channel_url() headers = [['Referer', host]] @@ -55,8 +54,8 @@ def episodios(item): patron = r'(?:<div class="list (?:active)?")?\s*<a data-id="\d+(?:[ ](?P<lang>[SuUbBiItTaA\-]+))?"(?P<other>[^>]+)>.*?Episodio [0-9]+\s?(?:<br>(?P<title>[^<]+))?.*?Stagione (?P<season>[0-9]+) , Episodio - (?P<episode>[0-9]+).*?<(?P<url>.*?<iframe)' def itemHook(i): for value, season in seasons: - info(value) - info(season) + logger.debug(value) + logger.debug(season) i.title = i.title.replace(value+'x',season+'x') i.other += '\n' + i.url i.url = item.url @@ -74,7 +73,7 @@ def genres(item): def search(item, text): - info(text) + logger.debug(text) item.url = host + "/?s=" + text try: item.contentType = 'tvshow' @@ -83,12 +82,12 @@ def search(item, text): except: import sys for line in sys.exc_info(): - info("%s" % line) + logger.debug("%s" % line) return [] def newest(categoria): - info(categoria) + logger.debug(categoria) itemlist = [] item = Item() try: @@ -103,14 +102,14 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - info("{0}".format(line)) + logger.error("{0}".format(line)) return [] return itemlist def findvideos(item): - info(item) + logger.debug(item) if item.args != 'update': return support.server(item, data=item.other) else: diff --git a/channels/streamingaltadefinizione.py b/channels/streamingaltadefinizione.py index f0685434..89b604dc 100644 --- a/channels/streamingaltadefinizione.py +++ b/channels/streamingaltadefinizione.py @@ -4,8 +4,7 @@ # ------------------------------------------------------------ from core import support, httptools -from core.item import Item -from platformcode import config +from platformcode import config, logger import sys if sys.version_info[0] >= 3: from urllib.parse import unquote @@ -32,14 +31,14 @@ def mainlist(item): def search(item, text): - support.info("[streamingaltadefinizione.py] " + item.url + " search 
" + text) + logger.debug(text) item.url = item.url + "/?s=" + text try: return support.dooplay_search(item) except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] diff --git a/channels/tantifilm.py b/channels/tantifilm.py index 116a5ab8..db9217d6 100644 --- a/channels/tantifilm.py +++ b/channels/tantifilm.py @@ -3,11 +3,9 @@ # Canale per Tantifilm # ------------------------------------------------------------ -from core import scrapertools, httptools, support +from core import support from core.item import Item -from core.support import info -from platformcode import logger -from platformcode import config +from platformcode import logger, config # def findhost(url): @@ -23,7 +21,7 @@ player_iframe = r'<iframe.*?src="([^"]+)"[^>]+></iframe>\s*<div class="player' @support.menu def mainlist(item): - info() + logger.debug() top = [('Generi', ['', 'category'])] film = ['/film', @@ -114,7 +112,7 @@ def category(item): def search(item, texto): - info(texto) + logger.debug(texto) item.url = host + "/?s=" + texto @@ -126,7 +124,7 @@ def search(item, texto): except: import sys for line in sys.exc_info(): - logger.error("%s" % line) + logger.error('{}'.format(line)) return [] @@ -155,17 +153,17 @@ def hdpass(item): def findvideos(item): - info() - support.info("ITEMLIST: ", item) + logger.debug() + logger.debug("ITEMLIST: ", item) data = support.match(item.url, headers=headers).data check = support.match(data, patron=r'<div class="category-film">(.*?)</div>').match if 'sub' in check.lower(): item.contentLanguage = 'Sub-ITA' - support.info("CHECK : ", check) + logger.debug("CHECK : ", check) if 'anime' in check.lower(): item.contentType = 'tvshow' item.data = data - support.info('select = ### è una anime ###') + logger.debug('select = ### è una anime ###') try: return episodios(item) except: @@ -180,7 +178,7 @@ def findvideos(item): # if 'protectlink' in data: # urls = scrapertools.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"') - # support.info("SONO QUI: ", urls) + # logger.debug("SONO QUI: ", urls) # for url in urls: # url = url.decode('base64') # # tiro via l'ultimo carattere perchè non c'entra @@ -191,7 +189,7 @@ def findvideos(item): # if url: # listurl.add(url) # data += '\n'.join(listurl) - info(data) + logger.debug(data) itemlist = [] # support.dbg() @@ -203,7 +201,7 @@ def findvideos(item): if item.otherLinks: urls += support.match(item.otherLinks, patron=r'href="([^"]+)').matches - info('URLS', urls) + logger.debug('URLS', urls) for u in urls: if 'hdplayer.casa/series/' in u: urls.remove(u) diff --git a/channels/tapmovie.py b/channels/tapmovie.py index e9d2637f..65fd92fa 100644 --- a/channels/tapmovie.py +++ b/channels/tapmovie.py @@ -4,6 +4,7 @@ from core import support, httptools from core.item import Item +from platformcode import logger import sys if sys.version_info[0] >= 3: from concurrent import futures else: from concurrent_py2 import futures @@ -26,7 +27,7 @@ def mainlist(item): def episodios(item): - support.info(item) + logger.debug(item) itemlist = [] with futures.ThreadPoolExecutor() as executor: @@ -53,7 +54,7 @@ def genres(item): def peliculas(item, text=''): - support.info('search', item) + logger.debug('search', item) itemlist = [] filter_type = False if item.genre: diff --git a/channels/toonitalia.py b/channels/toonitalia.py index 2a16dae9..35c37c1f 100644 --- a/channels/toonitalia.py +++ b/channels/toonitalia.py @@ -4,6 +4,7 @@ # ------------------------------------------------------------ from 
core import scrapertools, support +from platformcode import logger import sys host = support.config.get_channel_url() @@ -24,7 +25,7 @@ def mainlist(item): def search(item, text): - support.info(text) + logger.debug(text) # item.args='search' item.text = text item.url = item.url + '/?a=b&s=' + text.replace(' ', '+') @@ -35,12 +36,12 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] def newest(categoria): - support.info(categoria) + logger.debug(categoria) item = support.Item() try: item.contentType = 'undefined' @@ -51,7 +52,7 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - support.logger.error("{0}".format(line)) + logger.error("{0}".format(line)) return [] @@ -60,7 +61,7 @@ def peliculas(item): # debugBlock = True # debug = True # search = item.text - if item.contentType != 'movie': anime = True + if item.contentType != 'movie': numerationEnabled = True action = 'findvideos' if item.contentType == 'movie' else 'episodios' blacklist = ['-Film Animazione disponibili in attesa di recensione '] @@ -82,7 +83,7 @@ def peliculas(item): patronNext = '<a class="next page-numbers" href="([^"]+)">' def itemHook(item): - support.info(item.title) + logger.debug(item.title) if item.args == 'sub': item.title += support.typo('Sub-ITA', 'bold color kod _ []') item.contentLanguage = 'Sub-ITA' @@ -92,7 +93,7 @@ def peliculas(item): @support.scrape def episodios(item): - anime = True + numerationEnabled = True # debug = True patron = r'>\s*(?:(?P<season>\d+)(?:×|x|×))?(?P<episode>\d+)(?:\s+–\s+)?[ –]+(?P<title2>[^<]+)[ –]+<a (?P<data>.*?)(?:<br|</p)' # data = '' diff --git a/channels/tunein.py b/channels/tunein.py index c70630cc..fb062923 100644 --- a/channels/tunein.py +++ b/channels/tunein.py @@ -28,7 +28,7 @@ def mainlist(item): def radio(item): - support.info() + logger.debug() itemlist = [] data = support.match(item, patron= r'text="(?P<title>[^\("]+)(?:\((?P<location>[^\)]+)\))?" 
URL="(?P<url>[^"]+)" bitrate="(?P<quality>[^"]+)" reliability="[^"]+" guide_id="[^"]+" subtext="(?P<song>[^"]+)" genre_id="[^"]+" formats="(?P<type>[^"]+)" (?:playing="[^"]+" )?(?:playing_image="[^"]+" )?(?:show_id="[^"]+" )?(?:item="[^"]+" )?image="(?P<thumb>[^"]+)"') if data.matches: @@ -85,7 +85,7 @@ def findvideos(item): def search(item, text): - support.info(text) + logger.debug(text) item.url = host + '/Search.ashx?query=' +text try: return radio(item) diff --git a/channels/vvvvid.py b/channels/vvvvid.py index 5c3b9659..f683773a 100644 --- a/channels/vvvvid.py +++ b/channels/vvvvid.py @@ -3,7 +3,7 @@ # Canale per vvvvid # ---------------------------------------------------------- import requests, sys, inspect -from core import jsontools, support, tmdb +from core import support, tmdb from platformcode import autorenumber, logger, config host = support.config.get_channel_url() @@ -68,7 +68,7 @@ def mainlist(item): def search(item, text): - support.info(text) + logger.debug(text) itemlist = [] if conn_id: if 'film' in item.url: item.contentType = 'movie' @@ -79,7 +79,7 @@ def search(item, text): except: import sys for line in sys.exc_info(): - support.logger.error("%s" % line) + logger.error("%s" % line) return [] return itemlist @@ -105,7 +105,7 @@ def peliculas(item): # support.dbg() if not item.args: json_file =loadjs(item.url + 'channel/10005/last/') - support.logger.debug(json_file) + logger.debug(json_file) make_itemlist(itemlist, item, json_file) itemlist = support.pagination(itemlist, item, item.page if item.page else 1, 20) if item.contentType != 'movie': autorenumber.start(itemlist) @@ -241,7 +241,7 @@ def make_itemlist(itemlist, item, data): def loadjs(url): if '?category' not in url: url += '?full=true' - support.info('Json URL;',url) + logger.debug('Json URL;',url) json = current_session.get(url, headers=headers, params=payload).json() return json diff --git a/core/support.py b/core/support.py index 26adacd9..5ae0a100 100755 --- a/core/support.py +++ b/core/support.py @@ -20,9 +20,7 @@ from time import time from core import httptools, scrapertools, servertools, tmdb, channeltools, autoplay from core.item import Item from lib import unshortenit -from platformcode import config -from platformcode.logger import info -from platformcode import logger +from platformcode import config, logger channels_order = {'Rai 1': 1, 'Rai 2': 2, @@ -74,94 +72,431 @@ channels_order = {'Rai 1': 1, 'Rai Radio 2': 999, } +########## MAIN FUNCTION ########## +class scrape: + """https://github.com/kodiondemand/addon/wiki/decoratori#scrape""" -def hdpass_get_servers(item, data=''): - def get_hosts(url, quality): - ret = [] - page = httptools.downloadpage(url, CF=False).data - mir = scrapertools.find_single_match(page, patron_mir) + # Legenda: + # known_keys per i groups nei patron + # known_keys = ['url', 'title', 'title2', 'season', 'episode', 'episode2', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang', 'size', 'seed'] + # url = link relativo o assoluto alla pagina titolo film/serie + # title = titolo Film/Serie/Anime/Altro + # title2 = titolo dell'episodio Serie/Anime/Altro + # season = stagione in formato numerico + # episode = numero episodio, in formato numerico. + # episode2 = numero episodio/i aggiuntivi, in formato numerico. 
+ # thumb = link realtivo o assoluto alla locandina Film/Serie/Anime/Altro + # quality = qualità indicata del video + # year = anno in formato numerico (4 cifre) + # plot = plot del video + # duration = durata del Film/Serie/Anime/Altro + # genere = genere del Film/Serie/Anime/Altro. Es: avventura, commedia + # rating = punteggio/voto in formato numerico + # type = tipo del video. Es. movie per film o tvshow per le serie. Di solito sono discrimanti usati dal sito + # lang = lingua del video. Es: ITA, Sub-ITA, Sub, SUB ITA. + # size = dimensione del video + # seed = seed del torrent + # AVVERTENZE: Se il titolo è trovato nella ricerca TMDB/TVDB/Altro allora le locandine e altre info non saranno quelle recuperate nel sito.!!!! - for mir_url, srv in scrapertools.find_multiple_matches(mir, patron_option): - mir_url = scrapertools.decodeHtmlentities(mir_url) - logger.debug(mir_url) - it = hdpass_get_url(item.clone(action='play', quality=quality, url=mir_url))[0] - # it = item.clone(action="play", quality=quality, title=srv, server=srv, url= mir_url) - # if not servertools.get_server_parameters(srv.lower()): it = hdpass_get_url(it)[0] # do not exists or it's empty - ret.append(it) - return ret - # Carica la pagina - itemlist = [] - if 'hdpass' in item.url or 'hdplayer' in item.url: url = item.url - else: - if not data: - data = httptools.downloadpage(item.url, CF=False).data.replace('\n', '') - patron = r'<iframe(?: id="[^"]+")? width="[^"]+" height="[^"]+" src="([^"]+)"[^>]+><\/iframe>' - url = scrapertools.find_single_match(data, patron) - url = url.replace("&download=1", "") - if 'hdpass' not in url and 'hdplayer' not in url: return itemlist - if not url.startswith('http'): url = 'https:' + url - item.referer = url + def __init__(self, func): + self.func = func - data = httptools.downloadpage(url, CF=False).data - patron_res = '<div class="buttons-bar resolutions-bar">(.*?)<div class="buttons-bar' - patron_mir = '<div class="buttons-bar hosts-bar">(.*?)(?:<div id="main-player|<script)' - patron_option = r'<a href="([^"]+?)"[^>]+>([^<]+?)</a' + def __call__(self, *args): + self.args = self.func(*args) + self.function = self.func.__name__ if not 'actLike' in self.args else self.args['actLike'] - res = scrapertools.find_single_match(data, patron_res) + # self.args + self.item = self.args['item'] + self.action = self.args.get('action', 'findvideos') + self.search = self.args.get('search', '') + self.lang = self.args.get('deflang', '') - # non threaded for webpdb - # for res_url, res_video in scrapertools.find_multiple_matches(res, patron_option): - # res_url = scrapertools.decodeHtmlentities(res_url) - # itemlist.extend(get_hosts(res_url, res_video)) - # - with futures.ThreadPoolExecutor() as executor: - thL = [] - for res_url, res_video in scrapertools.find_multiple_matches(res, patron_option): - res_url = scrapertools.decodeHtmlentities(res_url) - thL.append(executor.submit(get_hosts, res_url, res_video)) - for res in futures.as_completed(thL): - if res.result(): - itemlist.extend(res.result()) + self.headers = self.args['headers'] if 'headers' in self.args else self.func.__globals__['headers'] if 'headers' in self.func.__globals__ else '' - return server(item, itemlist=itemlist) + self.data = self.args.get('data', '') + self.patronBlock = self.args.get('patronBlock', '') + self.patron = self.args.get('patron', self.args.get('patronMenu', self.args.get('patronGenreMenu', ''))) + + self.patronNext = self.args.get('patronNext', '') + self.patronTotalPages = self.args.get('patronTotalPages', '') + 
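Aside (illustrative, not from the patch): everything read from self.args in this constructor and in _scrape() is simply a local variable of the decorated channel function, which ends with return locals(). A hedged sketch of the channel side under the renamed flags tmdbEnabled, videlibraryEnabled and numerationEnabled (formerly disabletmdb, addVideolibrary and anime); the host and patron below are invented for illustration:

from core import support

host = 'https://example.tld'                 # placeholder, not a channel touched by this patch


@support.scrape
def peliculas(item):
    action = 'findvideos'
    tmdbEnabled = False                      # was: disabletmdb = True
    videlibraryEnabled = False               # was: addVideolibrary = False
    numerationEnabled = True                 # was: anime = True
    pagination = 20                          # page size, consumed by the scrape class
    # named groups must use the known_keys documented above (url, title, thumb, year, ...)
    patron = r'<a href="(?P<url>[^"]+)" title="(?P<title>[^"]+)"' \
             r'.*?src="(?P<thumb>[^"]+)".*?>(?P<year>\d{4})<'
    patronNext = r'<a class="next" href="([^"]+)"'
    return locals()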
+ self.pagination = self.args.get('pagination', False) + self.seasonPagination = self.args.get('seasonPagination', True) + + self.debug = self.args.get('debug', False) + self.debugBlock = self.args.get('debugBlock', False) + + self.blacklist = self.args.get('blacklist', []) + + self.typeActionDict = self.args.get('typeActionDict', {}) + self.typeContentDict = self.args.get('typeContentDict', {}) + + self.sceneTitle = self.args.get('sceneTitle') + self.group = self.args.get('group', False) + self.tmdbEnabled = self.args.get('tmdbEnabled', True) + self.videlibraryEnabled = self.args.get('videlibraryEnabled', True) + self.numerationEnabled = self.args.get('numerationEnabled', False) + self.downloadEnabled = self.args.get('downloadEnabled', True) + + if self.args.get('disableAll', False): + self.videlibraryEnabled = False + self.downloadEnabled = False + self.seasonPagination = False + + # variable + self.pag = self.item.page if self.item.page else 1 + self.itemlist = [] + self.matches = [] + self.seasons = [] + self.itemParams = Item() + self.known_keys = ['url', 'title', 'title2', 'season', 'episode', 'episode2', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang', 'size', 'seed'] + + # run scrape + self._scrape() + return self.itemlist + + def _scrape(self): + + if self.item.itemlist: + scrapingTime = time() + self.itemlist = itemlistdb() + self.seasons = self.item.allSeasons + else: + for n in range(2): + logger.debug('PATRON= ', self.patron) + if not self.data: + page = httptools.downloadpage(self.item.url, headers=self.headers, ignore_response_code=True) + self.item.url = page.url # might be a redirect + self.data = page.data + self.data = html_uniform(self.data) + scrapingTime = time() + if self.patronBlock: + if self.debugBlock: regexDbg(self.item, self.patronBlock, self.headers, self.data) + blocks = scrapertools.find_multiple_matches_groups(self.data, self.patronBlock) + for bl in blocks:self._scrapeBlock(bl) + + elif self.patron: + self._scrapeBlock(self.data) + + if 'itemlistHook' in self.args: + try: + self.itemlist = self.args['itemlistHook'](self.itemlist) + except: + raise logger.ChannelScraperException + + # if url may be changed and channel has findhost to update + if 'findhost' in self.func.__globals__ and not self.itemlist and n == 0: + logger.debug('running findhost ' + self.func.__module__) + ch = self.func.__module__.split('.')[-1] + try: + host = config.get_channel_url(self.func.__globals__['findhost'], ch, True) + parse = list(urlparse.urlparse(self.item.url)) + parse[1] = scrapertools.get_domain_from_url(host) + self.item.url = urlparse.urlunparse(parse) + except: + raise logger.ChannelScraperException + self.data = None + self.itemlist = [] + self.matches = [] + else: + break + + if not self.data: + from platformcode.logger import WebErrorException + raise WebErrorException(urlparse.urlparse(self.item.url)[1], self.item.channel) -def hdpass_get_url(item): - data = httptools.downloadpage(item.url, CF=False).data - src = scrapertools.find_single_match(data, r'<iframe allowfullscreen custom-src="([^"]+)') - if src: item.url = base64.b64decode(src) - else: item.url = scrapertools.find_single_match(data, r'<iframe allowfullscreen src="([^"]+)') - item.url, c = unshortenit.unshorten_only(item.url) - return [item] + if self.group and self.item.grouped or self.args.get('groupExplode'): + import copy + nextargs = copy.copy(self.args) + @scrape + def newFunc(): + return nextargs + nextargs['item'] = nextPage(self.itemlist, self.item, 
self.function, data=self.data, patron=self.patronNext, patron_total_pages=self.patronTotalPages) + nextargs['group'] = False + if nextargs['item']: + nextargs['groupExplode'] = True + self.itemlist.pop() # remove next page just added + self.itemlist.extend(newFunc()) + else: + nextargs['groupExplode'] = False + nextargs['item'] = self.item + self.itemlist = newFunc() + self.itemlist = [i for i in self.itemlist if i.action not in ['add_movie_to_library', 'add_serie_to_library']] + + if not self.group and not self.args.get('groupExplode') and ((self.pagination and len(self.matches) <= self.pag * self.pagination) or not self.pagination): # next page with pagination + if self.patronNext and inspect.stack()[1][3] not in ['newest'] and len(inspect.stack()) > 2 and inspect.stack()[2][3] not in ['get_channel_results']: + nextPage(self.itemlist, self.item, self.function, data=self.data, patron=self.patronNext, patron_total_pages=self.patronTotalPages) + + if self.numerationEnabled and inspect.stack()[1][3] not in ['find_episodes']: + from platformcode import autorenumber + if self.function == 'episodios': + autorenumber.start(self.itemlist, self.item) + + for i in self.itemlist: + if i.contentSeason and i.contentSeason not in self.seasons: + self.seasons.append(i.contentSeason) + + else: autorenumber.start(self.itemlist) -def color(text, color): - return "[COLOR " + color + "]" + text + "[/COLOR]" + if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']: + if len(self.seasons) > 1 and self.seasonPagination: + self.itemlist = season_pagination(self.itemlist, self.item, self.seasons, self.function) + elif self.pagination or self.seasonPagination: + self.itemlist = pagination(self.itemlist, self.item, self.function) + + if self.action != 'play' and 'patronMenu' not in self.args and 'patronGenreMenu' not in self.args and self.tmdbEnabled and inspect.stack()[1][3] not in ['add_tvshow'] and self.function not in ['episodios', 'mainlist'] or (self.function in ['episodios'] and config.get_setting('episode_info')): # and function != 'episodios' and item.contentType in ['movie', 'tvshow', 'episode', 'undefined'] + tmdb.set_infoLabels_itemlist(self.itemlist, seekTmdb=True) + + if inspect.stack()[1][3] not in ['find_episodes', 'add_tvshow']: + if self.videlibraryEnabled and (self.item.infoLabels["title"] or self.item.fulltitle): + # item.fulltitle = item.infoLabels["title"] + videolibrary(self.itemlist, self.item, function=self.function) + if self.downloadEnabled and self.function == 'episodios' or self.function == 'findvideos': + download(self.itemlist, self.item, function=self.function) + + if 'patronGenreMenu' in self.args and self.itemlist: + self.itemlist = thumb(self.itemlist, mode='genre') + if 'patronMenu' in self.args and self.itemlist: + self.itemlist = thumb(self.itemlist) + + if 'fullItemlistHook' in self.args: + try: + self.itemlist = self.args['fullItemlistHook'](self.itemlist) + except: + raise logger.ChannelScraperException -def search(channel, item, texto): - info(item.url + " search " + texto) - item.url = channel.host + "/?s=" + texto - try: - return channel.peliculas(item) - # Continua la ricerca in caso di errore - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] + if config.get_setting('trakt_sync'): + from core import trakt_tools + trakt_tools.trakt_check(self.itemlist) + logger.debug('scraping time: ', time()-scrapingTime) + def _scrapeBlock(self, block): + itemlist = [] + contents = [] -def dbg(): - if 
config.dev_mode(): + if type(block) == dict: + if 'season' in block and block['season']: self.item.season = block['season'] + if 'lang' in block: self.item.contentLanguage = scrapeLang(block, self.item.contentLanguage) + if 'quality' in block and block['quality']: self.item.quality = block['quality'].strip() + block = block['block'] + + if self.debug: + regexDbg(self.item, self.patron, self.headers, block) + + matches = scrapertools.find_multiple_matches_groups(block, self.patron) + logger.debug('MATCHES =', matches) + + for match in matches: + self.scraped = {} + for kk in self.known_keys: + val = match[kk] if kk in match else '' + if val and (kk == "url" or kk == 'thumb') and 'http' not in val: + domain = '' + if val.startswith('//'): + domain = scrapertools.find_single_match(self.item.url, 'https?:') + elif val.startswith('/'): + domain = scrapertools.find_single_match(self.item.url, 'https?://[a-z0-9.-]+') + val = domain + val + self.scraped[kk] = val.strip() if type(val) == str else val + + self.itemParams.title = cleantitle(self.scraped.get('title', '')) + if self.group and self.scraped.get('title', '') in contents and not self.item.grouped: # same title and grouping enabled + continue + if self.item.grouped and self.scraped.get('title', '') != self.item.fulltitle: # inside a group different tvshow should not be included + continue + + contents.append(self.itemParams.title) + + self.itemParams.title2 = cleantitle(self.scraped.get('title2', '')) if not self.group or self.item.grouped else '' + self.itemParams.quality = self.scraped.get('quality') + self.itemParams.plot = cleantitle(self.scraped.get("plot", '')) + self.itemParams.language = scrapeLang(self.scraped, self.lang) + + self.set_infolabels() + if self.sceneTitle: self.set_sceneTitle() + + if not self.group or self.item.grouped: + self.set_episodes() + + if self.scraped['episode2']: self.itemParams.second_episode = scrapertools.find_single_match(self.scraped['episode2'], r'(\d+)').split('x') + if self.itemParams.season: self.itemParams.infoLabels['season'] = int(self.itemParams.season) + if self.itemParams.episode: self.itemParams.infoLabels['episode'] = int(self.itemParams.episode) + + itemlist.append(self.set_item(match)) + + self.itemlist.extend(itemlist) + self.matches.extend(matches) + + def set_infolabels(self): + if self.item.infoLabels["title"] == self.scraped["title"]: + infolabels = self.item.infoLabels + else: + if self.function == 'episodios': + infolabels = self.item.infoLabels + else: + infolabels = {} + if self.scraped['year']: + infolabels['year'] = self.scraped['year'] + if self.scraped["plot"]: + infolabels['plot'] = self.itemParams.plot + if self.scraped['duration']: + dur = scrapertools.find_multiple_matches(self.scraped['duration'], r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)') + for h, m in dur: + self.scraped['duration'] = int(h) * 60 + int(m) + if not dur: + self.scraped['duration'] = scrapertools.find_single_match(self.scraped['duration'], r'(\d+)') + try: + infolabels['duration'] = int(self.scraped['duration']) * 60 + except: + self.scraped['duration'] = '' + if self.scraped['genere']: + genres = scrapertools.find_multiple_matches(self.scraped['genere'], '[A-Za-z]+') + infolabels['genere'] = ", ".join(genres) + if self.scraped["rating"]: + infolabels['rating'] = scrapertools.decodeHtmlentities(self.scraped["rating"]) + + self.itemParams.infoLabels = infolabels + + def set_sceneTitle(self): + from lib.guessit import guessit try: - import web_pdb - if not web_pdb.WebPdb.active_instance: - 
import webbrowser - webbrowser.open('http://127.0.0.1:5555') - web_pdb.set_trace() + parsedTitle = guessit(self.itemParams.title) + self.itemParams.title = parsedTitle.get('title', '') + logger.debug('TITOLO',self.itemParams.title) + if parsedTitle.get('source'): + self.itemParams.quality = str(parsedTitle.get('source')) + if parsedTitle.get('screen_size'): + self.itemParams.quality += ' ' + str(parsedTitle.get('screen_size', '')) + if not self.scraped['year']: + if type(parsedTitle.get('year', '')) == list: + self.itemParams.infoLabels['year'] = parsedTitle.get('year', '')[0] + else: + self.itemParams.infoLabels['year'] = parsedTitle.get('year', '') + if parsedTitle.get('episode') and parsedTitle.get('season'): + if type(parsedTitle.get('season')) == list: + self.itemParams.season = parsedTitle.get('season')[0] + elif parsedTitle.get('season'): + self.itemParams.season = parsedTitle.get('season') + + if type(parsedTitle.get('episode')) == list: + self.itemParams.episode = parsedTitle.get('episode')[0] + self.itemParams.second_episode = parsedTitle.get('episode')[1:] + else: + self.itemParams.infoLabels['episode'] = parsedTitle.get('episode') + + elif parsedTitle.get('season') and type(parsedTitle.get('season')) == list: + self.itemParams.extraInfo = '{}: {}-{}'.format(config.get_localized_string(30140), parsedTitle.get('season')[0], parsedTitle.get('season')[-1]) + elif parsedTitle.get('season'): + self.itemParams.season = parsedTitle.get('season') + if parsedTitle.get('episode_title'): + self.itemParams.extraInfo += parsedTitle.get('episode_title') except: - pass + import traceback + logger.error(traceback.format_exc()) + + def set_episodes(self): + ep = unifyEp(self.scraped['episode']) if self.scraped['episode'] else '' + se = self.scraped['season'] if self.scraped['season'].isdigit() else '' + if ep and se: + self.itemParams.season = int(se) + if 'x' in ep: + ep_list = ep.split('x') + self.itemParams.episode = ep_list[0] + self.itemParams.second_episode = ep_list[1:] + else: + self.itemParams.episode = ep + + elif self.item.season: + self.itemParams.season = int(self.item.season) + if ep: self.itemParams.episode = int(scrapertools.find_single_match(self.scraped['episode'], r'(\d+)')) + + elif self.item.contentType == 'tvshow' and (self.scraped['episode'] == '' and self.scraped['season'] == '' and self.itemParams.season == ''): + self.item.news = 'season_completed' + + else: + try: + if 'x' in ep: + ep_list = ep.split('x') + self.itemParams.episode = ep_list[1].strip() + self.itemParams.season = ep_list[0].strip() + if len(ep_list) > 2: + self.itemParams.second_episode = ep_list[2:] + else: + self.itemParams.episode = ep + except: + logger.debug('invalid episode: ' + self.itemParams.episode) + pass + + def set_item(self, match): + AC = '' + CT = '' + if self.typeContentDict: + for name, variants in self.typeContentDict.items(): + if str(self.scraped['type']).lower() in variants: + CT = name + break + else: CT = self.item.contentType + if self.typeActionDict: + for name, variants in self.typeActionDict.items(): + if str(self.scraped['type']).lower() in variants: + AC = name + break + else: AC = self.action + if (not self.scraped['title'] or self.scraped["title"] not in self.blacklist) and (self.search.lower() in self.itemParams.title.lower()): + + it = self.item.clone(title=self.itemParams.title, + fulltitle=self.itemParams.title, + show=self.itemParams.title, + infoLabels=self.itemParams.infoLabels, + contentSeason= self.itemParams.infoLabels.get('season', ''), + contentEpisodeNumber= 
self.itemParams.infoLabels.get('episode', ''), + grouped = self.group, + episode2 = self.itemParams.second_episode, + extraInfo = self.itemParams.extraInfo, + disable_videolibrary = not self.args.get('addVideolibrary', True), + size = self.scraped['size'], + seed = self.scraped['seed']) + + if self.scraped["url"]: it.url = self.scraped["url"] + if self.function == 'episodios': it.fulltitle = it.show = self.itemParams.title + if self.itemParams.quality: it.quality = self.itemParams.quality + if self.itemParams.language: it.contentLanguage = self.itemParams.language + if self.item.prevthumb: it.thumbnail = self.item.prevthumb + elif self.scraped["thumb"]: it.thumbnail = self.scraped["thumb"] + it.contentType = 'episode' if self.function == 'episodios' else CT if CT else self.item.contentType + if it.contentType not in ['movie'] and self.function != 'episodios' or it.contentType in ['undefined']: it.contentSerieName = self.itemParams.title + if self.function == 'peliculas': it.contentTitle= self.itemParams.title + it.contentSeason= self.itemParams.infoLabels.get('season', ''), + it.contentEpisodeNumber= self.itemParams.infoLabels.get('episode', ''), + if self.itemParams.title2: it.title2 = self.itemParams.title2 + + if self.scraped['episode'] and self.group and not self.item.grouped: + it.action = self.function + elif AC: + it.action = AC + else: + it.action=self.action + + if it.action == 'findvideos': + it.window = True if self.item.window_type == 0 or (config.get_setting("window_type") == 0) else False + if it.window: it.folder = False + + for lg in list(set(match.keys()).difference(self.known_keys)): + it.__setattr__(lg, match[lg]) + + if 'itemHook' in self.args: + try: + it = self.args['itemHook'](it) + except: + raise logger.ChannelScraperException + + if it.contentSeason and it.contentSeason not in self.seasons: + self.seasons.append(it.contentSeason) + + return it def regexDbg(item, patron, headers, data=''): @@ -225,251 +560,6 @@ def unifyEp(ep): return ep -def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, typeContentDict, typeActionDict, blacklist, search, pag, function, lang, sceneTitle, group): - itemlist = [] - if debug: - regexDbg(item, patron, headers, block) - matches = scrapertools.find_multiple_matches_groups(block, patron) - logger.debug('MATCHES =', matches) - - known_keys = ['url', 'title', 'title2', 'season', 'episode', 'episode2', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang', 'size', 'seed'] - # Legenda known_keys per i groups nei patron - # known_keys = ['url', 'title', 'title2', 'season', 'episode', 'thumb', 'quality', - # 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang'] - # url = link relativo o assoluto alla pagina titolo film/serie - # title = titolo Film/Serie/Anime/Altro - # title2 = titolo dell'episodio Serie/Anime/Altro - # season = stagione in formato numerico - # episode = numero episodio, in formato numerico. - # thumb = linkrealtivo o assoluto alla locandina Film/Serie/Anime/Altro - # quality = qualità indicata del video - # year = anno in formato numerico (4 cifre) - # duration = durata del Film/Serie/Anime/Altro - # genere = genere del Film/Serie/Anime/Altro. Es: avventura, commedia - # rating = punteggio/voto in formato numerico - # type = tipo del video. Es. movie per film o tvshow per le serie. Di solito sono discrimanti usati dal sito - # lang = lingua del video. Es: ITA, Sub-ITA, Sub, SUB ITA. 
- # AVVERTENZE: Se il titolo è trovato nella ricerca TMDB/TVDB/Altro allora le locandine e altre info non saranno quelle recuperate nel sito.!!!! - - season = '' # per quei siti che hanno la stagione nel blocco ma non nelle puntate - episode = '' - second_episode = '' - extraInfo = '' - contents = [] - - for i, match in enumerate(matches): - if pagination and (pag - 1) * pagination > i and not search: continue # pagination - if pagination and i >= pag * pagination and not search: break # pagination - # listGroups = match.keys() - # match = match.values() - - # if len(listGroups) > len(match): # to fix a bug - # match = list(match) - # match.extend([''] * (len(listGroups) - len(match))) - - scraped = {} - for kk in known_keys: - val = match[kk] if kk in match else '' - # val = match[listGroups.index(kk)] if kk in listGroups else '' - if val and (kk == "url" or kk == 'thumb') and 'http' not in val: - domain = '' - if val.startswith('//'): - domain = scrapertools.find_single_match(item.url, 'https?:') - elif val.startswith('/'): - domain = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') - val = domain + val - scraped[kk] = val.strip() if type(val) == str else val - - # episode = re.sub(r'\s-\s|-|x|–|×', 'x', scraped['episode']) if scraped['episode'] else '' - - title = cleantitle(scraped.get('title', '')) - if group and scraped.get('title', '') in contents and not item.grouped: # same title and grouping enabled - continue - if item.grouped and scraped.get('title', '') != item.fulltitle: # inside a group different tvshow should not be included - continue - contents.append(title) - title2 = cleantitle(scraped.get('title2', '')) if not group or item.grouped else '' - quality = scraped.get('quality', '') - # Type = scraped['type'] if scraped['type'] else '' - plot = cleantitle(scraped.get("plot", '')) - - - # if title is set, probably this is a list of episodes or video sources - # necessaria l'aggiunta di == scraped["title"] altrimenti non prende i gruppi dopo le categorie - if item.infoLabels["title"] == scraped["title"]: - infolabels = item.infoLabels - else: - if function == 'episodios': - infolabels = item.infoLabels - else: - infolabels = {} - if scraped['year']: - infolabels['year'] = scraped['year'] - if scraped["plot"]: - infolabels['plot'] = plot - if scraped['duration']: - dur = scrapertools.find_multiple_matches(scraped['duration'], r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)') - for h, m in dur: - scraped['duration'] = int(h) * 60 + int(m) - if not dur: - scraped['duration'] = scrapertools.find_single_match(scraped['duration'], r'(\d+)') - try: - infolabels['duration'] = int(scraped['duration']) * 60 - except: - scraped['duration'] = '' - if scraped['genere']: - genres = scrapertools.find_multiple_matches(scraped['genere'], '[A-Za-z]+') - infolabels['genere'] = ", ".join(genres) - if scraped["rating"]: - infolabels['rating'] = scrapertools.decodeHtmlentities(scraped["rating"]) - - - if not group or item.grouped: - ep = unifyEp(scraped['episode']) if scraped['episode'] else '' - se = scraped['season'] if scraped['season'].isdigit() else '' - if ep and se: - season = int(se) - if 'x' in ep: - ep_list = ep.split('x') - episode = ep_list.strip() - second_episode = ep_list[1:] - else: - episode = ep - - elif item.season: - season = int(item.season) - if ep: episode = int(scrapertools.find_single_match(scraped['episode'], r'(\d+)')) - - elif item.contentType == 'tvshow' and (scraped['episode'] == '' and scraped['season'] == '' and season == ''): - item.news = 
'season_completed' - - else: - try: - if 'x' in ep: - ep_list = ep.split('x') - episode = ep_list[1].strip() - season = ep_list[0].strip() - if len(ep_list) > 2: - second_episode = ep_list[2:] - else: - episode = ep - except: - logger.debug('invalid episode: ' + episode) - pass - - if scraped['episode2']: - ep2 = scrapertools.find_single_match(scraped['episode2'], r'(\d+)') - ep_list = ep2.split('x') - second_episode = ep_list - - - if sceneTitle: - from lib.guessit import guessit - try: - parsedTitle = guessit(title) - title = parsedTitle.get('title', '') - logger.debug('TITOLO',title) - if parsedTitle.get('source'): - quality = str(parsedTitle.get('source')) - if parsedTitle.get('screen_size'): - quality += ' ' + str(parsedTitle.get('screen_size', '')) - if not scraped['year']: - if type(parsedTitle.get('year', '')) == list: - infolabels['year'] = parsedTitle.get('year', '')[0] - else: - infolabels['year'] = parsedTitle.get('year', '') - if parsedTitle.get('episode') and parsedTitle.get('season'): - if type(parsedTitle.get('season')) == list: - season = parsedTitle.get('season')[0] - elif parsedTitle.get('season'): - season = parsedTitle.get('season') - - if type(parsedTitle.get('episode')) == list: - episode = parsedTitle.get('episode')[0] - second_episode = parsedTitle.get('episode')[1:] - else: - infolabels['episode'] = parsedTitle.get('episode') - - elif parsedTitle.get('season') and type(parsedTitle.get('season')) == list: - extraInfo = '{}: {}-{}'.format(config.get_localized_string(30140), parsedTitle.get('season')[0], parsedTitle.get('season')[-1]) - elif parsedTitle.get('season'): - season = parsedTitle.get('season') - if parsedTitle.get('episode_title'): - extraInfo += parsedTitle.get('episode_title') - except: - import traceback - logger.error(traceback.format_exc()) - - if season: infolabels['season'] = int(season) - if episode: infolabels['episode'] = int(episode) - - lang1 = scrapeLang(scraped, lang) - - - AC = CT = '' - if typeContentDict: - for name, variants in typeContentDict.items(): - if str(scraped['type']).lower() in variants: - CT = name - break - else: CT = item.contentType - if typeActionDict: - for name, variants in typeActionDict.items(): - if str(scraped['type']).lower() in variants: - AC = name - break - else: AC = action - - if (not scraped['title'] or scraped["title"] not in blacklist) and (search.lower() in title.lower()): - contentType = 'episode' if function == 'episodios' else CT if CT else item.contentType - it = Item( - channel = item.channel, - action = AC if AC else action, - contentType = contentType, - title = title, - fulltitle = item.fulltitle if function == 'episodios' else title, - show=item.show if function == 'episodios' else title, - quality=quality, - url=scraped["url"] if scraped["url"] else item.url, - infoLabels=infolabels, - thumbnail=item.prevthumb if item.prevthumb else item.thumbnail if not scraped["thumb"] else scraped["thumb"], - args=item.args, - contentSerieName= title if contentType not in ['movie'] and function != 'episodios' or contentType in ['undefined'] else item.contentSerieName, - contentTitle= title if contentType in ['movie', 'undefined'] and function == 'peliculas' else item.contentTitle, - contentLanguage = lang1, - contentSeason= infolabels.get('season', ''), - contentEpisodeNumber=infolabels.get('episode', ''), - news = item.news if item.news else '', - # other = scraped['other'] if scraped['other'] else '', - grouped = group, - title2 = cleantitle(title2) if title2 else '', - episode2 = second_episode, - extraInfo 
= extraInfo, - disable_videolibrary = not args.get('addVideolibrary', True), - size = scraped['size'], - seed = scraped['seed'] - ) - - if scraped['episode'] and group and not item.grouped: # some adjustment for grouping feature - it.action = function - if it.action == 'findvideos': - it.window = True if item.window_type == 0 or (config.get_setting("window_type") == 0) else False - if it.window: it.folder = False - # for lg in list(set(listGroups).difference(known_keys)): - # it.__setattr__(lg, match[listGroups.index(lg)]) - for lg in list(set(match.keys()).difference(known_keys)): - it.__setattr__(lg, match[lg]) - - if 'itemHook' in args: - try: - it = args['itemHook'](it) - except: - raise logger.ChannelScraperException - itemlist.append(it) - - return itemlist, matches - - def html_uniform(data): """ replace all ' with " and eliminate newline, so we don't need to worry about @@ -477,290 +567,34 @@ def html_uniform(data): return re.sub("='([^']+)'", '="\\1"', data.replace('\n', ' ').replace('\t', ' ').replace(' ', ' ')) -def scrape(func): - """https://github.com/kodiondemand/addon/wiki/decoratori#scrape""" +# Debug - def wrapper(*args): - itemlist = [] - - args = func(*args) - function = func.__name__ if not 'actLike' in args else args['actLike'] - # info('STACK= ',inspect.stack()[1][3]) - item = args['item'] - - action = args.get('action', 'findvideos') - anime = args.get('anime', '') - addVideolibrary = args.get('addVideolibrary', True) - search = args.get('search', '') - blacklist = args.get('blacklist', []) - data = args.get('data', '') - patron = args.get('patron', args.get('patronMenu', args.get('patronGenreMenu', ''))) - if 'headers' in args: - headers = args['headers'] - elif 'headers' in func.__globals__: - headers = func.__globals__['headers'] - else: - headers = '' - patronNext = args.get('patronNext', '') - patronTotalPages = args.get('patronTotalPages', '') - patronBlock = args.get('patronBlock', '') - typeActionDict = args.get('typeActionDict', {}) - typeContentDict = args.get('typeContentDict', {}) - debug = args.get('debug', False) - debugBlock = args.get('debugBlock', False) - disabletmdb = args.get('disabletmdb', False) - if 'pagination' in args and inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']: pagination = args['pagination'] if args['pagination'] else 20 - else: pagination = '' - lang = args.get('deflang', '') - sceneTitle = args.get('sceneTitle') - group = args.get('group', False) - downloadEnabled = args.get('downloadEnabled', True) - pag = item.page if item.page else 1 # pagination - matches = [] - - for n in range(2): - logger.debug('PATRON= ', patron) - if not data: - page = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True) - item.url = page.url # might be a redirect - data = page.data - data = html_uniform(data) - scrapingTime = time() - if patronBlock: - if debugBlock: - regexDbg(item, patronBlock, headers, data) - blocks = scrapertools.find_multiple_matches_groups(data, patronBlock) - for bl in blocks: - # info(len(blocks),bl) - if 'season' in bl and bl['season']: - item.season = bl['season'] - blockItemlist, blockMatches = scrapeBlock(item, args, bl['block'], patron, headers, action, pagination, debug, - typeContentDict, typeActionDict, blacklist, search, pag, function, lang, sceneTitle, group) - for it in blockItemlist: - if 'lang' in bl: - it.contentLanguage = scrapeLang(bl, it.contentLanguage) - if 'quality' in bl and bl['quality']: - it.quality = bl['quality'].strip() - # it.title = 
it.title + typo(bl['quality'].strip(), '_ [] color kod') - itemlist.extend(blockItemlist) - matches.extend(blockMatches) - elif patron: - itemlist, matches = scrapeBlock(item, args, data, patron, headers, action, pagination, debug, typeContentDict, - typeActionDict, blacklist, search, pag, function, lang, sceneTitle, group) - - if 'itemlistHook' in args: - try: - itemlist = args['itemlistHook'](itemlist) - except: - raise logger.ChannelScraperException - - # if url may be changed and channel has findhost to update - if 'findhost' in func.__globals__ and not itemlist and n == 0: - info('running findhost ' + func.__module__) - ch = func.__module__.split('.')[-1] - try: - host = config.get_channel_url(func.__globals__['findhost'], ch, True) - - parse = list(urlparse.urlparse(item.url)) - parse[1] = scrapertools.get_domain_from_url(host) - item.url = urlparse.urlunparse(parse) - except: - raise logger.ChannelScraperException - data = None - itemlist = [] - matches = [] - else: - break - - if not data: - from platformcode.logger import WebErrorException - raise WebErrorException(urlparse.urlparse(item.url)[1], item.channel) - - if group and item.grouped or args.get('groupExplode'): - import copy - nextArgs = copy.copy(args) - @scrape - def newFunc(): - return nextArgs - nextArgs['item'] = nextPage(itemlist, item, data, patronNext, function, patron_total_pages=patronTotalPages) - nextArgs['group'] = False - if nextArgs['item']: - nextArgs['groupExplode'] = True - itemlist.pop() # remove next page just added - itemlist.extend(newFunc()) - else: - nextArgs['groupExplode'] = False - nextArgs['item'] = item - itemlist = newFunc() - itemlist = [i for i in itemlist if i.action not in ['add_movie_to_library', 'add_serie_to_library']] - - if anime and inspect.stack()[1][3] not in ['find_episodes']: - from platformcode import autorenumber - if function == 'episodios': autorenumber.start(itemlist, item) - else: autorenumber.start(itemlist, item) - - if action != 'play' and 'patronMenu' not in args and 'patronGenreMenu' not in args and not disabletmdb and inspect.stack()[1][3] not in ['add_tvshow'] and function not in ['episodios', 'mainlist'] or (function in ['episodios'] and config.get_setting('episode_info')): # and function != 'episodios' and item.contentType in ['movie', 'tvshow', 'episode', 'undefined'] - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) - - if not group and not args.get('groupExplode') and ((pagination and len(matches) <= pag * pagination) or not pagination): # next page with pagination - if patronNext and inspect.stack()[1][3] not in ['newest'] and len(inspect.stack()) > 2 and inspect.stack()[2][3] not in ['get_channel_results']: - nextPage(itemlist, item, data, patronNext, function, patron_total_pages=patronTotalPages) +def dbg(): + if config.dev_mode(): + try: + import web_pdb + if not web_pdb.WebPdb.active_instance: + import webbrowser + webbrowser.open('http://127.0.0.1:5555') + web_pdb.set_trace() + except: + pass - # next page for pagination - if pagination and len(matches) > pag * pagination and not search: - if inspect.stack()[1][3] not in ['newest','get_newest']: - itemlist.append( - Item(channel=item.channel, - action = item.action, - contentType=item.contentType, - title=typo(config.get_localized_string(30992), 'color kod bold'), - fulltitle= item.fulltitle, - show= item.show, - url=item.url, - args=item.args, - page=pag + 1, - thumbnail=thumb(), - prevthumb=item.prevthumb if item.prevthumb else item.thumbnail)) +# Menu - - if inspect.stack()[1][3] not in 
['find_episodes', 'add_tvshow']: - if addVideolibrary and (item.infoLabels["title"] or item.fulltitle): - # item.fulltitle = item.infoLabels["title"] - videolibrary(itemlist, item, function=function) - if downloadEnabled and function == 'episodios' or function == 'findvideos': - download(itemlist, item, function=function) - - if 'patronGenreMenu' in args and itemlist: - itemlist = thumb(itemlist, mode='genre') - if 'patronMenu' in args and itemlist: - itemlist = thumb(itemlist) - - if 'fullItemlistHook' in args: - try: - itemlist = args['fullItemlistHook'](itemlist) - except: - raise logger.ChannelScraperException - - # itemlist = filterLang(item, itemlist) # causa problemi a newest - - if config.get_setting('trakt_sync'): - from core import trakt_tools - trakt_tools.trakt_check(itemlist) - logger.debug('scraping time: ', time()-scrapingTime) - return itemlist - - return wrapper - - -def dooplay_get_links(item, host, paramList=[]): - # get links from websites using dooplay theme and dooplay_player - # return a list of dict containing these values: url, title and server - if not paramList: - data = httptools.downloadpage(item.url).data.replace("'", '"') - patron = r'<li id="player-option-[0-9]".*?data-type="([^"]+)" data-post="([^"]+)" data-nume="([^"]+)".*?<span class="title".*?>([^<>]+)</span>(?:<span class="server">([^<>]+))?' - matches = scrapertools.find_multiple_matches(data, patron) - else: - matches = paramList - ret = [] - - for type, post, nume, title, server in matches: - postData = urlencode({ - "action": "doo_player_ajax", - "post": post, - "nume": nume, - "type": type - }) - dataAdmin = httptools.downloadpage(host + '/wp-admin/admin-ajax.php', post=postData,headers={'Referer': item.url}).data - link = scrapertools.find_single_match(dataAdmin, r"<iframe.*src='([^']+)'") - if not link: link = scrapertools.find_single_match(dataAdmin, r'"embed_url":"([^"]+)"').replace('\\','') - ret.append({ - 'url': link, - 'title': title, - 'server': server - }) - - return ret - - -@scrape -def dooplay_get_episodes(item): - item.contentType = 'tvshow' - patron = '<li class="mark-[0-9]+">.*?<img.*?(?:data-lazy-)?src="(?P<thumb>[^"]+).*?(?P<episode>[0-9]+ - [0-9]+).*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+).*?(?P<year>[0-9]{4})' - actLike = 'episodios' - - return locals() - - -@scrape -def dooplay_peliculas(item, mixed=False, blacklist=""): - actLike = 'peliculas' - # debug = True - if item.args == 'searchPage': - return dooplay_search_vars(item, blacklist) - else: - if item.contentType == 'movie': - action = 'findvideos' - patron = '<article id="post-[0-9]+" class="item movies">.*?<img src="(?!data)(?P<thumb>[^"]+)".*?(?:<span class="quality">(?P<quality>[^<>]+).*?)?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></h3>.*?(?:<span>[^<>]*(?P<year>[0-9]{4})</span>|</article>)' - else: - action = 'episodios' - patron = '<article id="post-[0-9]+" class="item (?P<type>' + ('\w+' if mixed else 'tvshows') + ')">.*?<img src="(?!data)(?P<thumb>[^"]+)".*?(?:<span class="quality">(?P<quality>[^<>]+))?.*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></h3>.*?(?:<span>(?P<year>[0-9]{4})</span>|</article>).*?(?:<div class="texto">(?P<plot>[^<>]+)|</article>).*?(?:genres">(?P<genre>.*?)</div>|</article>)' - patronNext = '<div class="pagination">.*?class="current".*?<a href="([^"]+)".*?<div class="resppages">' - addVideolibrary = False - - if mixed: - typeActionDict={'findvideos': ['movies'], 'episodios': ['tvshows']} - typeContentDict={'film': ['movies'], 'serie': ['tvshows']} - - return locals() - - -@scrape 
-def dooplay_search(item, blacklist=""): - return dooplay_search_vars(item, blacklist) - - -def dooplay_search_vars(item, blacklist): - if item.contentType == 'list': # ricerca globale - type = '(?P<type>movies|tvshows)' - typeActionDict = {'findvideos': ['movies'], 'episodios': ['tvshows']} - typeContentDict = {'movie': ['movies'], 'tvshow': ['tvshows']} - elif item.contentType == 'movie': - type = 'movies' - action = 'findvideos' - else: - type = 'tvshows' - action = 'episodios' - patron = '<div class="result-item">.*?<img src="(?P<thumb>[^"]+)".*?<span class="' + type + '">(?P<quality>[^<>]+).*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a>.*?<span class="year">(?P<year>[0-9]{4}).*?<div class="contenido"><p>(?P<plot>[^<>]+)' - patronNext = '<a class="arrow_pag" href="([^"]+)"><i id="nextpagination"' - - return locals() - - -def dooplay_menu(item, type): - patronMenu = '<a href="(?P<url>[^"#]+)"(?: title="[^"]+")?>(?P<title>[a-zA-Z0-9]+)' - patronBlock = '<nav class="' + item.args + '">(?P<block>.*?)</nav>' - action = 'peliculas' - - return locals() - - -def menuItem(itemlist, filename, title='', action='', url='', contentType='undefined', args=[], style=True): +def menuItem(itemlist, channel, title='', action='', url='', contentType='undefined', args=[], style=True): # Function to simplify menu creation # Call typo function if style: title = typo(title) - if contentType == 'movie': extra = 'movie' - else: extra = 'tvshow' - itemlist.append(Item( - channel = filename, + channel = channel, title = title, action = action, url = url, - extra = extra, args = args, contentType = contentType, )) @@ -777,10 +611,15 @@ def menu(func): host = func.__globals__['host'] menuHost = args.get('host','') if menuHost: host = menuHost - filename = func.__module__.split('.')[1] + channel = func.__module__.split('.')[1] single_search = False # listUrls = ['film', 'filmSub', 'tvshow', 'tvshowSub', 'anime', 'animeSub', 'search', 'top', 'topSub'] listUrls = ['top', 'film', 'tvshow', 'anime', 'search', 'host'] + names = {'film':config.get_localized_string(30122), + 'tvshow':config.get_localized_string(30123), + 'anime':config.get_localized_string(30124), + 'doc':config.get_localized_string(30125), + 'music':config.get_localized_string(30139)} listUrls_extra = [] dictUrl = {} @@ -792,9 +631,7 @@ def menu(func): for name in listUrls: dictUrl[name] = args.get(name, None) logger.debug(dictUrl[name]) - if name == 'film': title = 'Film' - if name == 'tvshow': title = 'Serie TV' - if name == 'anime': title = 'Anime' + if name in names: title = names[name] if name == 'search' and dictUrl[name] is not None: single_search = True @@ -803,7 +640,7 @@ def menu(func): elif name == 'top' and dictUrl[name] is not None: if not global_search: for sub, var in dictUrl['top']: - menuItem(itemlist, filename, + menuItem(itemlist, channel, title = sub + '{italic bold}', url = host + var[0] if len(var) > 0 else '', action = var[1] if len(var) > 1 else 'peliculas', @@ -818,22 +655,24 @@ def menu(func): url = dictUrl[name][0] if type(dictUrl[name][0]) is not tuple and len(dictUrl[name][0]) > 0 else '' if not global_search: - menuItem(itemlist, filename, + menuItem(itemlist, channel, title + '{bullet bold}', 'peliculas', host + url, contentType='movie' if name == 'film' else 'tvshow') + if len(dictUrl[name]) > 0: if type(dictUrl[name][0]) is not tuple and type(dictUrl[name]) is not str: dictUrl[name].pop(0) + if dictUrl[name] is not None and type(dictUrl[name]) is not str: for sub, var in dictUrl[name]: - menuItem(itemlist, filename, + 
menuItem(itemlist, channel, title = sub + '{submenu} {' + title + '}', url = host + var[0] if len(var) > 0 else '', action = var[1] if len(var) > 1 else 'peliculas', args=var[2] if len(var) > 2 else '', contentType= var[3] if len(var) > 3 else 'movie' if name == 'film' else 'tvshow') # add search menu for category - if 'search' not in args: menuItem(itemlist, filename, config.get_localized_string(70741) % title + '… {submenu bold}', 'search', host + url, contentType='movie' if name == 'film' else 'tvshow', style=not global_search) + if 'search' not in args: menuItem(itemlist, channel, config.get_localized_string(70741) % title + '… {submenu bold}', 'search', host + url, contentType='movie' if name == 'film' else 'tvshow', style=not global_search) # Make EXTRA MENU (on bottom) for name, var in args.items(): @@ -844,7 +683,7 @@ def menu(func): dictUrl[name] = args.get(name, None) for sub, var in dictUrl[name]: # sub = scrapertools.unescape(sub) - menuItem(itemlist, filename, + menuItem(itemlist, channel, title = sub, url = host + var[0] if len(var) > 0 else '', action = var[1] if len(var) > 1 else 'peliculas', @@ -852,7 +691,7 @@ def menu(func): contentType= var[3] if len(var) > 3 else 'movie',) if single_search: - menuItem(itemlist, filename, config.get_localized_string(70741).replace(' %s', '… {bold}'), 'search', host + dictUrl['search'], style=not global_search) + menuItem(itemlist, channel, config.get_localized_string(70741).replace(' %s', '… {bold}'), 'search', host + dictUrl['search'], style=not global_search) if not global_search: channel_config(item, itemlist) @@ -865,78 +704,7 @@ def menu(func): return wrapper -def typo(string, typography=''): - - kod_color = '0xFF65B3DA' #'0xFF0081C2' - - try: string = str(string) - except: string = str(string.encode('utf8')) - - if config.get_localized_string(30992) in string: - string = string + ' >' - - if int(config.get_setting('view_mode_channel').split(',')[-1]) in [0, 50, 55]: - VLT = True - else: - VLT = False - - - if not typography and '{' in string: - typography = string.split('{')[1].strip(' }').lower() - string = string.replace('{' + typography + '}','').strip() - else: - string = string - typography.lower() - - if 'capitalize' in typography: - string = string.capitalize() - typography = typography.replace('capitalize', '') - if 'uppercase' in typography: - string = string.upper() - typography = typography.replace('uppercase', '') - if 'lowercase' in typography: - string = string.lower() - typography = typography.replace('lowercase', '') - if '[]' in typography: - string = '[' + string + ']' - typography = typography.replace('[]', '') - if '()' in typography: - string = '(' + string + ')' - typography = typography.replace('()', '') - if 'submenu' in typography: - if VLT: string = "•• " + string - else: string = string - typography = typography.replace('submenu', '') - if 'color kod' in typography: - string = '[COLOR ' + kod_color + ']' + string + '[/COLOR]' - typography = typography.replace('color kod', '') - elif 'color' in typography: - color = scrapertools.find_single_match(typography, 'color ([a-zA-Z0-9]+)') - string = '[COLOR ' + color + ']' + string + '[/COLOR]' - typography = typography.replace('color ' + color, '') - if 'bold' in typography: - string = '[B]' + string + '[/B]' - typography = typography.replace('bold', '') - if 'italic' in typography: - string = '[I]' + string + '[/I]' - typography = typography.replace('italic', '') - if '__' in typography: - string = string + ' ' - typography = typography.replace('__', '') - if 
'_' in typography: - string = ' ' + string - typography = typography.replace('_', '') - if '--' in typography: - string = ' - ' + string - typography = typography.replace('--', '') - if 'bullet' in typography: - if VLT: string = '[B]' + "•" + '[/B] ' + string - else: string = string - typography = typography.replace('bullet', '') - typography = typography.strip() - if typography: string = string + '{' + typography + '}' - return string - +# Match def match(item_url_string, **args): ''' @@ -966,9 +734,28 @@ def match(item_url_string, **args): matches: all the matches ''' + def match_dbg(data, patron): + import json, webbrowser + url = 'https://regex101.com' + headers = {'content-type': 'application/json'} + data = { + 'regex': patron, + 'flags': 'gm', + 'testString': data, + 'delimiter': '"""', + 'flavor': 'python' + } + js = json.dumps(data).encode() if PY3 else json.dumps(data, encoding='latin1') + r = Request(url + '/api/regex', js, headers=headers) + r = urlopen(r).read() + permaLink = json.loads(r)['permalinkFragment'] + webbrowser.open(url + "/r/" + permaLink) + matches = [] blocks = [] + response = None url = None + # arguments allowed for scrape patron = args.get('patron', None) patronBlock = args.get('patronBlock', None) @@ -986,18 +773,17 @@ def match(item_url_string, **args): elif isinstance(item_url_string, Item): # if item_url_string is an item use item.url as url url = item_url_string.url - else: - if item_url_string.startswith('http'): url = item_url_string - else : data = item_url_string - # else: - # # if item_url_string is an item use item.url as url - # url = item_url_string.url + elif item_url_string.startswith('http'): + url = item_url_string + else : + data = item_url_string # if there is a url, download the page if url: if args.get('ignore_response_code', None) is None: args['ignore_response_code'] = True - data = httptools.downloadpage(url, **args).data + response = httptools.downloadpage(url, **args) + data = response.data # format page data data = html_uniform(data) @@ -1015,7 +801,8 @@ def match(item_url_string, **args): # match if patron: - if type(patron) == str: patron = [patron] + if type(patron) == str: + patron = [patron] for b in blocks: for p in patron: matches += scrapertools.find_multiple_matches(b, p) @@ -1029,32 +816,306 @@ def match(item_url_string, **args): for p in patron: match_dbg(block, p) + # create a item item = Item(data=data, blocks=blocks, block=blocks[0] if len(blocks) > 0 else '', matches=matches, - match=matches[0] if len(matches) > 0 else '') + match=matches[0] if len(matches) > 0 else '', + response = response) return item -def match_dbg(data, patron): - import json, webbrowser - url = 'https://regex101.com' - headers = {'content-type': 'application/json'} - data = { - 'regex': patron, - 'flags': 'gm', - 'testString': data, - 'delimiter': '"""', - 'flavor': 'python' - } - js = json.dumps(data).encode() if PY3 else json.dumps(data, encoding='latin1') - r = Request(url + '/api/regex', js, headers=headers) - r = urlopen(r).read() - permaLink = json.loads(r)['permalinkFragment'] - webbrowser.open(url + "/r/" + permaLink) +# pagination + +def nextPage(itemlist, item, function_or_level=1, **kwargs): + ''' + Function_level is useful if the function is called by another function. 
+ If the call is direct, leave it blank + itemlist = list of item -> required + item = item -> required + function_or_level = function to call or level of monitored function, integer or string -> optional def:1 + + OPTIONAL ARGS + data = data of the page + patron = regex to find the next page + patron_total_pages = regex to find number of total pages + next_page = link to next page + resub = list of 2 values for resub _next_page + page = integer, the page for next page + total_pages = integer, the number of total pages + ''' + logger.debug() + + # get optional args + data = kwargs.get('data', '') + patron = kwargs.get('patron', '') + patron_total_pages = kwargs.get('patron_total_pages', '') + next_page = kwargs.get('next_page', None) + resub = kwargs.get('resub', []) + page = kwargs.get('page', None) + total_pages = kwargs.get('total_pages', None) + + # create Item + if patron or page or next_page: + nextItem = item.clone(action = inspect.stack()[function_or_level][3] if type(function_or_level) == int else function_or_level, + title=typo(config.get_localized_string(30992), 'color kod bold'), + nextPage=True, + page=page if page else item.page + 1 if item.page else 2, + thumbnail=thumb()) + + # get next_page from data + if data and patron: + next_page = scrapertools.find_single_match(data, patron) + + # resub an host to url + if next_page: + if resub: next_page = re.sub(resub[0], resub[1], next_page) + if 'http' not in next_page: + if '/' in next_page: + next_page = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') + (next_page if next_page.startswith('/') else '/' + next_page) + else: + next_page = '/'.join(item.url.split('/')[:-1]) + '/' + next_page + next_page = next_page.replace('&', '&') + item.url = next_page + + # get total pages from data + if data and patron_total_pages: + found = scrapertools.find_single_match(data, patron_total_pages).replace('.','').replace(',','') + if found.isdigit(): + item.total_pages = int(found) + + # set total pages from value + if total_pages: + item.total_pages = total_pages + + if next_page or page: + itemlist.append(item.clone(action = inspect.stack()[function_or_level][3] if type(function_or_level) == int else function_or_level, + title=typo(config.get_localized_string(30992), 'color kod bold'), + nextPage=True, + page=page if page else item.page + 1 if item.page else 2, + prevthumb = item.thumbnail, + thumbnail=thumb())) + return itemlist + + +def pagination(itemlist, item, function_level=1): + itemlistdb(itemlist) + page = item.page if item.page else 1 + perpage = config.get_setting('pagination', default=20) + action = function_level if type(function_level) == str else inspect.stack()[function_level][3] + itlist = [] + for i, it in enumerate(itemlist): + if perpage and (page - 1) * perpage > i: continue # pagination + if perpage and i >= page * perpage: break # pagination + itlist.append(it) + if len(itemlist) >= page * perpage: + itlist.append( + item.clone(channel=item.channel, + action=action, + contentType=item.contentType, + title=typo(config.get_localized_string(30992), 'color kod bold'), + page=page + 1, + total_pages=round(len(itemlist)/perpage), + nextPage = True, + itemlist = True, + prevthumb = item.thumbnail, + thumbnail=thumb())) + return itlist + + +def season_pagination(itemlist, item, seasons, function_level=1): + itemlistdb(itemlist) + action = function_level if type(function_level) == str else inspect.stack()[function_level][3] + itlist = [] + if itemlist and not seasons: + seasons = [] + for it in itemlist: + if 
it.contentSeason and it.contentSeason not in seasons: + seasons.append(it.contentSeason) + + if seasons: + seasons.sort() + if not item.nextSeason: item.nextSeason = 0 + try: + current = seasons[item.nextSeason] + + for it in itemlist: + if it.contentSeason and it.contentSeason == current: + itlist.append(it) + elif it.contentSeason and it.contentSeason > current: + break + + if item.nextSeason + 1 < len(seasons): + itlist.append( + item.clone(action=action, + title=typo('Stagione Successiva [{}]'.format(seasons[item.nextSeason + 1]), 'bold'), + allSeasons = seasons, + nextSeason = item.nextSeason + 1, + itemlist = True, + prevthumb = item.thumbnail, + thumbnail=thumb())) + itlist.append( + item.clone(action='gotoseason', + real_action=action, + title=typo('Vai alla stagione…', 'bold'), + allSeasons = seasons, + nextSeason = item.nextSeason + 1, + itemlist = True, + prevthumb = item.thumbnail, + thumbnail=thumb())) + return itlist + except: + return itemlist + + +# Find servers + +def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True, Download=True, patronTag=None, Videolibrary=True): + logger.debug() + + if not data and not itemlist: + data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data + if data: + itemList = servertools.find_video_items(data=str(data)) + itemlist = itemlist + itemList + verifiedItemlist = [] + + def getItem(videoitem): + if not videoitem.video_urls: + srv_param = servertools.get_server_parameters(videoitem.server.lower()) + if not srv_param: # do not exists or it's empty + findS = servertools.get_server_from_url(videoitem.url) + logger.debug(findS) + if not findS: + if item.channel == 'community': + findS= (config.get_localized_string(30137), videoitem.url, 'directo') + else: + videoitem.url = unshortenit.unshorten_only(videoitem.url)[0] + findS = servertools.get_server_from_url(videoitem.url) + if not findS: + logger.debug(videoitem, 'Non supportato') + return + videoitem.server = findS[2] + videoitem.serverName= findS[0] + videoitem.url = findS[1] + srv_param = servertools.get_server_parameters(videoitem.server.lower()) + else: + videoitem.server = videoitem.server.lower() + + if videoitem.video_urls or srv_param.get('active', False): + quality = videoitem.quality if videoitem.quality else item.quality if item.quality else '' + # videoitem = item.clone(url=videoitem.url, serverName=videoitem.serverName, server=videoitem.server, action='play') + videoitem.contentLanguage = videoitem.contentLanguage if videoitem.contentLanguage else item.contentLanguage if item.contentLanguage else 'ITA' + videoitem.serverName = videoitem.title if videoitem.server == 'directo' else servertools.get_server_parameters(videoitem.server).get('name', videoitem.server.capitalize()) + # videoitem.title = item.contentTitle.strip() if item.contentType == 'movie' and item.contentTitle or (config.get_localized_string(30161) in item.fulltitle) else item.fulltitle + videoitem.plot = typo(videoitem.title, 'bold') + (typo(quality, '_ [] bold') if quality else '') + videoitem.channel = item.channel + videoitem.fulltitle = item.fulltitle + videoitem.show = item.show + if not videoitem.video_urls: videoitem.thumbnail = item.thumbnail + videoitem.contentType = item.contentType + videoitem.infoLabels = item.infoLabels + videoitem.quality = quality + videoitem.referer = item.referer if item.referer else item.url + videoitem.action = "play" + videoitem.videolibrary_id = item.videolibrary_id + videoitem.from_library = item.from_library + return 
videoitem + + # non threaded for webpdb + # dbg() + # thL = [getItem(videoitem) for videoitem in itemlist if videoitem.url or videoitem.video_urls] + # for it in thL: + # if it and not config.get_setting("black_list", server=it.server.lower()): + # verifiedItemlist.append(it) + + with futures.ThreadPoolExecutor() as executor: + thL = [executor.submit(getItem, videoitem) for videoitem in itemlist if videoitem.url or videoitem.video_urls] + for it in futures.as_completed(thL): + if it.result(): + verifiedItemlist.append(it.result()) + try: + verifiedItemlist.sort(key=lambda it: int(re.sub(r'\D','',it.quality))) + except: + verifiedItemlist.sort(key=lambda it: it.quality, reverse=True) + if patronTag: + addQualityTag(item, verifiedItemlist, data, patronTag) + + # Check Links + if not item.global_search and config.get_setting('checklinks') and CheckLinks and not config.get_setting('autoplay'): + checklinks_number = config.get_setting('checklinks_number') + verifiedItemlist = servertools.check_list_links(verifiedItemlist, checklinks_number) + + try: + if AutoPlay and item.contentChannel not in ['downloads', 'videolibrary']: + verifiedItemlist = autoplay.start(verifiedItemlist, item) + except: + import traceback + logger.error(traceback.format_exc()) + pass + + verifiedItemlist = servertools.sort_servers(verifiedItemlist) + + if Videolibrary and item.contentChannel != 'videolibrary': + videolibrary(verifiedItemlist, item) + if Download: + download(verifiedItemlist, item, function_level=3) + + return verifiedItemlist + + +# extra item + +def videolibrary(itemlist, item, typography='', function_level=1, function=''): + # Simply add this function to add video library support + # Function_level is useful if the function is called by another function. + # If the call is direct, leave it blank + logger.debug() + + if item.contentType == 'movie': + action = 'add_to_library' + contentType = 'movie' + else: + action = 'add_to_library' + contentType = 'tvshow' + + function = function if function else inspect.stack()[function_level][3] + # go up until find findvideos/episodios + while function not in ['findvideos', 'episodios']: + function_level += 1 + try: + function = inspect.stack()[function_level][3] + except: + break + + if not typography: typography = 'color kod bold' + + title = typo(config.get_localized_string(30161), typography) + contentSerieName=item.contentSerieName if item.contentSerieName else item.fulltitle if item.contentType != 'movie' else '' + contentTitle=item.contentTitle if item.contentTitle else item.fulltitle if item.contentType == 'movie' else '' + + if (function == 'findvideos' and contentType == 'movie') \ + or (function == 'episodios' and contentType != 'movie'): + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append( + item.clone(channel=item.channel, + title=title, + fulltitle=item.fulltitle, + show=item.fulltitle, + contentType=contentType, + contentTitle=contentTitle, + contentSerieName=contentSerieName, + url=item.url, + action=action, + from_action=item.action, + path=item.path, + thumbnail=thumb('add_to_videolibrary') + )) + + return itemlist def download(itemlist, item, typography='', function_level=1, function=''): @@ -1132,210 +1193,7 @@ def download(itemlist, item, typography='', function_level=1, function=''): return itemlist -def videolibrary(itemlist, item, typography='', function_level=1, function=''): - # Simply add this function to add video library support - # Function_level is useful if the function is called by another function. 
- # If the call is direct, leave it blank - logger.debug() - - if item.contentType == 'movie': - action = 'add_to_library' - contentType = 'movie' - else: - action = 'add_to_library' - contentType = 'tvshow' - - function = function if function else inspect.stack()[function_level][3] - # go up until find findvideos/episodios - while function not in ['findvideos', 'episodios']: - function_level += 1 - try: - function = inspect.stack()[function_level][3] - except: - break - - if not typography: typography = 'color kod bold' - - title = typo(config.get_localized_string(30161), typography) - contentSerieName=item.contentSerieName if item.contentSerieName else item.fulltitle if item.contentType != 'movie' else '' - contentTitle=item.contentTitle if item.contentTitle else item.fulltitle if item.contentType == 'movie' else '' - - if (function == 'findvideos' and contentType == 'movie') \ - or (function == 'episodios' and contentType != 'movie'): - if config.get_videolibrary_support() and len(itemlist) > 0: - itemlist.append( - item.clone(channel=item.channel, - title=title, - fulltitle=item.fulltitle, - show=item.fulltitle, - contentType=contentType, - contentTitle=contentTitle, - contentSerieName=contentSerieName, - url=item.url, - action=action, - from_action=item.action, - path=item.path, - thumbnail=thumb('add_to_videolibrary') - )) - - return itemlist - - -def nextPage(itemlist, item, data='', patron='', function_or_level=1, next_page='', resub=[], patron_total_pages='', total_pages=0): - # Function_level is useful if the function is called by another function. - # If the call is direct, leave it blank - logger.debug() - page=None - # page = item.page if item.page else 2 - action = inspect.stack()[function_or_level][3] if type(function_or_level) == int else function_or_level - - if not data and not patron and not next_page: - itemlist.append( - item.clone(action = action, - title=typo(config.get_localized_string(30992), 'color kod bold'), - nextPage=True, - thumbnail=thumb())) - return itemlist[-1] - - if next_page == '': - next_page = scrapertools.find_single_match(data, patron) - - if patron_total_pages: - found = scrapertools.find_single_match(data, patron_total_pages).replace('.','').replace(',','') - if found.isdigit(): - total_pages = int(found) - - if next_page != "": - if resub: next_page = re.sub(resub[0], resub[1], next_page) - if 'http' not in next_page: - if '/' in next_page: - next_page = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') + (next_page if next_page.startswith('/') else '/' + next_page) - else: - next_page = '/'.join(item.url.split('/')[:-1]) + '/' + next_page - next_page = next_page.replace('&', '&') - logger.debug('NEXT= ', next_page) - itemlist.append( - item.clone(action = action, - title=typo(config.get_localized_string(30992), 'color kod bold'), - url=next_page, - nextPage=True, - total_pages=total_pages, - page = page, - thumbnail=thumb())) - return itemlist[-1] - - -def pagination(itemlist, item, page, perpage, function_level=1): - itlist = [] - for i, it in enumerate(itemlist): - if perpage and (page - 1) * perpage > i: continue # pagination - if perpage and i >= page * perpage: break # pagination - itlist.append(it) - if len(itemlist) >= page * perpage: - itlist.append( - item.clone(channel=item.channel, - action=inspect.stack()[function_level][3], - contentType=item.contentType, - title=typo(config.get_localized_string(30992), 'color kod bold'), - page=page + 1, - total_pages=round(len(itemlist)/perpage), - nextPage=True, - 
thumbnail=thumb())) - return itlist - - -def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True, Download=True, patronTag=None, Videolibrary=True): - logger.debug() - - if not data and not itemlist: - data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data - if data: - itemList = servertools.find_video_items(data=str(data)) - itemlist = itemlist + itemList - verifiedItemlist = [] - - def getItem(videoitem): - if not videoitem.video_urls: - srv_param = servertools.get_server_parameters(videoitem.server.lower()) - if not srv_param: # do not exists or it's empty - findS = servertools.get_server_from_url(videoitem.url) - info(findS) - if not findS: - if item.channel == 'community': - findS= (config.get_localized_string(30137), videoitem.url, 'directo') - else: - videoitem.url = unshortenit.unshorten_only(videoitem.url)[0] - findS = servertools.get_server_from_url(videoitem.url) - if not findS: - info(videoitem, 'Non supportato') - return - videoitem.server = findS[2] - videoitem.serverName= findS[0] - videoitem.url = findS[1] - srv_param = servertools.get_server_parameters(videoitem.server.lower()) - else: - videoitem.server = videoitem.server.lower() - - if videoitem.video_urls or srv_param.get('active', False): - quality = videoitem.quality if videoitem.quality else item.quality if item.quality else '' - # videoitem = item.clone(url=videoitem.url, serverName=videoitem.serverName, server=videoitem.server, action='play') - videoitem.contentLanguage = videoitem.contentLanguage if videoitem.contentLanguage else item.contentLanguage if item.contentLanguage else 'ITA' - videoitem.serverName = videoitem.title if videoitem.server == 'directo' else servertools.get_server_parameters(videoitem.server).get('name', videoitem.server.capitalize()) - # videoitem.title = item.contentTitle.strip() if item.contentType == 'movie' and item.contentTitle or (config.get_localized_string(30161) in item.fulltitle) else item.fulltitle - videoitem.plot = typo(videoitem.title, 'bold') + (typo(quality, '_ [] bold') if quality else '') - videoitem.channel = item.channel - videoitem.fulltitle = item.fulltitle - videoitem.show = item.show - if not videoitem.video_urls: videoitem.thumbnail = item.thumbnail - videoitem.contentType = item.contentType - videoitem.infoLabels = item.infoLabels - videoitem.quality = quality - videoitem.referer = item.referer if item.referer else item.url - videoitem.action = "play" - videoitem.videolibrary_id = item.videolibrary_id - videoitem.from_library = item.from_library - return videoitem - - # non threaded for webpdb - # dbg() - # thL = [getItem(videoitem) for videoitem in itemlist if videoitem.url or videoitem.video_urls] - # for it in thL: - # if it and not config.get_setting("black_list", server=it.server.lower()): - # verifiedItemlist.append(it) - - with futures.ThreadPoolExecutor() as executor: - thL = [executor.submit(getItem, videoitem) for videoitem in itemlist if videoitem.url or videoitem.video_urls] - for it in futures.as_completed(thL): - if it.result(): - verifiedItemlist.append(it.result()) - try: - verifiedItemlist.sort(key=lambda it: int(re.sub(r'\D','',it.quality))) - except: - verifiedItemlist.sort(key=lambda it: it.quality, reverse=True) - if patronTag: - addQualityTag(item, verifiedItemlist, data, patronTag) - - # Check Links - if not item.global_search and config.get_setting('checklinks') and CheckLinks and not config.get_setting('autoplay'): - checklinks_number = config.get_setting('checklinks_number') - 
verifiedItemlist = servertools.check_list_links(verifiedItemlist, checklinks_number) - - try: - if AutoPlay and item.contentChannel not in ['downloads', 'videolibrary']: - verifiedItemlist = autoplay.start(verifiedItemlist, item) - except: - import traceback - logger.error(traceback.format_exc()) - pass - - verifiedItemlist = servertools.sort_servers(verifiedItemlist) - - if Videolibrary and item.contentChannel != 'videolibrary': - videolibrary(verifiedItemlist, item) - if Download: - download(verifiedItemlist, item, function_level=3) - - return verifiedItemlist +# utility def filterLang(item, itemlist): @@ -1436,36 +1294,7 @@ def addQualityTag(item, itemlist, data, patron): folder=False, thumbnail=thumb('info'))) else: - info('nessun tag qualità trovato') - -def get_jwplayer_mediaurl(data, srvName, onlyHttp=False, dataIsBlock=False): - from core import jsontools - video_urls = [] - block = scrapertools.find_single_match(data, r'sources"?\s*:\s*(.*?}])') if not dataIsBlock else data - if block: - json = jsontools.load(block) - if json: - sources = [] - for s in json: - if 'file' in s.keys(): - src = s['file'] - else: - src = s['src'] - sources.append((src, s.get('label'))) - else: - if 'file:' in block: - sources = scrapertools.find_multiple_matches(block, r'file:\s*"([^"]+)"(?:,label:\s*"([^"]+)")?') - elif 'src:' in block: - sources = scrapertools.find_multiple_matches(block, r'src:\s*"([^"]+)",\s*type:\s*"[^"]+"(?:,[^,]+,\s*label:\s*"([^"]+)")?') - else: - sources =[(block.replace('"',''), '')] - for url, quality in sources: - quality = 'auto' if not quality else quality - if url.split('.')[-1] != 'mpd': - video_urls.append({'type':url.split('.')[-1], 'res':quality, 'url':url if not onlyHttp else url.replace('https://', 'http://')}) - - # video_urls.sort(key=lambda x: x[0].split()[1]) - return video_urls + logger.debug('nessun tag qualità trovato') def thumb(data=None, mode=None): @@ -1711,3 +1540,292 @@ def get_thumb(thumb_name, view="thumb_"): icon_pack_name = config.get_setting('icon_set', default="default") media_path = filetools.join("https://raw.githubusercontent.com/kodiondemand/media/master/themes/new", icon_pack_name) return filetools.join(media_path, thumb_name) + + +def color(text, color): + return "[COLOR " + color + "]" + text + "[/COLOR]" + + +def typo(string, typography=''): + + kod_color = '0xFF65B3DA' #'0xFF0081C2' + + try: string = str(string) + except: string = str(string.encode('utf8')) + + if config.get_localized_string(30992) in string: + string = string + ' >' + + if int(config.get_setting('view_mode_channel').split(',')[-1]) in [0, 50, 55]: + VLT = True + else: + VLT = False + + + if not typography and '{' in string: + typography = string.split('{')[1].strip(' }').lower() + string = string.replace('{' + typography + '}','').strip() + else: + string = string + typography.lower() + + if 'capitalize' in typography: + string = string.capitalize() + typography = typography.replace('capitalize', '') + if 'uppercase' in typography: + string = string.upper() + typography = typography.replace('uppercase', '') + if 'lowercase' in typography: + string = string.lower() + typography = typography.replace('lowercase', '') + if '[]' in typography: + string = '[' + string + ']' + typography = typography.replace('[]', '') + if '()' in typography: + string = '(' + string + ')' + typography = typography.replace('()', '') + if 'submenu' in typography: + if VLT: string = "•• " + string + else: string = string + typography = typography.replace('submenu', '') + if 'color kod' in 
typography: + string = '[COLOR ' + kod_color + ']' + string + '[/COLOR]' + typography = typography.replace('color kod', '') + elif 'color' in typography: + color = scrapertools.find_single_match(typography, 'color ([a-zA-Z0-9]+)') + string = '[COLOR ' + color + ']' + string + '[/COLOR]' + typography = typography.replace('color ' + color, '') + if 'bold' in typography: + string = '[B]' + string + '[/B]' + typography = typography.replace('bold', '') + if 'italic' in typography: + string = '[I]' + string + '[/I]' + typography = typography.replace('italic', '') + if '__' in typography: + string = string + ' ' + typography = typography.replace('__', '') + if '_' in typography: + string = ' ' + string + typography = typography.replace('_', '') + if '--' in typography: + string = ' - ' + string + typography = typography.replace('--', '') + if 'bullet' in typography: + if VLT: string = '[B]' + "•" + '[/B] ' + string + else: string = string + typography = typography.replace('bullet', '') + typography = typography.strip() + if typography: string = string + '{' + typography + '}' + return string + + +########## HD PASS ########## + +def hdpass_get_servers(item, data=''): + def get_hosts(url, quality): + ret = [] + page = httptools.downloadpage(url, CF=False).data + mir = scrapertools.find_single_match(page, patron_mir) + + for mir_url, srv in scrapertools.find_multiple_matches(mir, patron_option): + mir_url = scrapertools.decodeHtmlentities(mir_url) + logger.debug(mir_url) + it = hdpass_get_url(item.clone(action='play', quality=quality, url=mir_url))[0] + # it = item.clone(action="play", quality=quality, title=srv, server=srv, url= mir_url) + # if not servertools.get_server_parameters(srv.lower()): it = hdpass_get_url(it)[0] # do not exists or it's empty + ret.append(it) + return ret + # Carica la pagina + itemlist = [] + if 'hdpass' in item.url or 'hdplayer' in item.url: url = item.url + else: + if not data: + data = httptools.downloadpage(item.url, CF=False).data.replace('\n', '') + patron = r'<iframe(?: id="[^"]+")? 
width="[^"]+" height="[^"]+" src="([^"]+)"[^>]+><\/iframe>' + url = scrapertools.find_single_match(data, patron) + url = url.replace("&download=1", "") + if 'hdpass' not in url and 'hdplayer' not in url: return itemlist + if not url.startswith('http'): url = 'https:' + url + item.referer = url + + data = httptools.downloadpage(url, CF=False).data + patron_res = '<div class="buttons-bar resolutions-bar">(.*?)<div class="buttons-bar' + patron_mir = '<div class="buttons-bar hosts-bar">(.*?)(?:<div id="main-player|<script)' + patron_option = r'<a href="([^"]+?)"[^>]+>([^<]+?)</a' + + res = scrapertools.find_single_match(data, patron_res) + + # non threaded for webpdb + # for res_url, res_video in scrapertools.find_multiple_matches(res, patron_option): + # res_url = scrapertools.decodeHtmlentities(res_url) + # itemlist.extend(get_hosts(res_url, res_video)) + # + with futures.ThreadPoolExecutor() as executor: + thL = [] + for res_url, res_video in scrapertools.find_multiple_matches(res, patron_option): + res_url = scrapertools.decodeHtmlentities(res_url) + thL.append(executor.submit(get_hosts, res_url, res_video)) + for res in futures.as_completed(thL): + if res.result(): + itemlist.extend(res.result()) + + return server(item, itemlist=itemlist) + + +def hdpass_get_url(item): + data = httptools.downloadpage(item.url, CF=False).data + src = scrapertools.find_single_match(data, r'<iframe allowfullscreen custom-src="([^"]+)') + if src: item.url = base64.b64decode(src) + else: item.url = scrapertools.find_single_match(data, r'<iframe allowfullscreen src="([^"]+)') + item.url, c = unshortenit.unshorten_only(item.url) + return [item] + +########## SEARCH ########## + +def search(channel, item, texto): + logger.debug(item.url + " search " + texto) + item.url = channel.host + "/?s=" + texto + try: + return channel.peliculas(item) + # Continua la ricerca in caso di errore + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + +########## DOOPLAY ########## + +def dooplay_get_links(item, host, paramList=[]): + # get links from websites using dooplay theme and dooplay_player + # return a list of dict containing these values: url, title and server + if not paramList: + data = httptools.downloadpage(item.url).data.replace("'", '"') + patron = r'<li id="player-option-[0-9]".*?data-type="([^"]+)" data-post="([^"]+)" data-nume="([^"]+)".*?<span class="title".*?>([^<>]+)</span>(?:<span class="server">([^<>]+))?' 
+ matches = scrapertools.find_multiple_matches(data, patron) + else: + matches = paramList + ret = [] + + for type, post, nume, title, server in matches: + postData = urlencode({ + "action": "doo_player_ajax", + "post": post, + "nume": nume, + "type": type + }) + dataAdmin = httptools.downloadpage(host + '/wp-admin/admin-ajax.php', post=postData,headers={'Referer': item.url}).data + link = scrapertools.find_single_match(dataAdmin, r"<iframe.*src='([^']+)'") + if not link: link = scrapertools.find_single_match(dataAdmin, r'"embed_url":"([^"]+)"').replace('\\','') + ret.append({ + 'url': link, + 'title': title, + 'server': server + }) + + return ret + + +@scrape +def dooplay_get_episodes(item): + item.contentType = 'tvshow' + patron = '<li class="mark-[0-9]+">.*?<img.*?(?:data-lazy-)?src="(?P<thumb>[^"]+).*?(?P<episode>[0-9]+ - [0-9]+).*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+).*?(?P<year>[0-9]{4})' + actLike = 'episodios' + + return locals() + + +@scrape +def dooplay_peliculas(item, mixed=False, blacklist=""): + actLike = 'peliculas' + # debug = True + if item.args == 'searchPage': + return dooplay_search_vars(item, blacklist) + else: + if item.contentType == 'movie': + action = 'findvideos' + patron = '<article id="post-[0-9]+" class="item movies">.*?<img src="(?!data)(?P<thumb>[^"]+)".*?(?:<span class="quality">(?P<quality>[^<>]+).*?)?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></h3>.*?(?:<span>[^<>]*(?P<year>[0-9]{4})</span>|</article>)' + else: + action = 'episodios' + patron = '<article id="post-[0-9]+" class="item (?P<type>' + ('\w+' if mixed else 'tvshows') + ')">.*?<img src="(?!data)(?P<thumb>[^"]+)".*?(?:<span class="quality">(?P<quality>[^<>]+))?.*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></h3>.*?(?:<span>(?P<year>[0-9]{4})</span>|</article>).*?(?:<div class="texto">(?P<plot>[^<>]+)|</article>).*?(?:genres">(?P<genre>.*?)</div>|</article>)' + patronNext = '<div class="pagination">.*?class="current".*?<a href="([^"]+)".*?<div class="resppages">' + videlibraryEnabled = False + + if mixed: + typeActionDict={'findvideos': ['movies'], 'episodios': ['tvshows']} + typeContentDict={'film': ['movies'], 'serie': ['tvshows']} + + return locals() + + +@scrape +def dooplay_search(item, blacklist=""): + return dooplay_search_vars(item, blacklist) + + +def dooplay_search_vars(item, blacklist): + if item.contentType == 'list': # ricerca globale + type = '(?P<type>movies|tvshows)' + typeActionDict = {'findvideos': ['movies'], 'episodios': ['tvshows']} + typeContentDict = {'movie': ['movies'], 'tvshow': ['tvshows']} + elif item.contentType == 'movie': + type = 'movies' + action = 'findvideos' + else: + type = 'tvshows' + action = 'episodios' + patron = '<div class="result-item">.*?<img src="(?P<thumb>[^"]+)".*?<span class="' + type + '">(?P<quality>[^<>]+).*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a>.*?<span class="year">(?P<year>[0-9]{4}).*?<div class="contenido"><p>(?P<plot>[^<>]+)' + patronNext = '<a class="arrow_pag" href="([^"]+)"><i id="nextpagination"' + + return locals() + + +def dooplay_menu(item, type): + patronMenu = '<a href="(?P<url>[^"#]+)"(?: title="[^"]+")?>(?P<title>[a-zA-Z0-9]+)' + patronBlock = '<nav class="' + item.args + '">(?P<block>.*?)</nav>' + action = 'peliculas' + + return locals() + + +########## JWPLAYER ########## + +def get_jwplayer_mediaurl(data, srvName, onlyHttp=False, dataIsBlock=False): + from core import jsontools + video_urls = [] + block = scrapertools.find_single_match(data, r'sources"?\s*:\s*(.*?}])') if not dataIsBlock else data + if block: + 
json = jsontools.load(block) + if json: + sources = [] + for s in json: + if 'file' in s.keys(): + src = s['file'] + else: + src = s['src'] + sources.append((src, s.get('label'))) + else: + if 'file:' in block: + sources = scrapertools.find_multiple_matches(block, r'file:\s*"([^"]+)"(?:,label:\s*"([^"]+)")?') + elif 'src:' in block: + sources = scrapertools.find_multiple_matches(block, r'src:\s*"([^"]+)",\s*type:\s*"[^"]+"(?:,[^,]+,\s*label:\s*"([^"]+)")?') + else: + sources =[(block.replace('"',''), '')] + for url, quality in sources: + quality = 'auto' if not quality else quality + if url.split('.')[-1] != 'mpd': + video_urls.append({'type':url.split('.')[-1], 'res':quality, 'url':url if not onlyHttp else url.replace('https://', 'http://')}) + + return video_urls + + +########## ITEMLIST DB FOR PAGINATION ########## + +def itemlistdb(itemlist=None): + from core import db + if itemlist: + db['itemlist']['itemlist'] = itemlist + else: + itemlist = db['itemlist'].get('itemlist',[]) + db.close() + return itemlist diff --git a/platformcode/autorenumber.py b/platformcode/autorenumber.py index b9aa0b23..026dbba2 100644 --- a/platformcode/autorenumber.py +++ b/platformcode/autorenumber.py @@ -109,6 +109,7 @@ class autorenumber(): self.selectspecials = False self.manual = False self.auto = False + if self.item: from core.videolibrarytools import check_renumber_options check_renumber_options(self.item) @@ -186,7 +187,7 @@ class autorenumber(): if number in self.episodes: item.contentSeason = int(self.episodes[number].split('x')[0]) item.contentEpisodeNumber = int(self.episodes[number].split('x')[1]) - + # support.dbg() # for i in self.itemlist: # sub_thread(i) @@ -203,7 +204,7 @@ class autorenumber(): self.epdict = {} self.group = self.renumberdict[self.title].get(GROUP, None) busy(True) - itemlist = find_episodes(self.item) + itemlist = self.itemlist if self.itemlist else find_episodes(self.item) if not self.group: diff --git a/platformcode/elementum_download.py b/platformcode/elementum_download.py index 588543fc..5d31f193 100644 --- a/platformcode/elementum_download.py +++ b/platformcode/elementum_download.py @@ -1,6 +1,6 @@ from core import filetools, downloadtools, support -from platformcode import config, platformtools, updater +from platformcode import config, platformtools, updater, logger import xbmc, xbmcaddon, sys, platform host = 'https://github.com' @@ -28,9 +28,9 @@ def download(item=None): if platformtools.dialog_yesno(config.get_localized_string(70784), config.get_localized_string(70782)): pform = get_platform() url = support.match(elementum_url, patronBlock=r'<div class="release-entry">(.*?)<!-- /.release-body -->', patron=r'<a href="([a-zA-Z0-9/\.-]+%s.zip)' % pform).match - support.info('OS:', pform) - support.info('Extract IN:', elementum_path) - support.info('URL:', url) + logger.debug('OS:', pform) + logger.debug('Extract IN:', elementum_path) + logger.debug('URL:', url) if url: downloadtools.downloadfile(host + url, filename) extract() @@ -44,19 +44,19 @@ def download(item=None): def extract(): import zipfile from platformcode.updater import fixZipGetHash - support.info('Estraggo Elementum in:', elementum_path) + logger.debug('Estraggo Elementum in:', elementum_path) try: # hash = fixZipGetHash(filename) - # support.info(hash) + # logger.debug(hash) with zipfile.ZipFile(filetools.file_open(filename, 'rb', vfs=False)) as zip_ref: zip_ref.extractall(xbmc.translatePath(addon_path)) except Exception as e: - support.info('Non sono riuscito ad estrarre il file zip') - 
support.logger.error(e) + logger.debug('Non sono riuscito ad estrarre il file zip') + logger.error(e) import traceback - support.logger.error(traceback.print_exc()) + logger.error(traceback.print_exc()) def setting(): @@ -74,7 +74,7 @@ def setting(): __settings__.setSetting('do_not_disturb', 'true') Continue = False except: - support.info('RIPROVO') + logger.debug('RIPROVO') xbmc.sleep(100) else: if not filetools.exists(elementum_path): diff --git a/platformcode/launcher.py b/platformcode/launcher.py index 563c7a49..657a44f9 100644 --- a/platformcode/launcher.py +++ b/platformcode/launcher.py @@ -161,12 +161,21 @@ def run(item=None): if page and int(page) > -1: import xbmc item.action = item.real_action - if item.page: - item.page = page - else: - import re - item.url = re.sub('([=/])[0-9]+(/?)$', '\g<1>{}\g<2>'.format(page), item.url) + item.page = page + import re + item.url = re.sub('([=/])[0-9]+(/?)$', '\g<1>{}\g<2>'.format(page), item.url) xbmc.executebuiltin("Container.Update(%s?%s)" % (sys.argv[0], item.tourl())) + elif item.action == "gotoseason": + head = 'Seleziona la stagione' + seasons = [str(s) for s in item.allSeasons] + season = platformtools.dialog_select(head, seasons, item.nextSeason - 1) + if int(season) > -1: + import xbmc + item.action = item.real_action + item.nextSeason = item.allSeasons.index(season + 1) + run(item) + # logger.debug(item) + # xbmc.executebuiltin("Container.Update(%s?%s)" % (sys.argv[0], new_item.tourl())) else: # Checks if channel exists if os.path.isfile(os.path.join(config.get_runtime_path(), 'channels', item.channel + ".py")): diff --git a/platformcode/platformtools.py b/platformcode/platformtools.py index 773f84ec..67010b3f 100644 --- a/platformcode/platformtools.py +++ b/platformcode/platformtools.py @@ -666,8 +666,8 @@ def set_context_commands(item, item_url, parent_item, **kwargs): return context_commands # Options according to criteria, only if the item is not a tag, nor is it "Add to the video library", etc... 
if item.action and item.action not in ["add_movie_to_library", "add_serie_to_library", "buscartrailer", "actualizar_titulos"]: - if item.nextPage: - context_commands.append((config.get_localized_string(70511), "RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'action=gotopage&real_action='+item.action))) + # if item.nextPage: + # context_commands.append((config.get_localized_string(70511), "RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'action=gotopage&real_action='+item.action))) # Show information: if the item has a plot, we assume that it is a series, season, chapter or movie # if item.infoLabels['plot'] and (num_version_xbmc < 17.0 or item.contentType == 'season'): # context_commands.append((config.get_localized_string(60348), "Action(Info)")) diff --git a/servers/torrent.py b/servers/torrent.py index 8f7d5b2a..a708489b 100755 --- a/servers/torrent.py +++ b/servers/torrent.py @@ -3,8 +3,8 @@ import re, os, sys, time, requests, xbmc, xbmcaddon from core import filetools, jsontools -from core.support import info, match -from platformcode import config, platformtools +from core.support import match +from platformcode import config, platformtools, logger from lib.guessit import guessit if sys.version_info[0] >= 3: @@ -26,7 +26,7 @@ def get_video_url(page_url, premium=False, user='', password='', video_password= if len(torrent_options) == 0: from platformcode import elementum_download elementum_download.download() - info('server=torrent, the url is the good') + logger.debug('server=torrent, the url is the good') if page_url.startswith('magnet:'): video_urls = [{'type':'magnet', 'url':page_url}] diff --git a/specials/downloads.py b/specials/downloads.py index 0ada4d32..7295c481 100644 --- a/specials/downloads.py +++ b/specials/downloads.py @@ -12,12 +12,11 @@ from past.utils import old_div import re, time, unicodedata, xbmc -from core.support import thumb +from core.support import thumb, typo from core import filetools, jsontools, scraper, scrapertools, servertools, videolibrarytools, support from core.downloader import Downloader from core.item import Item from platformcode import config, logger, platformtools -from core.support import info, typo from servers import torrent kb = '0xFF65B3DA' @@ -39,7 +38,7 @@ extensions_list = ['.aaf', '.3gp', '.asf', '.avi', '.flv', '.mpeg', '.m1v', '.m2 def mainlist(item): - info() + logger.debug() itemlist = [] # File list @@ -142,7 +141,7 @@ def settings(item): def browser(item): - info() + logger.debug() itemlist = [] for file in filetools.listdir(item.url): @@ -176,7 +175,7 @@ def del_dir(item): def clean_all(item): - info() + logger.debug() stop_all() removeFiles = False if platformtools.dialog_yesno(config.get_localized_string(20000), config.get_localized_string(30300)): @@ -202,7 +201,7 @@ def reload(item): def stop_all(item=None): - info() + logger.debug() for fichero in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)): if fichero.endswith(".json"): @@ -220,7 +219,7 @@ def stop_all(item=None): def clean_ready(item): - info() + logger.debug() for fichero in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)): if fichero.endswith(".json"): download_item = Item().fromjson(filetools.read(filetools.join(DOWNLOAD_LIST_PATH, fichero))) @@ -232,7 +231,7 @@ def clean_ready(item): def restart_error(item): - info() + logger.debug() for fichero in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)): if fichero.endswith(".json"): download_item = Item().fromjson(filetools.read(filetools.join(DOWNLOAD_LIST_PATH, fichero))) @@ -271,7 +270,7 @@ def download_all_background(item): 
 def menu(item):
-    info(item)
+    logger.debug(item)
     if item.downloadServer:
         servidor = item.downloadServer.get("server", "Auto")
     else:
@@ -367,7 +366,7 @@ def menu(item):
 def move_to_libray(item):
-    info()
+    logger.debug()
     if item.contentType == 'movie':
         FOLDER = FOLDER_MOVIES
@@ -421,7 +420,7 @@ def move_to_libray(item):
             if filename.startswith(name) and (filename.endswith('.strm') or (filename.endswith('.json') and 'downloads' not in filename)):
                 clean = True
                 file_path = filetools.join(config.get_setting("videolibrarypath"), FOLDER, path_title, File)
-                info('Delete File:', str(file_path))
+                logger.debug('Delete File:', str(file_path))
                 filetools.remove(file_path)
                 if file_path.endswith('.strm'):
                     file_strm_path = file_path
@@ -577,7 +576,7 @@ def sort_method(item):
 def download_from_url(url, item):
-    info("Attempting to download:", url)
+    logger.debug("Attempting to download:", url)
     if '.m3u8' in url.lower().split('|')[0] or url.lower().startswith("rtmp"):
         save_server_statistics(item.server, 0, False)
         platformtools.dialog_notification('m3u8 Download',config.get_localized_string(60364), sound=False)
@@ -606,22 +605,22 @@ def download_from_url(url, item):
     update_json(item.path, {"downloadUrl": d.download_url, "downloadStatus": STATUS_CODES.downloading, "downloadSize": d.size[0], "downloadProgress": d.progress, "downloadCompleted": d.downloaded[0], "downloadFilename": file})
-    d.start_dialog(config.get_localized_string(60332))
+    d.start_dialog(config.get_localized_string(30006))
     # Download stopped. We get the state:
     # Download failed
     if d.state == d.states.error:
-        info("Error trying to download", url)
+        logger.debug("Error trying to download", url)
         status = STATUS_CODES.error
     # Download has stopped
     elif d.state == d.states.stopped:
-        info("Stop download")
+        logger.debug("Stop download")
         status = STATUS_CODES.canceled
     # Download is complete
     elif d.state == d.states.completed:
-        info("Downloaded correctly")
+        logger.debug("Downloaded correctly")
         status = STATUS_CODES.completed
         if (item.downloadSize and item.downloadSize != d.size[0]) or d.size[0] < 5000000:
             # if size don't correspond or file is too little (gounlimited for example send a little video to say the server is overloaded)
@@ -637,7 +636,7 @@ def download_from_url(url, item):
 def download_from_server(item):
-    info(item.tostring())
+    logger.debug(item.tostring())
     unsupported_servers = ["torrent"]
     if item.contentChannel == 'local':
@@ -667,11 +666,11 @@ def download_from_server(item):
         item.video_urls = itemlist
         if not item.server: item.server = "directo"
     else:
-        info("There is nothing to reproduce")
+        logger.debug("There is nothing to reproduce")
         return {"downloadStatus": STATUS_CODES.error}
     finally:
         progreso.close()
-    info("contentAction: %s | contentChannel: %s | server: %s | url: %s" % (item.contentAction, item.contentChannel, item.server, item.url))
+    logger.debug("contentAction: %s | contentChannel: %s | server: %s | url: %s" % (item.contentAction, item.contentChannel, item.server, item.url))
     if item.server == 'torrent':
         import xbmcgui
@@ -691,11 +690,11 @@ def download_from_server(item):
     # If it is not available, we go out
     if not puedes:
-        info("The video is NOT available")
+        logger.debug("The video is NOT available")
         return {"downloadStatus": STATUS_CODES.error}
     else:
-        info("YES Video is available")
+        logger.debug("YES Video is available")
         result = {}
@@ -716,14 +715,14 @@ def download_from_server(item):
 def download_from_best_server(item):
-    info("contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url))
+    logger.debug("contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url))
     result = {"downloadStatus": STATUS_CODES.error}
     progreso = platformtools.dialog_progress_bg(config.get_localized_string(30101), config.get_localized_string(70179))
     try:
         if item.downloadItemlist:
-            info('using cached servers')
+            logger.debug('using cached servers')
             play_items = [Item().fromurl(i) for i in item.downloadItemlist]
         else:
             if item.contentChannel in ['community', 'videolibrary']:
@@ -772,11 +771,11 @@ def download_from_best_server(item):
 def select_server(item):
     if item.server: return "Auto"
-    info("contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url))
+    logger.debug("contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url))
     progreso = platformtools.dialog_progress_bg(config.get_localized_string(30101), config.get_localized_string(70179))
     try:
         if item.downloadItemlist:
-            info('using cached servers')
+            logger.debug('using cached servers')
             play_items = [Item().fromurl(i) for i in item.downloadItemlist]
         else:
             if item.contentChannel in ['community', 'videolibrary']:
@@ -818,7 +817,7 @@ def select_server(item):
 def start_download(item):
-    info("contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url))
+    logger.debug("contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url))
     # We already have a server, we just need to download
     if item.contentAction == "play":
         ret = download_from_server(item)
@@ -838,7 +837,7 @@ def start_download(item):
 def get_episodes(item):
-    info("contentAction: %s | contentChannel: %s | contentType: %s" % (item.contentAction, item.contentChannel, item.contentType))
+    logger.debug("contentAction: %s | contentChannel: %s | contentType: %s" % (item.contentAction, item.contentChannel, item.contentType))
     if 'dlseason' in item:
         season = True
@@ -915,7 +914,7 @@ def get_episodes(item):
         # Any other result is not worth it, we ignore it
         else:
-            info("Omitiendo item no válido:", episode.tostring())
+            logger.debug("Omitiendo item no válido:", episode.tostring())
     # Any other result is not worth it, we ignore it...
     # itemlist = videolibrarytools.filter_list(itemlist)
@@ -924,7 +923,7 @@ def get_episodes(item):
 def write_json(item):
-    info()
+    logger.debug()
     channel = item.from_channel if item.from_channel else item.channel
     item.action = "menu"
@@ -969,7 +968,7 @@ def save_download(item):
 def save_download_background(item):
-    info()
+    logger.debug()
     # Menu contextual
     if item.from_action and item.from_channel:
         item.channel = item.from_channel
@@ -1018,7 +1017,7 @@ def save_download_background(item):
 def save_download_videolibrary(item):
-    info()
+    logger.debug()
     show_disclaimer()
     item.contentChannel = 'videolibrary'
     item.channel = "downloads"
@@ -1027,7 +1026,7 @@ def save_download_videolibrary(item):
 def save_download_video(item):
-    info("contentAction: %s | contentChannel: %s | contentTitle: %s" % (item.contentAction, item.contentChannel, item.contentTitle))
+    logger.debug("contentAction: %s | contentChannel: %s | contentTitle: %s" % (item.contentAction, item.contentChannel, item.contentTitle))
     set_movie_title(item)
@@ -1042,7 +1041,7 @@ def save_download_video(item):
 def save_download_movie(item):
-    info("contentAction: %s | contentChannel: %s | contentTitle: %s" % ( item.contentAction, item.contentChannel, item.contentTitle))
+    logger.debug("contentAction: %s | contentChannel: %s | contentTitle: %s" % ( item.contentAction, item.contentChannel, item.contentTitle))
     progreso = platformtools.dialog_progress_bg(config.get_localized_string(30101), config.get_localized_string(70191))
@@ -1076,7 +1075,7 @@ def save_download_movie(item):
 def save_download_tvshow(item):
-    info("contentAction: %s | contentChannel: %s | contentType: %s | contentSerieName: %s" % (item.contentAction, item.contentChannel, item.contentType, item.contentSerieName))
+    logger.debug("contentAction: %s | contentChannel: %s | contentType: %s | contentSerieName: %s" % (item.contentAction, item.contentChannel, item.contentType, item.contentSerieName))
     progreso = platformtools.dialog_progress_bg(config.get_localized_string(30101), config.get_localized_string(70188))
     try:
         item.show = item.fulltitle
diff --git a/specials/globalsearch.py b/specials/globalsearch.py
index 8232e770..2e33c282 100644
--- a/specials/globalsearch.py
+++ b/specials/globalsearch.py
@@ -166,7 +166,7 @@ class SearchWindow(xbmcgui.WindowXML):
         tmdb_info = tmdb.Tmdb(searched_text=self.item.text, search_type=self.item.mode.replace('show', ''))
         results = tmdb_info.results
-
+
         def make(n, result):
             result = tmdb_info.get_infoLabels(result, origen=result)
             if self.item.mode == 'movie':
diff --git a/specials/news.py b/specials/news.py
index 8d1e87ee..05274fce 100644
--- a/specials/news.py
+++ b/specials/news.py
@@ -388,7 +388,7 @@ def get_newest(channel_id, categoria):
 def get_title(item):
-    # support.info("ITEM NEWEST ->", item)
+    # logger.debug("ITEM NEWEST ->", item)
     # item.contentSerieName c'è anche se è un film
     if item.contentSerieName and item.contentType != 'movie':  # Si es una serie
         title = item.contentSerieName
@@ -442,7 +442,7 @@ def no_group(list_result_canal):
     global channels_id_name
     for i in list_result_canal:
-        # support.info("NO GROUP i -> ", i)
+        # logger.debug("NO GROUP i -> ", i)
         canale = channels_id_name[i.channel]
         canale = canale  # to differentiate it from the color of the other items
         i.title = get_title(i) + " [" + canale + "]"
diff --git a/specials/url.py b/specials/url.py
index 368d1400..9f7d58d6 100644
--- a/specials/url.py
+++ b/specials/url.py
@@ -1,13 +1,13 @@
 # -*- coding: utf-8 -*-
 from core import servertools
-from core.support import match, info, server
+from core.support import match, server
 from core.item import Item
 from platformcode import config, logger
 def mainlist(item):
-    info()
+    logger.debug()
     itemlist = []
     itemlist.append(Item(channel=item.channel, action="search", title=config.get_localized_string(60089), thumbnail=item.thumbnail, args='server'))
@@ -19,7 +19,7 @@ def mainlist(item):
 # When the function "search" is called, the launcher asks for a text to search for and adds it as a parameter
 def search(item, text):
-    info(text)
+    logger.debug(text)
     if not text.startswith("http"):
         text = "http://" + text