From b4376525deef41a722aadbcb53501a8d68eefe7b Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 8 Jan 2020 19:19:59 +0100 Subject: [PATCH] KoD 0.7 - new DNS override method - added an option to hide servers when using autoplay - code improvements and assorted fixes --- addon.xml | 8 +- channels.json | 25 +- channels/0example.py.txt | 9 +- channels/altadefinizione01.py | 24 +- channels/altadefinizione01_link.py | 2 +- channels/altadefinizioneclick.py | 7 +- channels/animeforce.py | 7 +- channels/animeleggendari.py | 3 +- channels/animespace.py | 3 +- channels/animesubita.py | 3 +- channels/animetubeita.py | 7 +- channels/animeworld.py | 11 +- channels/casacinema.py | 7 +- channels/casacinemaInfo.py | 28 +- channels/cb01anime.py | 6 +- channels/cineblog01.py | 42 +- channels/cinemalibero.py | 11 +- channels/cinetecadibologna.json | 34 +- channels/cinetecadibologna.py | 179 +- channels/documentaristreamingda.py | 3 +- channels/dreamsub.py | 19 +- channels/eurostreaming.py | 28 +- channels/fastsubita.py | 7 +- channels/filmigratis.py | 3 +- channels/filmpertutti.py | 13 +- channels/filmsenzalimiticc.py | 3 +- channels/guardaserieclick.py | 3 +- channels/hdblog.json | 37 - channels/hdblog.py | 94 - channels/ilgeniodellostreaming.py | 9 +- channels/ilgiramondo.json | 37 - channels/ilgiramondo.py | 67 - channels/istitutoluce.json | 37 - channels/istitutoluce.py | 288 --- channels/italiafilm.json | 44 - channels/italiafilm.py | 497 ----- channels/italiaserie.py | 9 +- channels/mondoserietv.py | 3 +- channels/netfreex.py | 3 +- channels/piratestreaming.py | 3 +- channels/polpotv.py | 3 +- channels/programmazione.json | 21 - channels/programmazione.py | 71 - channels/renumbertools.py | 1001 ---------- channels/ricettevideo.json | 37 - channels/ricettevideo.py | 58 - channels/seriehd.py | 28 +- channels/serietvonline.py | 29 +- channels/serietvsubita.py | 3 +- channels/serietvu.py | 9 +- channels/streamingaltadefinizione.py | 20 +- channels/streamtime.py | 9 +- channels/tantifilm.py | 65 +- channels/toonitalia.py | 3 +- channels/vedohd.py | 7 +- channels/vvvvid.py | 4 +- core/cloudflare.py | 112 -- core/httptools.py | 409 ++--- core/proxytools.py | 1 - core/scrapertools.py | 158 +- core/scrapertoolsV2.py | 346 ---- core/servertools.py | 4 +- core/support.py | 96 +- default.py | 6 +- lib/doh.py | 77 + lib/gktools.py | 315 ---- lib/jjdecode.py | 312 ---- lib/js2py/base.py | 13 +- lib/js2py/evaljs.py | 32 +- .../internals/constructors/jsfunction.py | 5 +- lib/js2py/internals/prototypes/jsstring.py | 2 +- lib/js2py/node_import.py | 50 +- lib/js2py/prototypes/jsfunction.py | 5 +- lib/js2py/test_internals.py | 9 - lib/js2py/translators/translating_nodes.py | 119 +- lib/js2py/utils/injector.py | 11 +- lib/jsc.py | 83 - lib/jscrypto.py | 550 ------ lib/jsinterpreter.py | 249 --- lib/pafy/__init__.py | 15 - lib/pafy/pafy.py | 1618 ----------------- lib/pyjsparser/__init__.py | 5 +- lib/pyjsparser/parser.py | 1468 ++++++++------- lib/pyjsparser/pyjsparserdata.py | 492 +++-- lib/pyjsparser/std_nodes.py | 153 +- lib/requests_toolbelt/__init__.py | 34 + lib/requests_toolbelt/_compat.py | 324 ++++ lib/requests_toolbelt/adapters/__init__.py | 15 + lib/requests_toolbelt/adapters/appengine.py | 206 +++ lib/requests_toolbelt/adapters/fingerprint.py | 48 + .../adapters/host_header_ssl.py | 43 + .../adapters/socket_options.py | 129 ++ lib/requests_toolbelt/adapters/source.py | 67 + lib/requests_toolbelt/adapters/ssl.py | 66 + lib/requests_toolbelt/adapters/x509.py | 178 ++ lib/requests_toolbelt/auth/__init__.py | 0
.../auth/_digest_auth_compat.py | 29 + lib/requests_toolbelt/auth/guess.py | 146 ++ lib/requests_toolbelt/auth/handler.py | 142 ++ .../auth/http_proxy_digest.py | 103 ++ lib/requests_toolbelt/cookies/__init__.py | 0 lib/requests_toolbelt/cookies/forgetful.py | 7 + .../downloadutils/__init__.py | 0 lib/requests_toolbelt/downloadutils/stream.py | 177 ++ lib/requests_toolbelt/downloadutils/tee.py | 123 ++ lib/requests_toolbelt/exceptions.py | 37 + lib/requests_toolbelt/multipart/__init__.py | 31 + lib/requests_toolbelt/multipart/decoder.py | 156 ++ lib/requests_toolbelt/multipart/encoder.py | 655 +++++++ lib/requests_toolbelt/sessions.py | 70 + lib/requests_toolbelt/streaming_iterator.py | 116 ++ lib/requests_toolbelt/threaded/__init__.py | 97 + lib/requests_toolbelt/threaded/pool.py | 211 +++ lib/requests_toolbelt/threaded/thread.py | 53 + lib/requests_toolbelt/utils/__init__.py | 0 lib/requests_toolbelt/utils/deprecated.py | 91 + lib/requests_toolbelt/utils/dump.py | 197 ++ lib/requests_toolbelt/utils/formdata.py | 108 ++ lib/requests_toolbelt/utils/user_agent.py | 143 ++ platformcode/config.py | 28 +- platformcode/launcher.py | 56 +- platformcode/platformtools.py | 34 +- platformcode/updater.py | 34 +- resources/language/English/strings.po | 19 +- resources/language/Italian/strings.po | 28 +- resources/settings.xml | 25 +- servers/decrypters/zcrypt.py | 6 +- servers/hdload.py | 2 +- servers/mixdrop.py | 53 +- servers/onlystream.py | 8 +- servers/supervideo.py | 8 +- specials/autoplay.py | 19 +- specials/autorenumber.py | 16 +- specials/community.py | 63 +- specials/resolverdns.py | 186 +- specials/search.py | 8 +- specials/searchall.py | 866 --------- specials/setting.py | 12 +- videolibrary_service.py | 8 +- 139 files changed, 6078 insertions(+), 8909 deletions(-) delete mode 100644 channels/hdblog.json delete mode 100644 channels/hdblog.py delete mode 100644 channels/ilgiramondo.json delete mode 100644 channels/ilgiramondo.py delete mode 100644 channels/istitutoluce.json delete mode 100644 channels/istitutoluce.py delete mode 100644 channels/italiafilm.json delete mode 100644 channels/italiafilm.py delete mode 100644 channels/programmazione.json delete mode 100644 channels/programmazione.py delete mode 100644 channels/renumbertools.py delete mode 100644 channels/ricettevideo.json delete mode 100644 channels/ricettevideo.py delete mode 100644 core/cloudflare.py delete mode 100644 core/proxytools.py delete mode 100644 core/scrapertoolsV2.py create mode 100644 lib/doh.py delete mode 100644 lib/gktools.py delete mode 100644 lib/jjdecode.py delete mode 100644 lib/js2py/test_internals.py delete mode 100644 lib/jsc.py delete mode 100644 lib/jscrypto.py delete mode 100644 lib/jsinterpreter.py delete mode 100644 lib/pafy/__init__.py delete mode 100644 lib/pafy/pafy.py create mode 100644 lib/requests_toolbelt/__init__.py create mode 100644 lib/requests_toolbelt/_compat.py create mode 100644 lib/requests_toolbelt/adapters/__init__.py create mode 100644 lib/requests_toolbelt/adapters/appengine.py create mode 100644 lib/requests_toolbelt/adapters/fingerprint.py create mode 100644 lib/requests_toolbelt/adapters/host_header_ssl.py create mode 100644 lib/requests_toolbelt/adapters/socket_options.py create mode 100644 lib/requests_toolbelt/adapters/source.py create mode 100644 lib/requests_toolbelt/adapters/ssl.py create mode 100644 lib/requests_toolbelt/adapters/x509.py create mode 100644 lib/requests_toolbelt/auth/__init__.py create mode 100644 lib/requests_toolbelt/auth/_digest_auth_compat.py create 
mode 100644 lib/requests_toolbelt/auth/guess.py create mode 100644 lib/requests_toolbelt/auth/handler.py create mode 100644 lib/requests_toolbelt/auth/http_proxy_digest.py create mode 100644 lib/requests_toolbelt/cookies/__init__.py create mode 100644 lib/requests_toolbelt/cookies/forgetful.py create mode 100644 lib/requests_toolbelt/downloadutils/__init__.py create mode 100644 lib/requests_toolbelt/downloadutils/stream.py create mode 100644 lib/requests_toolbelt/downloadutils/tee.py create mode 100644 lib/requests_toolbelt/exceptions.py create mode 100644 lib/requests_toolbelt/multipart/__init__.py create mode 100644 lib/requests_toolbelt/multipart/decoder.py create mode 100644 lib/requests_toolbelt/multipart/encoder.py create mode 100644 lib/requests_toolbelt/sessions.py create mode 100644 lib/requests_toolbelt/streaming_iterator.py create mode 100644 lib/requests_toolbelt/threaded/__init__.py create mode 100644 lib/requests_toolbelt/threaded/pool.py create mode 100644 lib/requests_toolbelt/threaded/thread.py create mode 100644 lib/requests_toolbelt/utils/__init__.py create mode 100644 lib/requests_toolbelt/utils/deprecated.py create mode 100644 lib/requests_toolbelt/utils/dump.py create mode 100644 lib/requests_toolbelt/utils/formdata.py create mode 100644 lib/requests_toolbelt/utils/user_agent.py delete mode 100644 specials/searchall.py diff --git a/addon.xml b/addon.xml index b64a18a7..08ef47c0 100644 --- a/addon.xml +++ b/addon.xml @@ -1,4 +1,4 @@ - + @@ -19,9 +19,9 @@ resources/media/themes/ss/2.png resources/media/themes/ss/3.png - -Nuova ricerca globale - -migliorie prestazionali in generale - -fix vari ai server + - nuovo metodo di override DNS +- aggiunta opzione nascondi server, se usi l'autoplay +- migliorie al codice e fix vari Naviga velocemente sul web e guarda i contenuti presenti [COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR] [COLOR yellow]Kodi © is a registered trademark of the XBMC Foundation. We are not connected to or in any other way affiliated with Kodi, Team Kodi, or the XBMC Foundation. 
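The headline item in this patch is the new DNS override method announced in the changelog above: the diffstat adds lib/doh.py (+77 lines) and reworks specials/resolverdns.py, while the old core/cloudflare.py and core/proxytools.py helpers are dropped. The body of lib/doh.py is not included in this excerpt, so the snippet below is only a minimal sketch of the kind of DNS-over-HTTPS lookup such a module performs; the doh_resolve name and the choice of Cloudflare's public JSON endpoint are illustrative assumptions, not the addon's actual code.

# Hedged sketch only -- lib/doh.py itself is not shown in this patch.
import requests

def doh_resolve(hostname, endpoint='https://cloudflare-dns.com/dns-query'):
    """Return the A records for hostname via a DNS-over-HTTPS JSON query."""
    resp = requests.get(endpoint,
                        params={'name': hostname, 'type': 'A'},
                        headers={'Accept': 'application/dns-json'},
                        timeout=10)
    resp.raise_for_status()
    # 'Answer' entries with type 1 are A records; CNAMEs etc. are skipped
    return [a['data'] for a in resp.json().get('Answer', [])
            if a.get('type') == 1]

Resolving names over HTTPS this way sidesteps ISP-level DNS blocking of the frequently rotated domains listed in channels.json.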
Furthermore, any software, addons, or products offered by us will receive no support in official Kodi channels, including the Kodi forums and various social networks.[/COLOR] diff --git a/channels.json b/channels.json index 1ef4373b..77b5ba1f 100644 --- a/channels.json +++ b/channels.json @@ -1,34 +1,31 @@ { - "altadefinizione01": "https://www.altadefinizione01.cc", - "altadefinizione01_club": "https://www.altadefinizione01.cc", - "altadefinizione01_link": "http://altadefinizione01.cx", - "altadefinizioneclick": "https://altadefinizione.cloud", - "animeforce": "https://ww1.animeforce.org", + "altadefinizione01": "https://www.altadefinizione01.tel", + "altadefinizione01_link": "https://altadefinizione01.date", + "animeforce": "https://ww1.animeforce.org", "animeleggendari": "https://animepertutti.com", - "animespace": "http://www.animespace.tv", + "animespace": "https://animespace.tv", "animestream": "https://www.animeworld.it", "animesubita": "http://www.animesubita.org", "animetubeita": "http://www.animetubeita.com", "animeworld": "https://www1.animeworld.tv", - "casacinema": "https://www.casacinema.uno", - "casacinemainfo": "https://www.casacinema.info", + "casacinema": "https://www.casacinema.cloud", + "casacinemaInfo": "https://casacinema.space", "cb01anime": "https://www.cineblog01.ink", "cinemalibero": "https://www.cinemalibero.live", + "cinetecadibologna" : "http://cinestore.cinetecadibologna.it", "documentaristreamingda": "https://documentari-streaming-da.com", - "dreamsub": "https://www.dreamsub.stream", - "eurostreaming": "https://eurostreaming.pink", + "dreamsub": "https://www.dreamsub.stream", "fastsubita": "https://fastsubita.com", - "filmgratis": "https://www.filmaltadefinizione.net", + "filmgratis": "https://www.filmaltadefinizione.org", "filmigratis": "https://filmigratis.org", - "filmpertutti": "https://www.filmpertutti.gratis", + "filmpertutti": "https://www.filmpertutti.casa", "filmsenzalimiticc": "https://www.filmsenzalimiti.monster", "filmstreaming01": "https://filmstreaming01.com", "guardarefilm": "https://www.guardarefilm.red", "guardaserie_stream": "https://guardaserie.co", "guardaserieclick": "https://www.guardaserie.media", "ilgeniodellostreaming": "https://igds.red", - "italiafilm": "https://www.italia-film.video", - "italiaserie": "https://italiaserie.org", + "italiaserie": "https://italiaserie.org", "mondoserietv": "https://mondoserietv.com", "netfreex": "https://www.netfreex.online", "piratestreaming": "https://www.piratestreaming.gratis", diff --git a/channels/0example.py.txt b/channels/0example.py.txt index f418da4a..2d5bb5ea 100644 --- a/channels/0example.py.txt +++ b/channels/0example.py.txt @@ -48,7 +48,7 @@ from core import support from platformcode import config # in caso di necessità -#from core import scrapertoolsV2, httptools, servertools, tmdb +#from core import scrapertools, httptools, servertools, tmdb from core.item import Item # per newest #from lib import unshortenit @@ -74,10 +74,9 @@ def findhost(): permUrl = httptools.downloadpage('INSERIRE-URL-QUI', follow_redirects=False).headers host = 'https://www.'+permUrl['location'].replace('https://www.google.it/search?q=site:', '') # cancellare host non utilizzato - host = scrapertoolsV2.find_single_match(permUrl, r'
\s+<', '> <', data) - block = scrapertoolsV2.find_single_match(data, r'') + block = scrapertools.find_single_match(data, r'') if re.findall('', data, re.IGNORECASE): support.log('select = ### è una serie ###') return episodios(Item(channel=item.channel, diff --git a/channels/altadefinizione01.py b/channels/altadefinizione01.py index ea96dc4d..afc6e518 100644 --- a/channels/altadefinizione01.py +++ b/channels/altadefinizione01.py @@ -13,27 +13,27 @@ Ulteriori info: """ -from core import scrapertoolsV2, httptools, support +from core import scrapertools, httptools, support from core.item import Item from platformcode import config, logger #impostati dinamicamente da findhost() -host = "https://www.altadefinizione01.cc" -headers = "" def findhost(): - pass - # global host, headers - # data = httptools.downloadpage('https://altadefinizione01-nuovo.link/').data - # host = scrapertoolsV2.find_single_match(data, '
""") - if not url: url = support.scrapertoolsV2.find_single_match(data, 'file: "([^"]+)"') + url = support.scrapertools.find_single_match(data, """""") + if not url: url = support.scrapertools.find_single_match(data, 'file: "([^"]+)"') if url: url += '|' + urllib.urlencode(headers) itemlist.append( diff --git a/channels/animeworld.py b/channels/animeworld.py index 00b27b91..4fb56587 100644 --- a/channels/animeworld.py +++ b/channels/animeworld.py @@ -5,10 +5,11 @@ from core import support, jsontools -__channel__ = "animeworld" -host = support.config.get_channel_url(__channel__) +host = support.config.get_channel_url() headers = [['Referer', host]] +__channel__ = 'animeworld' + list_servers = ['animeworld', 'verystream', 'streamango', 'openload', 'directo'] list_quality = ['default', '480p', '720p', '1080p'] @@ -146,9 +147,9 @@ def findvideos(item): videoData = '' for serverid in matches: - if not item.number: item.number = support.scrapertoolsV2.find_single_match(item.title,r'(\d+) -') - block = support.scrapertoolsV2.find_multiple_matches(data,'data-id="' + serverid + '">(.*?)
(.*?)
\s+<', '> <', data) if 'continua con il video' in data.lower(): -## block = scrapertoolsV2.find_single_match(data, r'
(.*?)
') +## block = scrapertools.find_single_match(data, r'
(.*?)
') ## if re.findall('rel="category tag">serie', data, re.IGNORECASE): support.log('select = ### è un film ###') return findvideos(Item(channel=item.channel, diff --git a/channels/casacinemaInfo.py b/channels/casacinemaInfo.py index d0cf47ab..c7afaf19 100644 --- a/channels/casacinemaInfo.py +++ b/channels/casacinemaInfo.py @@ -19,21 +19,19 @@ """ from core import support -from core import scrapertoolsV2, httptools +from core import scrapertools, httptools from core.item import Item -host = "https://casacinema.stream" -headers = "" -def findhost(): - pass - # global host, headers - # data = httptools.downloadpage('https://casacinema.nuovo.link').data - # host = scrapertoolsV2.find_single_match(data, r'
', categoria) - findhost() + itemlist = [] item = Item() diff --git a/channels/cb01anime.py b/channels/cb01anime.py index b996eb58..d08c70b2 100644 --- a/channels/cb01anime.py +++ b/channels/cb01anime.py @@ -6,15 +6,11 @@ from core import support -__channel__ = "cb01anime" -host = support.config.get_channel_url(__channel__) + '/anime' +host = support.config.get_channel_url() + '/anime' Blacklist = ['AVVISO IMPORTANTE – CB01.ROCKS', 'Lista Alfabetica Completa Anime/Cartoon', 'CB01.UNO ▶ TROVA L’INDIRIZZO UFFICIALE','Lista Richieste Up & Re-Up'] list_servers = ['verystream', 'openload', 'streamango', 'thevideome'] list_quality = ['1080p', '720p', '480p', '360'] - -checklinks = support.config.get_setting('checklinks', __channel__) -checklinks_number = support.config.get_setting('checklinks_number', __channel__) headers = [['Referer', host]] @support.menu diff --git a/channels/cineblog01.py b/channels/cineblog01.py index 7766b190..b4c23f11 100644 --- a/channels/cineblog01.py +++ b/channels/cineblog01.py @@ -5,28 +5,22 @@ import re -from core import scrapertoolsV2, httptools, servertools, tmdb, support +from core import scrapertools, httptools, servertools, tmdb, support from core.item import Item from lib import unshortenit from platformcode import logger, config -#impostati dinamicamente da findhost() -host = "https://cb01.nl" -headers = "" - def findhost(): - pass - # global host, headers - # permUrl = httptools.downloadpage('https://www.cb01.uno/', follow_redirects=False).headers - # if 'google' in permUrl['location']: - # if host[:4] != 'http': - # host = 'https://'+permUrl['location'].replace('https://www.google.it/search?q=site:', '') - # else: - # host = permUrl['location'].replace('https://www.google.it/search?q=site:', '') - # else: - # host = permUrl['location'] - # headers = [['Referer', host]] + permUrl = httptools.downloadpage('https://www.cb01.uno/', follow_redirects=False).headers + if 'google' in permUrl['location']: + host = permUrl['location'].replace('https://www.google.it/search?q=site:', '') + else: + host = permUrl['location'] + return host + +host = config.get_channel_url(findhost) +headers = [['Referer', host]] list_servers = ['verystream', 'openload', 'streamango', 'wstream'] list_quality = ['HD', 'SD', 'default'] @@ -37,7 +31,6 @@ checklinks_number = config.get_setting('checklinks_number', 'cineblog01') @support.menu def mainlist(item): - findhost() film = [ ('HD', ['', 'menu', 'Film HD Streaming']), ('Generi', ['', 'menu', 'Film per Genere']), @@ -60,7 +53,6 @@ def mainlist(item): @support.scrape def menu(item): - findhost() patronBlock = item.args + r'<\/span>.*?(?P.*?)<\/ul>' patronMenu = r'href="?(?P[^">]+)"?>(?P.*?)<\/a>' action = 'peliculas' @@ -70,7 +62,7 @@ def menu(item): # @support.scrape # def newest(categoria): -# findhost() +# # # debug = True # patron = r'<a href="?(?P<url>[^">]+)"?>(?P<title>[^<([]+)(?:\[(?P<lang>Sub-ITA|B/N|SUB-ITA)\])?\s*(?:\[(?P<quality>HD|SD|HD/3D)\])?\s*\((?P<year>[0-9]{4})\)<\/a>' @@ -100,7 +92,7 @@ def menu(item): def newest(categoria): support.log(categoria) - findhost() + item = support.Item() try: if categoria == "series": @@ -175,13 +167,13 @@ def episodios(item): def findvideos(item): - findhost() + if item.contentType == "episode": return findvid_serie(item) def load_links(itemlist, re_txt, color, desc_txt, quality=""): - streaming = scrapertoolsV2.find_single_match(data, re_txt).replace('"', '') + streaming = scrapertools.find_single_match(data, re_txt).replace('"', '') support.log('STREAMING',streaming) 
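The cineblog01 hunk above is the clearest instance of the host-resolution pattern this patch rolls out across the channels: the hardcoded host and the stubbed findhost() are replaced either by config.get_channel_url() (static URL taken from channels.json) or by config.get_channel_url(findhost), passing the channel's own resolver function. The matching platformcode/config.py hunk is not part of this excerpt, so the following is a hedged sketch of the lazy-resolution-plus-caching behaviour those call sites imply; the cache layout and the _read_channels_json helper are illustrative assumptions.

# Illustrative sketch, not the real platformcode/config.py implementation.
import json

_host_cache = {}

def _read_channels_json(path='channels.json'):
    # static fallback URLs, as shipped in the patched channels.json above
    with open(path) as f:
        return json.load(f)

def get_channel_url(findhost=None, channel_id='cineblog01'):
    # the real addon infers channel_id from the calling module; it is an
    # explicit (assumed) parameter here so the sketch stays self-contained
    if channel_id not in _host_cache:
        if findhost is not None:
            # the channel supplied a resolver: chase the site's redirect
            # once per session and reuse the discovered domain afterwards
            _host_cache[channel_id] = findhost()
        else:
            _host_cache[channel_id] = _read_channels_json()[channel_id]
    return _host_cache[channel_id]

Caching matters because findhost() costs a network round trip (cineblog01, for instance, follows a redirect and strips a 'https://www.google.it/search?q=site:' prefix), which should not rerun on every menu action.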
support.log('STREAMING=', streaming) # patron = '<td><a.*?href=(.*?) (?:target|rel)[^>]+>([^<]+)<' @@ -215,7 +207,7 @@ def findvideos(item): matches = re.compile(patronvideos, re.DOTALL).finditer(data) QualityStr = "" for match in matches: - QualityStr = scrapertoolsV2.decodeHtmlentities(match.group(1))[6:] + QualityStr = scrapertools.decodeHtmlentities(match.group(1))[6:] # Estrae i contenuti - Streaming load_links(itemlist, '<strong>Streamin?g:</strong>(.*?)cbtable', "orange", "Streaming", "SD") @@ -315,12 +307,12 @@ def play(item): data = httptools.downloadpage(item.url).data if "window.location.href" in data: try: - data = scrapertoolsV2.find_single_match(data, 'window.location.href = "([^"]+)";') + data = scrapertools.find_single_match(data, 'window.location.href = "([^"]+)";') except IndexError: data = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location", "") data, c = unshortenit.unwrap_30x_only(data) else: - data = scrapertoolsV2.find_single_match(data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>') + data = scrapertools.find_single_match(data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>') logger.debug("##### play go.php data ##\n%s\n##" % data) else: diff --git a/channels/cinemalibero.py b/channels/cinemalibero.py index 3ebd21c3..0aee93a2 100644 --- a/channels/cinemalibero.py +++ b/channels/cinemalibero.py @@ -5,15 +5,14 @@ import re -from core import httptools, support, scrapertoolsV2 +from core import httptools, support, scrapertools from core.item import Item from platformcode import config list_servers = ['akstream', 'wstream', 'backin', 'clipwatching', 'cloudvideo', 'verystream', 'onlystream', 'mixdrop'] list_quality = ['default'] -__channel__ = "cinemalibero" -host = config.get_channel_url(__channel__) +host = config.get_channel_url() headers = [['Referer', host]] @support.menu @@ -90,7 +89,7 @@ def episodios(item): patronBlock = r'<p><strong>(?:.+?[Ss]tagione\s)?(?:(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?.*?</strong>(?P<block>.+?)(?:</span|</p)' item.contentType = 'tvshow' def itemHook(item): - if not scrapertoolsV2.find_single_match(item.title, r'(\d+x\d+)'): + if not scrapertools.find_single_match(item.title, r'(\d+x\d+)'): item.title = re.sub(r'(\d+) -', '1x\\1', item.title) return item @@ -149,7 +148,7 @@ def check(item): support.log() data = httptools.downloadpage(item.url, headers=headers).data if data: - blockAnime = scrapertoolsV2.find_single_match(data, r'<div id="container" class="container">(.+?<div style="margin-left)') + blockAnime = scrapertools.find_single_match(data, r'<div id="container" class="container">(.+?<div style="margin-left)') if blockAnime and ('episodio' in blockAnime.lower() or 'saga' in blockAnime.lower()): item.contentType = 'tvshow' @@ -157,7 +156,7 @@ def check(item): item.data = blockAnime return episodios(item) - elif scrapertoolsV2.find_single_match(blockAnime,r'\d+(?:×|×)?\d+\-\d+|\d+(?:×|×)\d+'): + elif scrapertools.find_single_match(blockAnime, r'\d+(?:×|×)?\d+\-\d+|\d+(?:×|×)\d+'): item.contentType = 'tvshow' item.data = data return episodios(item) diff --git a/channels/cinetecadibologna.json b/channels/cinetecadibologna.json index 6c6f06ab..d398eb63 100644 --- a/channels/cinetecadibologna.json +++ b/channels/cinetecadibologna.json @@ -1,36 +1,12 @@ { "id": "cinetecadibologna", - "name": "Cinetecadibologna", + "name": "Cineteca di Bologna", "language": ["ita"], "active": true, "adult": false, - "thumbnail": "http://cinestore.cinetecadibologna.it/pics/logo.gif", - 
"banner": "http://cinestore.cinetecadibologna.it/pics/logo.gif", + "thumbnail": "cinetecadibologna.png", + "banner": "cinetecadibologna.png", "categories": ["documentary"], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": false, - "enabled": false, - "visible": false - }, - { - "id": "include_in_newest_documentales", - "type": "bool", - "label": "Includi in Novità - Documentari", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_italiano", - "type": "bool", - "label": "Includi in Novità - Italiano", - "default": true, - "enabled": true, - "visible": true - } - ] + "not_active":["include_in_newest"], + "settings": [] } diff --git a/channels/cinetecadibologna.py b/channels/cinetecadibologna.py index 4af82cd8..892c3a19 100644 --- a/channels/cinetecadibologna.py +++ b/channels/cinetecadibologna.py @@ -12,145 +12,78 @@ from core import httptools, scrapertools from core.item import Item from platformcode import logger, config -host = "http://cinestore.cinetecadibologna.it" +from core import support + +host = support.config.get_channel_url() + headers = [['Referer', host]] - +@support.menu def mainlist(item): - logger.info("kod.cinetecadibologna mainlist") - itemlist = [Item(channel=item.channel, - title="[COLOR azure]Elenco Film - Cineteca di Bologna[/COLOR]", - action="peliculas", - url="%s/video/alfabetico_completo" % host, - thumbnail="http://cinestore.cinetecadibologna.it/pics/logo.gif"), - Item(channel=item.channel, - title="[COLOR azure]Epoche - Cineteca di Bologna[/COLOR]", - action="epoche", - url="%s/video/epoche" % host, - thumbnail="http://cinestore.cinetecadibologna.it/pics/logo.gif"), - Item(channel=item.channel, - title="[COLOR azure]Percorsi Tematici - Cineteca di Bologna[/COLOR]", - action="percorsi", - url="%s/video/percorsi" % host, - thumbnail="http://cinestore.cinetecadibologna.it/pics/logo.gif")] - - return itemlist + film = ['/video/alfabetico_completo', + ('Anni',['/video/epoche', 'menu']), + ('Registi',['/video/registi', 'menu']), + ('Attori',['/video/attori', 'menu']), + ('Percorsi Tematici',['/video/percorsi','menu'])] + return locals() +@support.scrape +def menu(item): + action = 'peliculas' + if 'epoche' in item.url: + patronMenu =r'<li>\s*<a href="(?P<url>[^"]+)">(?P<title>[^>]+)<' + elif 'percorsi' in item.url: + patron = r'<div class="cover_percorso">\s*<a href="(?P<url>[^"]+)">\s*<img src="(?P<thumb>[^"]+)"[^>]+>\s*[^>]+>(?P<title>.*?)<' + else: + patron = r'<h2>\s*<a href="(?P<url>[^"]+)"\s*>(?P<title>[^<]+)<' + patronNext = r'<div class="dx">\s*<a href="(.*?)">pagina suc' + return locals() + + +def search(item, text): + support.log(text) + item.args = 'noorder' + item.url = host + '/ricerca/type_ALL/ricerca_' + text + item.contentType = 'movie' + try: + return peliculas(item) + # Continua la ricerca in caso di errore + except: + import sys + for line in sys.exc_info(): + support.logger.error("%s" % line) + return [] + + + +@support.scrape def peliculas(item): - logger.info("kod.cinetecadibologna peliculas") - itemlist = [] + if 'alfabetico' in item.url: + patron = r'<img src="(?P<thumb>[^"]+)"[^>]+>\s*[^>]+>\s*<div[^>]+>\s*<div[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>(?:\[)?(?P<title>[^\]<]+)(?:\]|<)' + else: + if 'type_ALL' in item.url: patronBlock = r'Video:(?P<block>.*?)(?:<div class=""|<!--)' + elif not 'NomePersona' in item.url: patronBlock = r'<h3>Film</h3>(?P<block>.*?)<div class="list_wrapper' + patron = r'<a 
href="(?P<url>[^"]+)"\s*class="[^"]+"\s*title="(?:\[)?(?P<title>[^\]"]+)(?:\])?"\s*rel="(?P<thumb>[^"]+)"' + patronNext = r'<div class="dx">\s*<a href="(.*?)">pagina suc' + return locals() - # Carica la pagina - data = httptools.downloadpage(item.url, headers=headers).data - - # Estrae i contenuti - patron = '<img src="([^"]+)"[^>]+>\s*[^>]+>\s*<div[^>]+>\s*<div[^>]+>[^>]+>\s*<a href="([^"]+)"[^>]+>(.*?)<' - matches = re.compile(patron, re.DOTALL).findall(data) - scrapertools.printMatches(matches) - - for scrapedthumbnail, scrapedurl, scrapedtitle in matches: - scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle) - scrapedthumbnail = host + scrapedthumbnail - scrapedurl = host + scrapedurl - if not "/video/" in scrapedurl: - continue -## html = scrapertools.cache_page(scrapedurl) - html = httptools.downloadpage(scrapedurl, headers=headers).data - start = html.find("Sinossi:") - end = html.find('<div class="sx_col">', start) - scrapedplot = html[start:end] - scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot) - scrapedplot = scrapertools.decodeHtmlentities(scrapedplot) - itemlist.append(Item(channel=item.channel, action="findvideos", fulltitle=scrapedtitle, show=scrapedtitle, - title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, - folder=True)) - - # Paginazione - patronvideos = '<div class="footerList clearfix">\s*<div class="sx">\s*[^>]+>[^g]+gina[^>]+>\s*[^>]+>\s*<div class="dx">\s*<a href="(.*?)">pagina suc' - matches = re.compile(patronvideos, re.DOTALL).findall(data) - - if len(matches) > 0: - scrapedurl = urlparse.urljoin(item.url, matches[0]) - itemlist.append( - Item(channel=item.channel, - action="peliculas", - title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]", - url= scrapedurl, - thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png", - folder=True)) - - return itemlist - -def epoche(item): - logger.info("kod.cinetecadibologna categorias") - itemlist = [] - - data = httptools.downloadpage(item.url, headers=headers).data - - # Narrow search by selecting only the combo - bloque = scrapertools.find_single_match(data, '<h1 class="pagetitle">Epoche</h1>(.*?)</ul>') - - # The categories are the options for the combo - patron = '<a href="([^"]+)">(.*?)<' - matches = re.compile(patron, re.DOTALL).findall(bloque) - - for scrapedurl, scrapedtitle in matches: - scrapedurl = host + scrapedurl - scrapedplot = "" - if scrapedtitle.startswith(("'")): - scrapedtitle = scrapedtitle.replace("'", "Anni '") - itemlist.append( - Item(channel=item.channel, - action="peliculas", - title="[COLOR azure]" + scrapedtitle + "[/COLOR]", - url=scrapedurl, - thumbnail="http://www.cinetecadibologna.it/pics/cinema-ritrovato-alcinema.png", - plot=scrapedplot)) - - return itemlist - -def percorsi(item): - logger.info("kod.cinetecadibologna categorias") - itemlist = [] - - data = httptools.downloadpage(item.url, headers=headers).data - - patron = '<div class="cover_percorso">\s*<a href="([^"]+)">\s*<img src="([^"]+)"[^>]+>\s*[^>]+>(.*?)<' - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, scrapedthumbnail, scrapedtitle in matches: - scrapedurl = host + scrapedurl - scrapedplot = "" - scrapedthumbnail = host + scrapedthumbnail - itemlist.append( - Item(channel=item.channel, - action="peliculas", - title="[COLOR azure]" + scrapedtitle + "[/COLOR]", - url=scrapedurl, - thumbnail=scrapedthumbnail, - plot=scrapedplot)) - - return itemlist def findvideos(item): - 
logger.info("kod.cinetecadibologna findvideos") + support.log() itemlist = [] - data = httptools.downloadpage(item.url, headers=headers).data + matches = support.match(item, 'filename: "(.*?)"')[0] - patron = 'filename: "(.*?)"' - matches = re.compile(patron, re.DOTALL).findall(data) - - for video in matches: - video = host + video + for url in matches: itemlist.append( Item( channel=item.channel, action="play", - title=item.title + " [[COLOR orange]Diretto[/COLOR]]", - url=video, + title='Diretto', + server='directo', + url=host + url, folder=False)) - return itemlist + return support.server(item, itemlist=itemlist) diff --git a/channels/documentaristreamingda.py b/channels/documentaristreamingda.py index 876a3a95..80b2bb76 100644 --- a/channels/documentaristreamingda.py +++ b/channels/documentaristreamingda.py @@ -10,8 +10,7 @@ from core import httptools, scrapertools, servertools, support from core.item import Item from platformcode import logger, config -__channel__ = "documentaristreamingda" -host = config.get_channel_url(__channel__) +host = config.get_channel_url() list_servers = [''] list_quality = [''] diff --git a/channels/dreamsub.py b/channels/dreamsub.py index 4e69bb3d..e5ac914c 100644 --- a/channels/dreamsub.py +++ b/channels/dreamsub.py @@ -47,12 +47,11 @@ import re from core import support from platformcode import config -from core import scrapertoolsV2, httptools, servertools, tmdb +from core import scrapertools, httptools, servertools, tmdb from core.item import Item ##### fine import -__channel__ = "dreamsub" -host = config.get_channel_url(__channel__) +host = config.get_channel_url() headers = [['Referer', host]] # server di esempio... @@ -229,8 +228,8 @@ def findvideos(item): data = re.sub(r'>\s\s*<', '><', data) patronBlock = r'LINK STREAMING(?P<block>.*?)LINK DOWNLOAD' patron = r'href="(.+?)"' - block = scrapertoolsV2.find_single_match(data, patronBlock) - urls = scrapertoolsV2.find_multiple_matches(block, patron) + block = scrapertools.find_single_match(data, patronBlock) + urls = scrapertools.find_multiple_matches(block, patron) #support.regexDbg(item, patron, headers, data=data) for url in urls: @@ -242,7 +241,7 @@ def findvideos(item): lang = 'ITA' if 'keepem.online' in data: - urls = scrapertoolsV2.find_multiple_matches(data, r'(https://keepem\.online/f/[^"]+)"') + urls = scrapertools.find_multiple_matches(data, r'(https://keepem\.online/f/[^"]+)"') for url in urls: url = httptools.downloadpage(url).url itemlist += servertools.find_video_items(data=url) @@ -255,14 +254,14 @@ def findvideos(item): data = httptools.downloadpage(data).data support.log("LINK-DATA2 :", data) - video_urls = scrapertoolsV2.find_single_match(data, r'<meta name="description" content="([^"]+)"') + video_urls = scrapertools.find_single_match(data, r'<meta name="description" content="([^"]+)"') else: data = httptools.downloadpage(url).data - #host_video = scrapertoolsV2.find_single_match(data, r'var thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"') - host_video = scrapertoolsV2.find_single_match(data, r'(?:let|var) thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"') - link = scrapertoolsV2.find_single_match(data, r'<video src="([^"]+)"') + #host_video = scrapertools.find_single_match(data, r'var thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"') + host_video = scrapertools.find_single_match(data, r'(?:let|var) thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"') + link = scrapertools.find_single_match(data, r'<video src="([^"]+)"') video_urls = host_video+link title_show = support.typo(titles,'_ bold') + 
support.typo(lang,'_ [] color kod') diff --git a/channels/eurostreaming.py b/channels/eurostreaming.py index 1547180a..2101e2d4 100644 --- a/channels/eurostreaming.py +++ b/channels/eurostreaming.py @@ -12,20 +12,16 @@ - serie, anime """ import re -from core import scrapertoolsV2, httptools, support +from core import scrapertools, httptools, support from core.item import Item -from platformcode import config - -#impostati dinamicamente da findhost() -host = "https://eurostreaming.pink" -headers = "" def findhost(): - pass - # global host, headers - # permUrl = httptools.downloadpage('https://eurostreaming.link/', follow_redirects=False).headers - # host = 'https://www.'+permUrl['location'].replace('https://www.google.it/search?q=site:', '') - # headers = [['Referer', host]] + permUrl = httptools.downloadpage('https://eurostreaming.link/', follow_redirects=False).headers + host = 'https://'+permUrl['location'].replace('https://www.google.it/search?q=site:', '') + return host + +host = support.config.get_channel_url(findhost) +headers = [['Referer', host]] @@ -35,7 +31,7 @@ list_quality = ['default'] @support.menu def mainlist(item): support.log() - findhost() + tvshow = ['' ] @@ -98,13 +94,13 @@ def pagina(url): data = httptools.downloadpage(url, headers=headers).data.replace("'", '"') #support.log("DATA ----###----> ", data) if 'clicca qui per aprire' in data.lower(): - url = scrapertoolsV2.find_single_match(data, '"go_to":"([^"]+)"') + url = scrapertools.find_single_match(data, '"go_to":"([^"]+)"') url = url.replace("\\","") # Carica la pagina data = httptools.downloadpage(url, headers=headers).data.replace("'", '"') elif 'clicca qui</span>' in data.lower(): - url = scrapertoolsV2.find_single_match(data, '<h2 style="text-align: center;"><a href="([^"]+)">') + url = scrapertools.find_single_match(data, '<h2 style="text-align: center;"><a href="([^"]+)">') # Carica la pagina data = httptools.downloadpage(url, headers=headers).data.replace("'", '"') @@ -113,7 +109,7 @@ def pagina(url): # =========== def ricerca ============= def search(item, texto): support.log() - findhost() + item.url = "%s/?s=%s" % (host, texto) item.contentType = 'tvshow' @@ -131,7 +127,7 @@ def search(item, texto): def newest(categoria): support.log() - findhost() + itemlist = [] item = Item() item.contentType = 'tvshow' diff --git a/channels/fastsubita.py b/channels/fastsubita.py index ef6d52ff..b022c94e 100644 --- a/channels/fastsubita.py +++ b/channels/fastsubita.py @@ -16,13 +16,12 @@ - SOLO SUB-ITA """ -from core import support, httptools, scrapertoolsV2 +from core import support, httptools, scrapertools from core.item import Item from core.support import log from platformcode import config -__channel__ = 'fastsubita' -host = config.get_channel_url(__channel__) +host = config.get_channel_url() headers = [['Referer', host]] list_servers = ['verystream', 'openload', 'speedvideo', 'wstream', 'flashx', 'vidoza', 'vidtome'] list_quality = ['default'] @@ -159,7 +158,7 @@ def findvideos(item): data = httptools.downloadpage(item.url).data patron = r'>Posted in <a href="https?://fastsubita.com/serietv/([^/]+)/(?:[^"]+)?"' - series = scrapertoolsV2.find_single_match(data, patron) + series = scrapertools.find_single_match(data, patron) titles = support.typo(series.upper().replace('-', ' '), 'bold color kod') goseries = support.typo("Vai alla Serie:", ' bold color kod') itemlist.append( diff --git a/channels/filmigratis.py b/channels/filmigratis.py index a40c92aa..284f381c 100644 --- a/channels/filmigratis.py +++ 
b/channels/filmigratis.py @@ -17,8 +17,7 @@ from core import servertools, httptools, support from core.item import Item from platformcode import config -__channel__ = 'filmigratis' -host = config.get_channel_url(__channel__) +host = config.get_channel_url() list_servers = ['verystream', 'openload', 'streamango', 'vidoza', 'okru'] list_quality = ['1080p', '720p', '480p', '360'] diff --git a/channels/filmpertutti.py b/channels/filmpertutti.py index 6e1899ab..28c8f9bd 100644 --- a/channels/filmpertutti.py +++ b/channels/filmpertutti.py @@ -17,13 +17,12 @@ """ import re -from core import scrapertoolsV2, httptools, support +from core import scrapertools, httptools, support from core.item import Item from platformcode import config -__channel__ = 'filmpertutti' -host = config.get_channel_url(__channel__) +host = config.get_channel_url() headers = [['Referer', host]] list_servers = ['speedvideo', 'verystream', 'openload', 'streamango', 'wstream', 'akvideo'] list_quality = ['HD', 'SD'] @@ -114,7 +113,7 @@ def select(item): support.log() data = httptools.downloadpage(item.url, headers=headers).data - patronBlock = scrapertoolsV2.find_single_match(data, r'class="taxonomy category" ><span property="name">(.*?)</span></a><meta property="position" content="2">') + patronBlock = scrapertools.find_single_match(data, r'class="taxonomy category" ><span property="name">(.*?)</span></a><meta property="position" content="2">') if patronBlock.lower() != 'film': support.log('select = ### è una serie ###') item.contentType='tvshow' @@ -170,6 +169,10 @@ def newest(categoria): def findvideos(item): if item.contentType == 'movie': - return support.server(item) + data = httptools.downloadpage(item.url).data + toUnshorten = scrapertools.find_multiple_matches(data, 'https?://buckler.link/[a-zA-Z0-9]+') + for link in toUnshorten: + data += '\n' + httptools.downloadpage(link, follow_redirects=False).headers["Location"] + return support.server(item, data=data) else: return support.server(item, item.url) diff --git a/channels/filmsenzalimiticc.py b/channels/filmsenzalimiticc.py index f4e25faf..2f3fd290 100644 --- a/channels/filmsenzalimiticc.py +++ b/channels/filmsenzalimiticc.py @@ -14,8 +14,7 @@ from platformcode import logger from specials import autoplay # Necessario per Autoplay -__channel__ = 'filmsenzalimiticc' -host = config.get_channel_url(__channel__) +host = config.get_channel_url() IDIOMAS = {'Italiano': 'IT'} list_language = IDIOMAS.values() diff --git a/channels/guardaserieclick.py b/channels/guardaserieclick.py index 1e129c45..99380efd 100644 --- a/channels/guardaserieclick.py +++ b/channels/guardaserieclick.py @@ -19,8 +19,7 @@ from core.item import Item from platformcode import config from core.support import log -__channel__ = 'guardaserieclick' -host = config.get_channel_url(__channel__) +host = config.get_channel_url() headers = [['Referer', host]] list_servers = ['speedvideo', 'openload'] diff --git a/channels/hdblog.json b/channels/hdblog.json deleted file mode 100644 index a352408c..00000000 --- a/channels/hdblog.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "id": "hdblog", - "name": "Hdblog", - "language": ["ita"], - "active": true, - "adult": false, - "thumbnail": "http://css.hd-cdn.it/new_files/templates/theme_darklight/img/logos_wt/logohdhardware.png", - "banner": "http://css.hd-cdn.it/new_files/templates/theme_darklight/img/logos_wt/logohdhardware.png", - "categories": ["documentary"], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Includi in Ricerca Globale", 
- "default": false, - "enabled": false, - "visible": false - }, - { - "id": "include_in_newest_documentales", - "type": "bool", - "label": "Includi in Novità - Documentari", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_italiano", - "type": "bool", - "label": "Includi in Novità - Italiano", - "default": true, - "enabled": true, - "visible": true - } - ] -} - diff --git a/channels/hdblog.py b/channels/hdblog.py deleted file mode 100644 index c52e4266..00000000 --- a/channels/hdblog.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# ------------------------------------------------------------ -# Ringraziamo Icarus crew -# Canale hdblog -# ------------------------------------------------------------ -import re -import urlparse - -from core import httptools, scrapertools -from core.item import Item -from platformcode import logger -from platformcode import config - -host = "https://www.hdblog.it" - - -def mainlist(item): - logger.info("kod.hdblog mainlist") - itemlist = [Item(channel=item.channel, - title="[COLOR azure]Video recensioni tecnologiche[/COLOR]", - action="peliculas", - url=host + "/video/", - thumbnail="http://www.crat-arct.org/uploads/images/tic%201.jpg"), - Item(channel=item.channel, - title="[COLOR azure]Categorie[/COLOR]", - action="categorias", - url=host + "/video/", - thumbnail="http://www.crat-arct.org/uploads/images/tic%201.jpg")] - - return itemlist - - -def categorias(item): - logger.info("kod.hdblog categorias") - itemlist = [] - - data = httptools.downloadpage(item.url).data - logger.info(data) - - # Narrow search by selecting only the combo - start = data.find('<section class="left_toolbar" style="float: left;width: 125px;margin-right: 18px;">') - end = data.find('</section>', start) - bloque = data[start:end] - - # The categories are the options for the combo - patron = '<a href="([^"]+)"[^>]+><span>(.*?)</span>' - matches = re.compile(patron, re.DOTALL).findall(bloque) - scrapertools.printMatches(matches) - - for scrapedurl, scrapedtitle in matches: - scrapedthumbnail = "" - scrapedplot = "" - itemlist.append( - Item(channel=item.channel, - action="peliculas", - title="[COLOR azure]" + scrapedtitle + "[/COLOR]", - url=scrapedurl + "video/", - thumbnail=scrapedthumbnail, - plot=scrapedplot)) - - return itemlist - - -def peliculas(item): - logger.info("kod.hdblog peliculas") - itemlist = [] - - # Carica la pagina - data = httptools.downloadpage(item.url).data - - # Estrae i contenuti - patron = '<a class="thumb_new_image" href="([^"]+)">\s*<img[^s]+src="([^"]+)"[^>]+>\s*</a>\s*[^>]+>\s*(.*?)\s*<' - matches = re.compile(patron, re.DOTALL).findall(data) - scrapertools.printMatches(matches) - - for scrapedurl, scrapedthumbnail, scrapedtitle in matches: - scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle) - scrapedplot = "" - itemlist.append(Item(channel=item.channel, action="findvideos", fulltitle=scrapedtitle, show=scrapedtitle, - title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, - folder=True)) - - # Paginazione - patronvideos = '<span class="attiva">[^>]+>[^=]+="next" href="(.*?)" class="inattiva">' - matches = re.compile(patronvideos, re.DOTALL).findall(data) - scrapertools.printMatches(matches) - - if len(matches) > 0: - scrapedurl = urlparse.urljoin(item.url, matches[0]) - itemlist.append( - Item(channel=item.channel, action="peliculas", title="[COLOR orange]Avanti >>[/COLOR]", url=scrapedurl, - folder=True)) - - return itemlist diff --git 
a/channels/ilgeniodellostreaming.py b/channels/ilgeniodellostreaming.py index 5f79d839..3cbb560e 100644 --- a/channels/ilgeniodellostreaming.py +++ b/channels/ilgeniodellostreaming.py @@ -33,13 +33,12 @@ import re -from core import scrapertoolsV2, httptools, support +from core import scrapertools, httptools, support from core.support import log from core.item import Item from platformcode import config -__channel__ = 'ilgeniodellostreaming' -host = config.get_channel_url(__channel__) +host = config.get_channel_url() list_servers = ['verystream', 'openload', 'streamango'] list_quality = ['default'] @@ -217,7 +216,7 @@ def findvideos(item): matches, data = support.match(item, '<iframe class="metaframe rptss" src="([^"]+)"[^>]+>',headers=headers) for url in matches: html = httptools.downloadpage(url, headers=headers).data - data += str(scrapertoolsV2.find_multiple_matches(html, '<meta name="og:url" content="([^"]+)">')) + data += str(scrapertools.find_multiple_matches(html, '<meta name="og:url" content="([^"]+)">')) itemlist = support.server(item, data) @@ -225,7 +224,7 @@ def findvideos(item): data = httptools.downloadpage(item.url).data patron = r'<div class="item"><a href="'+host+'/serietv/([^"\/]+)\/"><i class="icon-bars">' - series = scrapertoolsV2.find_single_match(data, patron) + series = scrapertools.find_single_match(data, patron) titles = support.typo(series.upper().replace('-', ' '), 'bold color kod') goseries = support.typo("Vai alla Serie:", ' bold') itemlist.append( diff --git a/channels/ilgiramondo.json b/channels/ilgiramondo.json deleted file mode 100644 index a20bad1f..00000000 --- a/channels/ilgiramondo.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "id": "ilgiramondo", - "name": "IlGiramondo", - "language": ["ita"], - "active": true, - "adult": false, - "thumbnail": "http://www.ilgiramondo.net/wp-content/uploads/2013/05/logo-fixed.jpg", - "banner": "http://www.ilgiramondo.net/wp-content/uploads/2013/05/logo-fixed.jpg", - "categories": ["documentary"], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": false, - "enabled": false, - "visible": false - }, - { - "id": "include_in_newest_documentales", - "type": "bool", - "label": "Includi in Novità - Documentari", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_italiano", - "type": "bool", - "label": "Includi in Novità - Italiano", - "default": true, - "enabled": true, - "visible": true - } - ] -} - diff --git a/channels/ilgiramondo.py b/channels/ilgiramondo.py deleted file mode 100644 index 147c8d42..00000000 --- a/channels/ilgiramondo.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- -# ------------------------------------------------------------ -# Ringraziamo Icarus crew -# Canale ilgiramondo -# ------------------------------------------------------------ -import re -import urlparse - -from core import httptools, scrapertools -from core.item import Item -from platformcode import logger -from platformcode import config - -host = "http://www.ilgiramondo.net" - - -def mainlist(item): - logger.info("kod.ilgiramondo mainlist") - itemlist = [Item(channel=item.channel, title="[COLOR azure]Video di Viaggi[/COLOR]", action="peliculas", - url=host + "/video-vacanze-viaggi/", - thumbnail="http://hotelsjaisalmer.com/wp-content/uploads/2016/10/Travel1.jpg")] - - return itemlist - - -def peliculas(item): - logger.info("kod.ilgiramondo peliculas") - itemlist = [] - - # Carica la pagina - data = 
httptools.downloadpage(item.url).data - - # Estrae i contenuti - patron = '<article id=[^>]+><div class="space">\s*<a href="([^"]+)"><img[^s]+src="(.*?)"[^>]+><\/a>' - matches = re.compile(patron, re.DOTALL).findall(data) - scrapertools.printMatches(matches) - - for scrapedurl, scrapedthumbnail in matches: - html = httptools.downloadpage(scrapedurl).data - start = html.find("</script></div>") - end = html.find("</p>", start) - scrapedplot = html[start:end] - scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot) - scrapedplot = scrapertools.decodeHtmlentities(scrapedplot) - html = httptools.downloadpage(scrapedurl).data - start = html.find("<title>") - end = html.find("", start) - scrapedtitle = html[start:end] - scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle) - scrapedtitle = scrapedtitle.replace(" | Video Di Viaggi E Vacanze", "") - # scrapedplot = "" - itemlist.append(Item(channel=item.channel, action="findvideos", fulltitle=scrapedtitle, show=scrapedtitle, - title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, - folder=True)) - - # Paginazione - patronvideos = '
') - - patron = ']+>Film([^<]+)' - matches = re.compile(patron, re.DOTALL).findall(data) - scrapertools.printMatches(matches) - - for url, title in matches: - scrapedtitle = title - scrapedurl = urlparse.urljoin(item.url, url) - - if scrapedtitle.startswith((" Porno")): - continue - - scrapedplot = "" - scrapedthumbnail = "" - itemlist.append( - Item(channel=item.channel, - action='peliculas', - extra=item.extra, - title="[COLOR azure]" + scrapedtitle + "[/COLOR]", - url=scrapedurl, - thumbnail=scrapedthumbnail, - plot=scrapedplot, - folder=True)) - - return itemlist - - -def search(item, texto): - logger.info("[italiafilm.py] search " + texto) - item.url = host + "/?s=" + texto - - try: - if item.extra == "movie": - return peliculas(item) - if item.extra == "tvshow": - return peliculas_tv(item) - # Continua la ricerca in caso di errore - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def latestep(item): - logger.info("[italiafilm.py] latestep") - itemlist = [] - - data = httptools.downloadpage(item.url).data - blocco = scrapertools.find_single_match(data, r'
  • \s*[^>]+>([^<|^(]+)[^>]+>\s*]+>[^>]+>[^>]+>(?:[^>]+>[^>]+>|)([^<]+)(?:[^>]+>[^>]+>|)' - matches = re.compile(patron, re.DOTALL).findall(blocco) - - for scrapedtitle, scrapedurl, scrapedepisode in matches: - scrapedepisode = scrapertools.decodeHtmlentities(scrapedepisode) - scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip() - completetitle = "%s - %s" % (scrapedtitle, scrapedepisode) - - unsupportedeps = re.compile(r'\d+\-\d+', re.DOTALL).findall(scrapedepisode) - if len(unsupportedeps) > 0: - continue - - if 'completa' in scrapedtitle.lower(): - itemlist.append( - Item(channel=item.channel, - action="episodios", - title=completetitle, - contentSerieName=completetitle, - fulltitle=scrapedtitle, - url=scrapedurl, - folder=True)) - else: - if 'episodio' not in scrapedepisode: - replace = re.compile(r'(\d+)x(\d+)') - ep_pattern = r'%s(.*?(?:|

    ))' % replace.sub(r'\g<1>×\g<2>', scrapedepisode) - else: - ep_pattern = r'%s(.*?(?:|

    ))' % scrapedepisode - - itemlist.append( - Item(channel=item.channel, - action="findvideos_single_ep", - title=completetitle, - contentSerieName=completetitle, - fulltitle=scrapedtitle, - url=scrapedurl, - extra=ep_pattern, - folder=True)) - - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) - return itemlist - - -def peliculas(item): - logger.info("[italiafilm.py] peliculas") - itemlist = [] - - data = httptools.downloadpage(item.url, headers=headers).data - patron = '' - matches = re.compile(patron, re.DOTALL).findall(data) - - for match in matches: - title = scrapertools.find_single_match(match, '([^<]+)') - title = title.replace("Streaming", "") - title = scrapertools.decodeHtmlentities(title).strip() - url = scrapertools.find_single_match(match, '') # - for url in urls: # Fix - page = httptools.downloadpage(url, headers=headers).data # - data += '\n' + scrapertools.find_single_match(page,'') # - - - for videoitem in servertools.find_video_items(data=data): - videoitem.title = item.title + videoitem.title - videoitem.fulltitle = item.fulltitle - videoitem.thumbnail = item.thumbnail - videoitem.show = item.show - videoitem.plot = item.plot - videoitem.channel = item.channel - videoitem.contentType = item.contentType - itemlist.append(videoitem) - - return itemlist - - -def peliculas_tv(item): - logger.info("[italiafilm.py] peliculas") - itemlist = [] - - data = httptools.downloadpage(item.url, headers=headers).data - patron = '' - matches = re.compile(patron, re.DOTALL).findall(data) - - for match in matches: - title = scrapertools.find_single_match(match, '([^<]+)') - title = title.replace("Streaming", "") - title = scrapertools.decodeHtmlentities(title).strip() - show_title = re.sub('\(.*?\)', '', title.replace('Serie TV', '')) - url = scrapertools.find_single_match(match, ' 0: - scrapedtitle = re.sub(r'<[^>]*>', '', data[:end]).strip() - else: - scrapedtitle = '' - if scrapedtitle == '': - patron = '([^<]+)' - scrapedtitle = scrapertools.find_single_match(data, patron).strip() - title = scrapertools.find_single_match(scrapedtitle, '\d+[^\d]+\d+') - if title == '': - title = scrapedtitle - if title != '': - title = re.sub(r"(\d+)[^\d]+(\d+)", r"\1x\2", title) - title += " (" + lang_title + ")" - itemlist.append( - Item(channel=item.channel, - action="findvideos", - contentType="episode", - title=title, - url=data, - thumbnail=item.thumbnail, - extra=item.extra, - fulltitle=title + ' - ' + item.show, - show=item.show)) - - logger.info("[italiafilm.py] episodios") - - itemlist = [] - - # Carica la pagina - data = httptools.downloadpage(item.url, headers=headers).data.replace('
    ','\n') # fix - - start = data.find('id="pd_rating_holder') - end = data.find('id="linkcorrotto-show"', start) - - data = data[start:end] - - lang_titles = [] - starts = [] - patron = r"STAGION[I|E](.*?ITA)?" - matches = re.compile(patron, re.IGNORECASE).finditer(data) - for match in matches: - season_title = match.group() - # if season_title != '': - lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA') - starts.append(match.end()) - - i = 1 - len_lang_titles = len(lang_titles) - - while i <= len_lang_titles: - inizio = starts[i - 1] - fine = starts[i] if i < len_lang_titles else -1 - - html = data[inizio:fine] - lang_title = lang_titles[i - 1] - - load_episodios(html, item, itemlist, lang_title) - - i += 1 - - if len(itemlist) == 0: - load_episodios(data, item, itemlist, 'ITA') - - if config.get_videolibrary_support() and len(itemlist) != 0: - itemlist.append( - Item(channel=item.channel, - title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161), - url=item.url, - action="add_serie_to_library", - extra="episodios", - show=item.show)) - - return itemlist - - -def findvideos(item): - logger.info("kod.italiafilm findvideos") - - if item.contentType == "movie": - return findvid(item) - - # Carica la pagina - data = item.url - - urls = scrapertools.find_multiple_matches(data, '') # - for url in urls: # Fix - page = httptools.downloadpage(url, headers=headers).data # - data += '\n' + scrapertools.find_single_match(page,'') # - - itemlist = servertools.find_video_items(data=data) - - for videoitem in itemlist: - videoitem.title = item.title + videoitem.title - videoitem.fulltitle = item.fulltitle - videoitem.thumbnail = item.thumbnail - videoitem.show = item.show - videoitem.plot = item.plot - videoitem.channel = item.channel - videoitem.contentType = item.contentType - - return itemlist - - -def findvideos_single_ep(item): - logger.info("[italiafilm.py] findvideos_single_ep") - - data = httptools.downloadpage(item.url).data - - data = scrapertools.find_single_match(data, item.extra) - - itemlist = servertools.find_video_items(data=data) - - for videoitem in itemlist: - server = re.sub(r'[-\[\]\s]+', '', videoitem.title) - videoitem.title = "".join(["[[COLOR orange]%s[/COLOR]] " % server.capitalize(), item.title]) - videoitem.fulltitle = item.fulltitle - videoitem.show = item.show - videoitem.thumbnail = item.thumbnail - videoitem.channel = item.channel - - return itemlist diff --git a/channels/italiaserie.py b/channels/italiaserie.py index 4a072910..8269a6f1 100644 --- a/channels/italiaserie.py +++ b/channels/italiaserie.py @@ -15,12 +15,11 @@ """ import re -from core import support, httptools, scrapertoolsV2 +from core import support, httptools, scrapertools from core.item import Item from platformcode import config -__channel__ = 'italiaserie' -host = config.get_channel_url(__channel__) +host = config.get_channel_url() headers = [['Referer', host]] list_servers = ['speedvideo'] @@ -131,8 +130,8 @@ def findvideos(item): data = httptools.downloadpage(item.url, headers=headers).data data = re.sub('\n|\t', ' ', data) data = re.sub(r'>\s+<', '> <', data) - url_video = scrapertoolsV2.find_single_match(data, r' ', -1) - url_serie = scrapertoolsV2.find_single_match(data, r'') + url_video = scrapertools.find_single_match(data, r' ', -1) + url_serie = scrapertools.find_single_match(data, r'') goseries = support.typo("Vai alla Serie:", ' bold') series = support.typo(item.contentSerieName, ' bold color kod') itemlist = support.server(item, data=url_video) diff 
--git a/channels/mondoserietv.py b/channels/mondoserietv.py index 3b77b9e7..220997f0 100644 --- a/channels/mondoserietv.py +++ b/channels/mondoserietv.py @@ -5,8 +5,7 @@ from core import support -__channel__ = "mondoserietv" -host = support.config.get_channel_url(__channel__) +host = support.config.get_channel_url() IDIOMAS = {'Italiano': 'IT'} list_language = IDIOMAS.values() diff --git a/channels/netfreex.py b/channels/netfreex.py index e0be74ca..ec192c87 100644 --- a/channels/netfreex.py +++ b/channels/netfreex.py @@ -7,8 +7,7 @@ from core import support from core.item import Item from platformcode import logger, config -__channel__ = "netfreex" -host = config.get_channel_url(__channel__) +host = config.get_channel_url() headers = "" IDIOMAS = {'Italiano': 'IT'} diff --git a/channels/piratestreaming.py b/channels/piratestreaming.py index 0c72b846..c8b797a3 100644 --- a/channels/piratestreaming.py +++ b/channels/piratestreaming.py @@ -7,8 +7,7 @@ from core import support from core.support import config, log -__channel__ = "piratestreaming" -host = config.get_channel_url(__channel__) +host = config.get_channel_url() list_servers = ['mixdrop', 'speedvideo', 'gounlimited', 'onlystream', 'youtube'] list_quality = ['default'] diff --git a/channels/polpotv.py b/channels/polpotv.py index 98ed41e0..71e9560d 100644 --- a/channels/polpotv.py +++ b/channels/polpotv.py @@ -9,8 +9,7 @@ from core.item import Item from platformcode import config import json, datetime -__channel__ = "polpotv" -host = config.get_channel_url(__channel__) +host = config.get_channel_url() headers = [['Accept', 'application/ld+json']] diff --git a/channels/programmazione.json b/channels/programmazione.json deleted file mode 100644 index 6afa5cf7..00000000 --- a/channels/programmazione.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "id": "programmazione", - "name": "Programmazione", - "language": ["ita"], - "active": true, - "adult": false, - "thumbnail": "http://www.smartworld.it/wp-content/uploads/2015/02/codice-code-programmazione-fhd-720x480.png", - "banner": "http://www.smartworld.it/wp-content/uploads/2015/02/codice-code-programmazione-fhd-720x480.png", - "categories": ["documentary"], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": false, - "enabled": false, - "visible": false - } - ] -} - diff --git a/channels/programmazione.py b/channels/programmazione.py deleted file mode 100644 index 15aa160d..00000000 --- a/channels/programmazione.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -# ------------------------------------------------------------ -# Ringraziamo Icarus crew -# Canale Video Corsi Programmazione -# Creato da iSOD -# https://alfa-addon.com/categories/kod-addon.50/. 
-# ------------------------------------------------------------ -import re - -from core import httptools, scrapertools -from core.item import Item -from platformcode import logger -from platformcode import config - -site = "https://www.youtube.com" - - -def mainlist(item): - logger.info("kod.programmazione mainlist") - itemlist = [] - itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Html 5[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL7A4A3449C649048F", thumbnail="http://i.ytimg.com/vi/TyCvfNt20nM/mqdefault.jpg")) - itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Css[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PLD74C5E763D39793D", thumbnail="http://i.ytimg.com/vi/hd8k82aG_O4/mqdefault.jpg")) - itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Javascript[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL1A447BA7F7F9EB9E", thumbnail="http:////i.ytimg.com/vi/eXlzdxyThLM/mqdefault.jpg")) - itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso PHP[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL0qAPtx8YtJc664i2Cv0X0ibM9b1YqRyd", thumbnail="http://i.ytimg.com/vi/0nA1gPWdBWw/mqdefault.jpg")) - itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso PHP Mysql[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL101314D973955661", thumbnail="http://i.ytimg.com/vi/QIxmITjITY8/mqdefault.jpg")) - itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Jquery[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PLC959BB22285B353F", thumbnail="http://i.ytimg.com/vi/mxl2IcNdbrk/mqdefault.jpg")) - itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Java da Zero[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL0qAPtx8YtJe2dpE7di4aPJwrQuRD6IDD", thumbnail="http://i.ytimg.com/vi/7PGPLqFpDMc/mqdefault.jpg")) - itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Java 2 OOP[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL0qAPtx8YtJee1dk24wX-68yHTnMfzdX5", thumbnail="http://i.ytimg.com/vi/h6VoxIAUZoo/mqdefault.jpg")) - itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Java Interfaccia Grafica[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL0qAPtx8YtJfRML8EDs7v9nwjdOt6dvaf", thumbnail="http://i.ytimg.com/vi/fS7OxhbIlw4/mqdefault.jpg")) - itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Java Android[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL0qAPtx8YtJeqmBWbE1Rbac2QWHoPCjR2", thumbnail="http://i.ytimg.com/vi/GINLfdq-elE/mqdefault.jpg")) - itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Progettazione DB[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL0qAPtx8YtJcJPSV4sOfhLtPbtQ-yycFH", thumbnail="http://i.ytimg.com/vi/FnkL4YdWAwE/mqdefault.jpg")) - itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso SQL[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PLE555DB6188C967AC", thumbnail="http://i.ytimg.com/vi/jM55Fb9YTfE/mqdefault.jpg")) - itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Python[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PLC64779F4E2E7EB10", thumbnail="http://i.ytimg.com/vi/_iX9CSX09Z8/mqdefault.jpg")) - itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Unit 
3D[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL0qAPtx8YtJcbl6ZHwtFIkFxWY-adCeS7", thumbnail="http://i.ytimg.com/vi/QiFBrHp3IGk/mqdefault.jpg")) - - return itemlist - - -def corsi(item): - logger.info("kod.programmazione peliculas") - itemlist = [] - - # scarrico il canale - html = httptools.downloadpage(item.url).data - - # Estraggo l'elenco dei video e titoli - patron = ']+>(.*?)' - trovati = re.compile(patron, re.DOTALL).findall(html) - scrapertools.printMatches(trovati) - max = len(trovati) - min = 0 - - # ciclo sull'elenco trovato - for VideoUrl, VideoTitolo in trovati: - # Decodifico Html - titolo = scrapertools.decodeHtmlentities(VideoTitolo) - # contatore - min += 1 - # aggiungo alla lista - itemlist.append( - Item(channel=item.channel, - action="findvideos", - fulltitle=titolo, - show=titolo, - title="[COLOR azure]" + item.title + " - " + str(min) + "x" + str(max) + "[/COLOR]", - url=site + VideoUrl, - thumbnail=item.thumbnail, - plot=titolo, - folder=True)) - - return itemlist diff --git a/channels/renumbertools.py b/channels/renumbertools.py deleted file mode 100644 index 89a391ac..00000000 --- a/channels/renumbertools.py +++ /dev/null @@ -1,1001 +0,0 @@ -# -*- coding: utf-8 -*- -# -------------------------------------------------------------------------------- -# renumeratetools - se encarga de renumerar episodios -# -------------------------------------------------------------------------------- - -import os - -try: - import xbmcgui -except: - xbmcgui = None - -from platformcode import config, logger -from core import jsontools -from core.item import Item -from platformcode import platformtools - -TAG_TVSHOW_RENUMERATE = "TVSHOW_RENUMBER" -TAG_SEASON_EPISODE = "season_episode" -__channel__ = "renumbertools" - - -def access(): - """ - Devuelve si se puede usar o no renumbertools - """ - allow = False - - if config.is_xbmc(): - allow = True - - return allow - - -def context(item): - """ - Para xbmc/kodi que pueden mostrar el menú contextual, se añade un menu para configuración - la opción de renumerar, sólo si es para series. - - @param item: elemento para obtener la información y ver que contexto añadir - @type item: item - @return: lista de opciones a mostrar en el menú contextual - @rtype: list - """ - - # Dependiendo de como sea el contexto lo guardamos y añadimos las opciones de filtertools. 
- if type(item.context) == str: - _context = item.context.split("|") - elif type(item.context) == list: - _context = item.context - else: - _context = [] - - if access(): - dict_data = {"title": config.get_localized_string(70585), "action": "config_item", "channel": "renumbertools"} - _context.append(dict_data) - - return _context - - -def show_option(channel, itemlist): - if access(): - itemlist.append(Item(channel=__channel__, title="[COLOR yellow]" + config.get_localized_string(70586)+ "[/COLOR]", - action="load", from_channel=channel)) - - return itemlist - - -def load(item): - return mainlist(channel=item.from_channel) - - -def mainlist(channel): - """ - Muestra una lista de las series renumeradas - - :param channel: nombre del canal para obtener las series renumeradas - :type channel: str - :return: lista de Item - :rtype: list[Item] - """ - logger.info() - itemlist = [] - dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE) - - idx = 0 - for tvshow in sorted(dict_series): - tag_color = "0xff008000" - if idx % 2 == 0: - tag_color = "blue" - - idx += 1 - name = tvshow - title = config.get_localized_string(70587)+" [COLOR %s][%s][/COLOR]" % (tag_color, name) - - itemlist.append(Item(channel=__channel__, action="config_item", title=title, show=name, from_channel=channel)) - - if len(itemlist) == 0: - itemlist.append(Item(channel=channel, action="mainlist", - title=config.get_localized_string(70588) + ' ' + config.get_localized_string(70585))) - - return itemlist - - -def config_item(item): - """ - muestra una serie renumerada para su configuración - - :param item: item - :type item: Item - """ - logger.info("item %s" % item.tostring("\n")) - - dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_RENUMERATE) - data = dict_series.get(item.show, {}) - - if data: - data = data.get(TAG_SEASON_EPISODE, []) - - ventana = RenumberWindow(show=item.show, channel=item.from_channel, data=data) - del ventana - else: - # tenemos información y devolvemos los datos añadidos para que se muestre en la ventana - if data: - return add_season(data) - # es la primera vez que se añaden datos (usando menú contextual) por lo que no devolvemos nada - # para evitar error al listar los items - else: - data = add_season(data) - write_data(item.from_channel, item.show, data) - - -def numbered_for_tratk(channel, show, season, episode): - """ - Devuelve la temporada y episodio convertido para que se marque correctamente en tratk.tv - - @param channel: Nombre del canal - @type channel: str - @param show: Nombre de la serie a comprobar - @type show: str - @param season: Temporada que devuelve el scrapper - @type season: int - @param episode: Episodio que devuelve el scrapper - @type episode: int - @return: season, episode - @rtype: int, int - """ - logger.info() - - if access(): - show = show.lower() - - new_season = season - new_episode = episode - dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE) - - # ponemos en minusculas el key, ya que previamente hemos hecho lo mismo con show. 
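Note: numbered_for_tratk, the function being removed here, walks the per-show [season, episodes-before-it] pairs (kept sorted in descending order by write_data further down) and converts the scraper's absolute episode number into a (season, episode) pair for trakt.tv. A minimal standalone sketch of the same remapping, with hypothetical offsets matching the Fairy Tail sample quoted in method_info near the end of this file:

    # One [season, episodes_aired_before_it] pair per season, sorted
    # descending, as write_data stores them (hypothetical values).
    season_episode = [[3, 96], [2, 48], [1, 0]]

    def remap(absolute_episode):
        # The first row whose offset the absolute number exceeds wins;
        # subtracting that offset yields the episode within the season.
        for season, offset in season_episode:
            if absolute_episode > offset:
                return season, absolute_episode - offset
        return 1, absolute_episode

    print(remap(100))  # -> (3, 4): absolute episode 100 is S03E04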
- for key in dict_series.keys(): - new_key = key.lower() - if new_key != key: - dict_series[new_key] = dict_series[key] - del dict_series[key] - - if show in dict_series: - logger.debug(config.get_localized_string(70589) + " %s" % dict_series[show]) - - if len(dict_series[show]['season_episode']) > 1: - for row in dict_series[show]['season_episode']: - - if new_episode > row[1]: - new_episode -= row[1] - new_season = row[0] - break - - else: - new_season = dict_series[show]['season_episode'][0][0] - new_episode += dict_series[show]['season_episode'][0][1] - - logger.debug("%s:%s" % (new_season, new_episode)) - else: - # no se tiene acceso se devuelven los datos. - new_season = season - new_episode = episode - - return new_season, new_episode - - -def borrar(channel, show): - logger.info() - heading = config.get_localized_string(70590) - line1 = config.get_localized_string(70591) + ' [COLOR blue]' + show.strip() + '[/COLOR], ' + config.get_localized_string(70592) - - if platformtools.dialog_yesno(heading, line1) == 1: - dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE) - dict_series.pop(show, None) - - result, json_data = jsontools.update_node(dict_series, channel, TAG_TVSHOW_RENUMERATE) - - if result: - message = config.get_localized_string(60444) - else: - message = config.get_localized_string(70593) - - heading = show.strip() - platformtools.dialog_notification(heading, message) - - -def add_season(data=None): - logger.debug("data %s" % data) - heading = config.get_localized_string(70594) - # default = 2 - # se reordena la lista - list_season_episode = data - if list_season_episode: - list_season_episode.sort(key=lambda el: int(el[0]), reverse=False) - - # if list_season_episode: - # # mostrar temporada + 1 de la lista - # # TODO buscar la primera posicion libre - # default = list_season_episode[0][0]+1 - - season = platformtools.dialog_numeric(0, heading) # , str(default)) - for element in list_season_episode: - if int(season) == element[0]: - platformtools.dialog_notification(config.get_localized_string(70595), config.get_localized_string(70596)) - return - - # si hemos insertado un valor en la temporada - if season != "" and int(season) > 0: - heading = config.get_localized_string(70597) - # default = 0 - # if list_season_episode: - # for e in list_season_episode: - # # mostrar suma episodios de la lista - # # sumar hasta el indice del primer libre encontrado - # default += e[1] - episode = platformtools.dialog_numeric(0, heading) # , str(default)) - - # si hemos insertado un valor en el episodio - if episode != "": - if list_season_episode: - list_season_episode.insert(0, [int(season), int(episode)]) - new_list_season_episode = list_season_episode[:] - return new_list_season_episode - else: - return [[int(season), int(episode)]] - - -def write_data(channel, show, data): - # OBTENEMOS LOS DATOS DEL JSON - dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE) - tvshow = show.strip() - list_season_episode = dict_series.get(tvshow, {}).get(TAG_SEASON_EPISODE, []) - logger.debug("data %s" % list_season_episode) - - if data: - # cambiamos el orden para que se vea en orden descendente y usarse bien en el _data.json - data.sort(key=lambda el: int(el[0]), reverse=True) - dict_renumerate = {TAG_SEASON_EPISODE: data} - - dict_series[tvshow] = dict_renumerate - else: - # hemos borrado todos los elementos, por lo que se borra la serie del fichero - dict_series.pop(tvshow, None) - - result, json_data = jsontools.update_node(dict_series, channel, 
TAG_TVSHOW_RENUMERATE) - - if result: - if data: - message = config.get_localized_string(60446) - else: - message = config.get_localized_string(60444) - else: - message = config.get_localized_string(70593) - - heading = show.strip() - platformtools.dialog_notification(heading, message) - - -if xbmcgui: - - # Align - ALIGN_LEFT = 0 - ALIGN_RIGHT = 1 - ALIGN_CENTER_X = 2 - ALIGN_CENTER_Y = 4 - ALIGN_CENTER = 6 - ALIGN_TRUNCATED = 8 - ALIGN_JUSTIFY = 10 - - # button ids - ID_BUTTON_CLOSE = 3003 - ID_BUTTON_ADD_SEASON = 3008 - ID_BUTTON_INFO = 3009 - ID_CHECK_UPDATE_INTERNET = 3010 - ID_BUTTON_OK = 3012 - ID_BUTTON_CANCEL = 3013 - ID_BUTTON_DELETE = 3014 - - - class RenumberWindow(xbmcgui.WindowDialog): - def __init__(self, *args, **kwargs): - logger.debug() - - #### Compatibilidad con Kodi 18 #### - if config.get_platform(True)['num_version'] < 18: - if xbmcgui.__version__ == "1.2": - self.setCoordinateResolution(1) - else: - self.setCoordinateResolution(5) - - self.show = kwargs.get("show") - self.channel = kwargs.get("channel") - self.data = kwargs.get("data") - self.init = True - - self.mediapath = os.path.join(config.get_runtime_path(), 'resources', 'skins', 'Default', 'media') - self.font = "font12" - - window_bg = xbmcgui.ControlImage(320, 130, 600, 440, - os.path.join(self.mediapath, 'Windows', 'DialogBack.png')) - self.addControl(window_bg) - - header_bg = xbmcgui.ControlImage(window_bg.getX(), window_bg.getY() + 8, window_bg.getWidth(), 35, - os.path.join(self.mediapath, 'Windows', 'dialogheader.png')) - self.addControl(header_bg) - - btn_close_w = 64 - self.btn_close = xbmcgui.ControlButton(window_bg.getX() + window_bg.getWidth() - btn_close_w - 13, - header_bg.getY() + 6, btn_close_w, 30, '', - focusTexture=os.path.join(self.mediapath, 'Controls', - 'DialogCloseButton-focus.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'DialogCloseButton.png')) - self.addControl(self.btn_close) - - header_title_x = window_bg.getX() + 20 - header_title = xbmcgui.ControlFadeLabel(header_title_x, header_bg.getY() + 5, self.btn_close.getX() - - header_title_x, 30, font="font12_title", textColor="0xFFFFA500", - _alignment=ALIGN_CENTER) - self.addControl(header_title) - header_title.addLabel(self.show) - - self.controls_bg = xbmcgui.ControlImage(window_bg.getX() + 20, header_bg.getY() + header_bg.getHeight() + 6, - 562, 260, - os.path.join(self.mediapath, 'Windows', 'BackControls.png')) - self.addControl(self.controls_bg) - - self.scroll_bg = xbmcgui.ControlImage(window_bg.getX() + window_bg.getWidth() - 25, self.controls_bg.getY(), - 10, - self.controls_bg.getHeight(), os.path.join(self.mediapath, 'Controls', - 'ScrollBack.png')) - self.addControl(self.scroll_bg) - self.scroll_bg.setVisible(False) - - self.scroll2_bg = xbmcgui.ControlImage(window_bg.getX() + window_bg.getWidth() - 25, - self.controls_bg.getY(), - 10, self.controls_bg.getHeight(), os.path.join(self.mediapath, - 'Controls', - 'ScrollBar.png')) - self.addControl(self.scroll2_bg) - self.scroll2_bg.setVisible(False) - - btn_add_season = xbmcgui.ControlButton(window_bg.getX() + 20, self.controls_bg.getY() + - self.controls_bg.getHeight() + 14, 165, 30, config.get_localized_string(70600), - font=self.font, focusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKey.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKeyNF.png'), - alignment=ALIGN_CENTER) - self.addControl(btn_add_season) - - self.btn_info = xbmcgui.ControlButton(window_bg.getX() + 210, btn_add_season.getY(), 120, 30, 
config.get_localized_string(60348), - font=self.font, focusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKey.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKeyNF.png'), - alignment=ALIGN_CENTER) - self.addControl(self.btn_info) - - check_update_internet_w = 235 - # Versiones antiguas no admite algunas texturas - if xbmcgui.__version__ in ["1.2", "2.0"]: - self.check_update_internet = xbmcgui.ControlRadioButton( - window_bg.getX() + window_bg.getWidth() - check_update_internet_w - 20, btn_add_season.getY() - 3, - check_update_internet_w, 34, config.get_localized_string(70601), font=self.font, - focusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemFO.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemNF.png')) - else: - self.check_update_internet = xbmcgui.ControlRadioButton( - window_bg.getX() + window_bg.getWidth() - check_update_internet_w - 20, btn_add_season.getY() - 3, - check_update_internet_w, 34, config.get_localized_string(70601), font=self.font, - focusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemFO.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemNF.png'), - focusOnTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-focus.png'), - noFocusOnTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-focus.png'), - focusOffTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-nofocus.png'), - noFocusOffTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-nofocus.png')) - - self.addControl(self.check_update_internet) - self.check_update_internet.setEnabled(False) - - hb_bg = xbmcgui.ControlImage(window_bg.getX() + 20, btn_add_season.getY() + btn_add_season.getHeight() + 13, - window_bg.getWidth() - 40, 2, - os.path.join(self.mediapath, 'Controls', 'ScrollBack.png')) - self.addControl(hb_bg) - - self.btn_ok = xbmcgui.ControlButton(window_bg.getX() + 68, hb_bg.getY() + hb_bg.getHeight() + 13, 120, 30, - 'OK', font=self.font, - focusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKey.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKeyNF.png'), - alignment=ALIGN_CENTER) - self.addControl(self.btn_ok) - - self.btn_cancel = xbmcgui.ControlButton(self.btn_info.getX() + 30, self.btn_ok.getY(), 120, 30, config.get_localized_string(70002), - font=self.font, - focusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKey.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKeyNF.png'), - alignment=ALIGN_CENTER) - self.addControl(self.btn_cancel) - - self.btn_delete = xbmcgui.ControlButton(self.btn_cancel.getX() + self.btn_cancel.getWidth() + 50, - self.btn_ok.getY(), 120, 30, config.get_localized_string(60437), font=self.font, - focusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKey.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKeyNF.png'), - alignment=ALIGN_CENTER) - self.addControl(self.btn_delete) - - self.controls = [] - self.onInit() - self.setFocus(self.controls[0].edit_season) - self.doModal() - - def onInit(self, *args, **kwargs): - try: - # listado temporada / episodios - pos_y = self.controls_bg.getY() + 10 - - # eliminamos los componentes al repintar la ventana - for linea in self.controls: - self.removeControls(linea.list_elements()) - - # mostramos el scroll si hay más de 5 elementos - if len(self.data) > 5: - self.controls_bg.setWidth(545) - self.scroll_bg.setVisible(True) - self.scroll2_bg.setVisible(True) - else: - 
self.controls_bg.setWidth(562) - self.scroll_bg.setVisible(False) - self.scroll2_bg.setVisible(False) - - self.controls = [] - # cambiamos el orden para que se vea en orden ascendente - self.data.sort(key=lambda el: int(el[0]), reverse=False) - - for index, e in enumerate(self.data): - pos_x = self.controls_bg.getX() + 15 - label_season_w = 100 - label_season = xbmcgui.ControlLabel(pos_x, pos_y + 3, label_season_w, 34, - config.get_localized_string(60385), font=self.font, textColor="0xFF2E64FE") - self.addControl(label_season) - label_season.setVisible(False) - - pos_x += label_season_w + 5 - - # TODO mirar retro-compatilibidad - # if xbmcgui.ControlEdit == ControlEdit: - # edit_season = xbmcgui.ControlEdit(0, 0, 0, 0, '', font=self.font, isPassword=False, - # textColor='', - # focusTexture=os.path.join(self.mediapath, 'Controls', - # 'MenuItemFO.png'), - # noFocusTexture=os.path.join(self.mediapath, 'Controls', - # 'MenuItemNF.png'), window=self) - # else: - - # control bugeado se tiene que usar metodos sets para que se cree correctamente. - edit_season = xbmcgui.ControlEdit(0, 0, 0, 0, "", self.font, "", '', 4, isPassword=False, - focusTexture=os.path.join(self.mediapath, 'Controls', - 'MenuItemFO.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'MenuItemNF.png')) - self.addControl(edit_season) - edit_season.setText(str(e[0])) - # edit_season.setLabel("Temporada:", font=self.font, textColor="0xFF2E64FE") - edit_season.setPosition(pos_x, pos_y - 2) - edit_season.setWidth(25) - edit_season.setHeight(35) - edit_season.setVisible(False) - - label_episode_w = 90 - pos_x += edit_season.getWidth() + 60 - label_episode = xbmcgui.ControlLabel(pos_x, pos_y + 3, label_episode_w, 34, config.get_localized_string(70598), - font=self.font, textColor="0xFF2E64FE") - self.addControl(label_episode) - label_episode.setVisible(False) - - pos_x += label_episode_w + 5 - # control bugeado se tiene que usar metodos sets para que se cree correctamente. 
- edit_episode = xbmcgui.ControlEdit(0, 0, 0, 0, "", self.font, "", '', 4, isPassword=False, - focusTexture=os.path.join(self.mediapath, 'Controls', - 'MenuItemFO.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'MenuItemNF.png')) - self.addControl(edit_episode) - edit_episode.setText(str(e[1])) - # edit_episode.setLabel("Episodios:", font=self.font, textColor="0xFF2E64FE") - edit_episode.setPosition(pos_x, pos_y - 2) - edit_episode.setWidth(40) - edit_episode.setHeight(35) - edit_episode.setVisible(False) - - btn_delete_season_w = 120 - btn_delete_season = xbmcgui.ControlButton(self.controls_bg.getX() + self.controls_bg.getWidth() - - btn_delete_season_w - 14, pos_y, btn_delete_season_w, 30, - config.get_localized_string(70599), font=self.font, - focusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKey.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKeyNF.png'), - alignment=ALIGN_CENTER) - self.addControl(btn_delete_season) - btn_delete_season.setVisible(False) - - hb_bg = xbmcgui.ControlImage(self.controls_bg.getX() + 10, pos_y + 40, - self.controls_bg.getWidth() - 20, - 2, os.path.join(self.mediapath, 'Controls', 'ScrollBack.png')) - self.addControl(hb_bg) - hb_bg.setVisible(False) - - group = ControlGroup(label_season=label_season, edit_season=edit_season, - label_episode=label_episode, - edit_episode=edit_episode, btn_delete_season=btn_delete_season, hb=hb_bg) - - pos_y += 50 - - if index < 5: - group.set_visible(True) - - self.controls.append(group) - - if len(self.data) > 5: - self.move_scroll() - - except Exception, Ex: - logger.error("HA HABIDO UNA HOSTIA %s" % Ex) - - # def onClick(self, control_id): - # pass - # - # def onFocus(self, control_id): - # pass - - def onControl(self, control): - # logger.debug("%s" % control.getId()) - control_id = control.getId() - - if control_id == ID_BUTTON_OK: - write_data(self.channel, self.show, self.data) - self.close() - if control_id in [ID_BUTTON_CLOSE, ID_BUTTON_CANCEL]: - self.close() - elif control_id == ID_BUTTON_DELETE: - self.close() - borrar(self.channel, self.show) - elif control_id == ID_BUTTON_ADD_SEASON: - # logger.debug("data que enviamos: {}".format(self.data)) - data = add_season(self.data) - if data: - self.data = data - # logger.debug("data que recibimos: {}".format(self.data)) - self.onInit() - - # si hay más de 5 elementos movemos el scroll - if len(self.data) > 5: - self.scroll(len(self.data) - 2, 1) - self.move_scroll() - - elif control_id == ID_BUTTON_INFO: - self.method_info() - else: - for x, grupo in enumerate(self.controls): - if control_id == self.controls[x].btn_delete_season.getId(): - # logger.debug("A data %s" % self.data) - self.removeControls(self.controls[x].list_elements()) - del self.controls[x] - del self.data[x] - # logger.debug("D data %s" % self.data) - self.onInit() - - return - - def onAction(self, action): - # logger.debug("%s" % action.getId()) - # logger.debug("focus %s" % self.getFocusId()) - # Obtenemos el foco - focus = self.getFocusId() - - action = action.getId() - # Flecha izquierda - if action == xbmcgui.ACTION_MOVE_LEFT: - # Si el foco no está en ninguno de los 6 botones inferiores, y esta en un "list" cambiamos el valor - if focus not in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET, - ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: - - # Localizamos en el listado de controles el control que tiene el focus - # todo mirar tema del cursor en el valor al desplazar lateralmente - for x, linea in 
enumerate(self.controls): - if focus == linea.edit_season.getId(): - return self.setFocus(self.controls[x].btn_delete_season) - elif focus == linea.edit_episode.getId(): - return self.setFocus(self.controls[x].edit_season) - elif focus == linea.btn_delete_season.getId(): - return self.setFocus(self.controls[x].edit_episode) - - # Si el foco está en alguno de los 6 botones inferiores, movemos al siguiente - else: - if focus in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET]: - if focus == ID_BUTTON_ADD_SEASON: - self.setFocusId(ID_BUTTON_INFO) - # TODO cambiar cuando se habilite la opcion de actualizar por internet - # self.setFocusId(ID_CHECK_UPDATE_INTERNET) - elif focus == ID_BUTTON_INFO: - self.setFocusId(ID_BUTTON_ADD_SEASON) - elif focus == ID_CHECK_UPDATE_INTERNET: - self.setFocusId(ID_BUTTON_INFO) - - elif focus in [ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: - if focus == ID_BUTTON_OK: - self.setFocusId(ID_BUTTON_DELETE) - elif focus == ID_BUTTON_CANCEL: - self.setFocusId(ID_BUTTON_OK) - elif focus == ID_BUTTON_DELETE: - self.setFocusId(ID_BUTTON_CANCEL) - - # Flecha derecha - elif action == xbmcgui.ACTION_MOVE_RIGHT: - # Si el foco no está en ninguno de los 6 botones inferiores, y esta en un "list" cambiamos el valor - if focus not in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET, - ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: - - # Localizamos en el listado de controles el control que tiene el focus - # todo mirar tema del cursor en el valor al desplazar lateralmente - for x, linea in enumerate(self.controls): - if focus == linea.edit_season.getId(): - return self.setFocus(self.controls[x].edit_episode) - elif focus == linea.edit_episode.getId(): - return self.setFocus(self.controls[x].btn_delete_season) - elif focus == linea.btn_delete_season.getId(): - return self.setFocus(self.controls[x].edit_season) - - # Si el foco está en alguno de los 6 botones inferiores, movemos al siguiente - else: - if focus in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET]: - if focus == ID_BUTTON_ADD_SEASON: - self.setFocusId(ID_BUTTON_INFO) - if focus == ID_BUTTON_INFO: - self.setFocusId(ID_BUTTON_ADD_SEASON) - # TODO cambiar cuando se habilite la opcion de actualizar por internet - # self.setFocusId(ID_CHECK_UPDATE_INTERNET) - if focus == ID_CHECK_UPDATE_INTERNET: - self.setFocusId(ID_BUTTON_OK) - - elif focus in [ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: - if focus == ID_BUTTON_OK: - self.setFocusId(ID_BUTTON_CANCEL) - if focus == ID_BUTTON_CANCEL: - self.setFocusId(ID_BUTTON_DELETE) - if focus == ID_BUTTON_DELETE: - self.setFocusId(ID_BUTTON_OK) - - # Flecha arriba - elif action == xbmcgui.ACTION_MOVE_UP: - self.move_up(focus) - # Flecha abajo - elif action == xbmcgui.ACTION_MOVE_DOWN: - self.move_down(focus) - # scroll up - elif action == xbmcgui.ACTION_MOUSE_WHEEL_UP: - self.move_up(focus) - # scroll down - elif action == xbmcgui.ACTION_MOUSE_WHEEL_DOWN: - self.move_down(focus) - - # ACTION_PAGE_DOWN = 6 - # ACTION_PAGE_UP = 5 - - # Menú previo o Atrás - elif action in [xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK]: - self.close() - - def move_down(self, focus): - # logger.debug("focus " + str(focus)) - # Si el foco está en uno de los tres botones medios, bajamos el foco a la otra linea de botones - if focus in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET]: - if focus == ID_BUTTON_ADD_SEASON: - self.setFocusId(ID_BUTTON_OK) - elif focus == ID_BUTTON_INFO: - self.setFocusId(ID_BUTTON_CANCEL) - 
elif focus == ID_CHECK_UPDATE_INTERNET: - self.setFocusId(ID_BUTTON_DELETE) - # Si el foco está en uno de los tres botones inferiores, subimos el foco al primer control del listado - elif focus in [ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: - first_visible = 0 - for x, linea in enumerate(self.controls): - if linea.get_visible(): - first_visible = x - break - - if focus == ID_BUTTON_OK: - self.setFocus(self.controls[first_visible].edit_season) - elif focus == ID_BUTTON_CANCEL: - self.setFocus(self.controls[first_visible].edit_episode) - elif focus == ID_BUTTON_DELETE: - self.setFocus(self.controls[first_visible].btn_delete_season) - # nos movemos entre los elementos del listado - else: - # Localizamos en el listado de controles el control que tiene el focus - for x, linea in enumerate(self.controls): - if focus == linea.edit_season.getId(): - if x + 1 < len(self.controls): - if not self.controls[x + 1].get_visible(): - self.scroll(x, 1) - - return self.setFocus(self.controls[x + 1].edit_season) - else: - return self.setFocusId(ID_BUTTON_ADD_SEASON) - elif focus == linea.edit_episode.getId(): - if x + 1 < len(self.controls): - if not self.controls[x + 1].get_visible(): - self.scroll(x, 1) - - return self.setFocus(self.controls[x + 1].edit_episode) - else: - self.setFocusId(ID_BUTTON_INFO) - elif focus == linea.btn_delete_season.getId(): - if x + 1 < len(self.controls): - if not self.controls[x + 1].get_visible(): - self.scroll(x, 1) - - return self.setFocus(self.controls[x + 1].btn_delete_season) - else: - return self.setFocusId(ID_BUTTON_INFO) - # TODO cambiar cuando se habilite la opcion de actualizar por internet - # return self.setFocusId(ID_CHECK_UPDATE_INTERNET) - - def move_up(self, focus): - # Si el foco está en uno de los tres botones medios, subimos el foco al último control del listado - if focus in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET]: - last_visible = 0 - for x, linea in reversed(list(enumerate(self.controls))): - if linea.get_visible(): - last_visible = x - break - - if focus == ID_BUTTON_ADD_SEASON: - self.setFocus(self.controls[last_visible].edit_season) - elif focus == ID_BUTTON_INFO: - self.setFocus(self.controls[last_visible].edit_episode) - elif focus == ID_CHECK_UPDATE_INTERNET: - self.setFocus(self.controls[last_visible].btn_delete_season) - # Si el foco está en uno de los tres botones inferiores, subimos el foco a la otra linea de botones - elif focus in [ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: - if focus == ID_BUTTON_OK: - self.setFocusId(ID_BUTTON_ADD_SEASON) - elif focus == ID_BUTTON_CANCEL: - self.setFocusId(ID_BUTTON_INFO) - elif focus == ID_BUTTON_DELETE: - self.setFocusId(ID_BUTTON_INFO) - # TODO cambiar cuando se habilite la opcion de actualizar por internet - # self.setFocusId(ID_CHECK_UPDATE_INTERNET) - # nos movemos entre los elementos del listado - else: - # Localizamos en el listado de controles el control que tiene el focus - for x, linea in enumerate(self.controls): - if focus == linea.edit_season.getId(): - if x > 0: - if not self.controls[x - 1].get_visible(): - self.scroll(x, -1) - - return self.setFocus(self.controls[x - 1].edit_season) - else: - return self.setFocusId(ID_BUTTON_OK) - elif focus == linea.edit_episode.getId(): - if x > 0: - if not self.controls[x - 1].get_visible(): - self.scroll(x, -1) - - return self.setFocus(self.controls[x - 1].edit_episode) - else: - self.setFocusId(ID_BUTTON_CANCEL) - elif focus == linea.btn_delete_season.getId(): - if x > 0: - if not self.controls[x - 
1].get_visible(): - self.scroll(x, -1) - - return self.setFocus(self.controls[x - 1].btn_delete_season) - else: - return self.setFocusId(ID_BUTTON_DELETE) - # TODO cambiar cuando se habilite la opcion de actualizar por internet - # return self.setFocusId(ID_CHECK_UPDATE_INTERNET) - - def scroll(self, position, movement): - try: - for index, group in enumerate(self.controls): - # ponemos todos los elementos como no visibles - group.set_visible(False) - - if movement > 0: - pos_fin = position + movement + 1 - pos_inicio = pos_fin - 5 - else: - pos_inicio = position + movement - pos_fin = pos_inicio + 5 - - # logger.debug("position {}, movement {}, pos_inicio{}, pos_fin{}, self.data.length{}". - # format(position, movement, pos_inicio, pos_fin, len(self.data))) - pos_y = self.controls_bg.getY() + 10 - for i in range(pos_inicio, pos_fin): - pos_x = self.controls_bg.getX() + 15 - - self.controls[i].label_season.setPosition(pos_x, pos_y + 3) - - pos_x += self.controls[i].label_season.getWidth() + 5 - self.controls[i].edit_season.setPosition(pos_x, pos_y - 2) - - pos_x += self.controls[i].edit_season.getWidth() + 60 - self.controls[i].label_episode.setPosition(pos_x, pos_y + 3) - - pos_x += self.controls[i].label_episode.getWidth() + 5 - self.controls[i].edit_episode.setPosition(pos_x, pos_y - 2) - - self.controls[i].btn_delete_season.setPosition( - self.controls_bg.getX() + self.controls_bg.getWidth() - - self.controls[i].btn_delete_season.getWidth() - 14, - pos_y) - - self.controls[i].hb.setPosition(self.controls_bg.getX() + 10, pos_y + 40) - - pos_y += 50 - - # logger.debug("ponemos como True %s" % i) - self.controls[i].set_visible(True) - - self.move_scroll() - - except Exception, Ex: - logger.error("HA HABIDO UNA HOSTIA %s" % Ex) - - def move_scroll(self): - visible_controls = [group for group in self.controls if group.get_visible() == True] - hidden_controls = [group for group in self.controls if group.get_visible() == False] - scroll_position = self.controls.index(visible_controls[0]) - scrollbar_height = self.scroll_bg.getHeight() - (len(hidden_controls) * 10) - scrollbar_y = self.scroll_bg.getPosition()[1] + (scroll_position * 10) - self.scroll2_bg.setPosition(self.scroll_bg.getPosition()[0], scrollbar_y) - self.scroll2_bg.setHeight(scrollbar_height) - - @staticmethod - def method_info(): - title = config.get_localized_string(60348) - # text = "La primera temporada que se añade siempre empieza en \"0\" episodios, la segunda temporada que se " - # text += "añade empieza en el número total de episodios de la primera temporada, la tercera temporada será " - # text += "la suma de los episodios de las temporadas previas y así sucesivamente.\n" - # text += "[COLOR blue]\nEjemplo de serie divida en varias temporadas:\n" - # text += "\nFairy Tail:\n" - # text += " - SEASON 1: EPISODE 48 --> [season 1, episode: 0]\n" - # text += " - SEASON 2: EPISODE 48 --> [season 2, episode: 48]\n" - # text += " - SEASON 3: EPISODE 54 --> [season 3, episode: 96 ([48=season2] + [48=season1])]\n" - # text += " - SEASON 4: EPISODE 175 --> [season 4: episode: 150 ([54=season3] + [48=season2] + [48=season3" \ - # "])][/COLOR]\n" - # text += "[COLOR green]\nEjemplo de serie que continua en la temporada de la original:\n" - # text += "\nFate/Zero 2nd Season:\n" - # text += " - SEASON 1: EPISODE 12 --> [season 1, episode: 13][/COLOR]\n" - - # text += "[COLOR blue]\nEjemplo de serie que es la segunda temporada de la original:\n" - # text += "\nFate/kaleid liner Prisma☆Illya 2wei!:\n" - # text += " - SEASON 1: EPISODE 12 
--> [season 2, episode: 0][/COLOR]\n" - text = config.get_localized_string(70602) - - return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text) - - - class ControlGroup: - """ - conjunto de controles, son los elementos que se muestra por línea de una lista. - """ - - def __init__(self, label_season, edit_season, label_episode, edit_episode, btn_delete_season, hb): - self.visible = False - self.label_season = label_season - self.edit_season = edit_season - self.label_episode = label_episode - self.edit_episode = edit_episode - self.btn_delete_season = btn_delete_season - self.hb = hb - - def list_elements(self): - return [self.label_season, self.edit_season, self.label_episode, self.edit_episode, self.btn_delete_season, - self.hb] - - def get_visible(self): - return self.visible - - def set_visible(self, visible): - self.visible = visible - self.label_season.setVisible(visible) - self.edit_season.setVisible(visible) - self.label_episode.setVisible(visible) - self.edit_episode.setVisible(visible) - self.btn_delete_season.setVisible(visible) - self.hb.setVisible(visible) - - - class TextBox(xbmcgui.WindowXMLDialog): - """ Create a skinned textbox window """ - - def __init__(self, *args, **kwargs): - self.title = kwargs.get('title') - self.text = kwargs.get('text') - self.doModal() - - def onInit(self): - try: - self.getControl(5).setText(self.text) - self.getControl(1).setLabel(self.title) - except: - pass - - def onClick(self, control_id): - pass - - def onFocus(self, control_id): - pass - - def onAction(self, action): - self.close() - - # TODO mirar retro-compatiblidad - # class ControlEdit(xbmcgui.ControlButton): - # def __new__(self, *args, **kwargs): - # del kwargs["isPassword"] - # del kwargs["window"] - # args = list(args) - # return xbmcgui.ControlButton.__new__(self, *args, **kwargs) - # - # def __init__(self, *args, **kwargs): - # self.isPassword = kwargs["isPassword"] - # self.window = kwargs["window"] - # self.label = "" - # self.text = "" - # self.textControl = xbmcgui.ControlLabel(self.getX(), self.getY(), self.getWidth(), self.getHeight(), - # self.text, - # font=kwargs["font"], textColor=kwargs["textColor"], alignment=4 | 1) - # self.window.addControl(self.textControl) - # - # def setLabel(self, val): - # self.label = val - # xbmcgui.ControlButton.setLabel(self, val) - # - # def getX(self): - # return xbmcgui.ControlButton.getPosition(self)[0] - # - # def getY(self): - # return xbmcgui.ControlButton.getPosition(self)[1] - # - # def setEnabled(self, e): - # xbmcgui.ControlButton.setEnabled(self, e) - # self.textControl.setEnabled(e) - # - # def setWidth(self, w): - # xbmcgui.ControlButton.setWidth(self, w) - # self.textControl.setWidth(w / 2) - # - # def setHeight(self, w): - # xbmcgui.ControlButton.setHeight(self, w) - # self.textControl.setHeight(w) - # - # def setPosition(self, x, y): - # xbmcgui.ControlButton.setPosition(self, x, y) - # self.textControl.setPosition(x + self.getWidth() / 2, y) - # - # def setText(self, text): - # self.text = text - # if self.isPassword: - # self.textControl.setLabel("*" * len(self.text)) - # else: - # self.textControl.setLabel(self.text) - # - # def getText(self): - # return self.text - # - # - # if not hasattr(xbmcgui, "ControlEdit"): - # xbmcgui.ControlEdit = ControlEdit diff --git a/channels/ricettevideo.json b/channels/ricettevideo.json deleted file mode 100644 index 709353b6..00000000 --- a/channels/ricettevideo.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "id": "ricettevideo", - "name": "Ricette Video", - 
"language": ["ita"], - "active": true, - "adult": false, - "thumbnail": "http://ricettevideo.net/wp-content/uploads/2013/08/Ricette-Video-Logo.png", - "banner": "http://ricettevideo.net/wp-content/uploads/2013/08/Ricette-Video-Logo.png", - "categories": ["documentary"], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": false, - "enabled": false, - "visible": false - }, - { - "id": "include_in_newest_documentales", - "type": "bool", - "label": "Includi in Novità - Documentari", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_italiano", - "type": "bool", - "label": "Includi in Novità - Italiano", - "default": true, - "enabled": true, - "visible": true - } - ] -} - diff --git a/channels/ricettevideo.py b/channels/ricettevideo.py deleted file mode 100644 index 26374b3e..00000000 --- a/channels/ricettevideo.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -# ------------------------------------------------------------ -# Ringraziamo Icarus crew -# Canale ricettevideo -# ------------------------------------------------------------ -import re -import urlparse - -from core import httptools, scrapertools -from core.item import Item -from platformcode import logger -from platformcode import config - -host = "http://ricettevideo.net" - - -def mainlist(item): - logger.info("kod.ricettevideo mainlist") - itemlist = [Item(channel=item.channel, title="[COLOR azure]Videoricette[/COLOR]", action="peliculas", - url=host, - thumbnail="http://www.brinkmanscountrycorner.com/images/Recipies.png")] - - return itemlist - - -def peliculas(item): - logger.info("kod.ricettevideo peliculas") - itemlist = [] - - # Carica la pagina - data = httptools.downloadpage(item.url).data - - # Estrae i contenuti - patron = '
    \s*]+>' - matches = re.compile(patron, re.DOTALL).findall(data) - scrapertools.printMatches(matches) - - for scrapedurl, scrapedtitle, scrapedthumbnail in matches: - scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle) - scrapedplot = "" - itemlist.append(Item(channel=item.channel, action="findvideos", fulltitle=scrapedtitle, show=scrapedtitle, - title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, - folder=True)) - - # Paginazione - patronvideos = '' - matches = re.compile(patronvideos, re.DOTALL).findall(data) - scrapertools.printMatches(matches) - - if len(matches) > 0: - scrapedurl = urlparse.urljoin(item.url, matches[0]) - itemlist.append( - Item(channel=item.channel, action="peliculas", title="[COLOR orange]Avanti >>[/COLOR]", url=scrapedurl, - folder=True)) - - return itemlist - -# test update diff --git a/channels/seriehd.py b/channels/seriehd.py index 1c56836d..a60c691e 100644 --- a/channels/seriehd.py +++ b/channels/seriehd.py @@ -4,29 +4,19 @@ # ------------------------------------------------------------ -from core import scrapertoolsV2, httptools, support +from core import scrapertools, httptools, support from core.item import Item -__channel__ = 'seriehd' -host = support.config.get_channel_url(__channel__) +host = support.config.get_channel_url() +headers = [['Referer', host]] -# host = 'https://www.seriehd.watch' -headers = '' -def findhost(): - pass - # global host, headers - # data= httptools.downloadpage('https://seriehd.nuovo.link/').data - # global host, headers - # host = scrapertoolsV2.find_single_match(data, r'
(?P.*?)</h2>\s*<img src="(?P<thumb>[^"]+)" alt="[^"]*" />\s*<A HREF="(?P<url>[^"]+)">.*?<span class="year">(?:(?P<year>[0-9]{4}))?.*?<span class="calidad">(?:(?P<quality>[A-Z]+))?.*?</span>'
     patronNext=r'<span class="current">\d+</span><a rel="nofollow" class="page larger" href="([^"]+)">\d+</a>'
     action='episodios'
@@ -45,7 +33,6 @@ def peliculas(item):
 @support.scrape
 def episodios(item):
-    #findhost()
     data =''
     url = support.match(item, patronBlock=r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">')[1]
     seasons = support.match(item, r'<a href="([^"]+)">(\d+)<', r'<h3>STAGIONE</h3><ul>(.*?)</ul>', headers, url)[0]
@@ -63,7 +50,6 @@ def episodios(item):
 @support.scrape
 def genre(item):
-    #findhost()
     patronMenu = '<a href="(?P<url>[^"]+)">(?P<title>[^<]+)</a>'
     blacklist = ['Serie TV','Serie TV Americane','Serie TV Italiane','altadefinizione']
     patronBlock = '<ul class="sub-menu">(?P<block>.*)</ul>'
@@ -73,7 +59,7 @@ def genre(item):
 def search(item, texto):
     support.log(texto)
-    findhost()
+
     item.contentType = 'tvshow'
     item.url = host + "/?s=" + texto
@@ -88,7 +74,7 @@ def search(item, texto):
 def newest(categoria):
     support.log(categoria)
-    findhost()
+
     itemlist = []
     item = support.Item()
     try:
diff --git a/channels/serietvonline.py b/channels/serietvonline.py
index 636ca41e..dffbf9a2 100644
--- a/channels/serietvonline.py
+++ b/channels/serietvonline.py
@@ -16,21 +16,18 @@
 - Prima fare la 'Rinumerazione' dal menu contestuale dal titolo della serie
 """
 import re
-from core import support, httptools, scrapertoolsV2
+from core import support, httptools, scrapertools
 from platformcode import config
 from core.item import Item
-__channel__ = "serietvonline"
-
-host = "https://serietvonline.monster"
-headers = ""
 def findhost():
-    pass
-    # global host, headers
-    # data = httptools.downloadpage('https://serietvonline.me/').data
-    # host = scrapertoolsV2.find_single_match(data, r'<a class="pure-button pure-button-primary" title=\'serie tv online\' href="([^"]+)">')
-    # headers = [['Referer', host]]
+    data = httptools.downloadpage('https://serietvonline.me/').data
+    host = scrapertools.find_single_match(data, r'<a class="pure-button pure-button-primary" title=\'serie tv online\' href="([^"]+)">')
+    return host
+
+host = config.get_channel_url(findhost)
+headers = [['Referer', host]]
 list_servers = ['akvideo', 'wstream', 'backin', 'vidtome', 'nowvideo']
 list_quality = ['default']
@@ -39,7 +36,7 @@ list_quality = ['default']
 @support.menu
 def mainlist(item):
     support.log()
-    findhost()
+
     film = ['/ultimi-film-aggiunti/',
            ('Lista', ['/lista-film/', 'peliculas', 'lista'])
@@ -55,7 +52,7 @@ def mainlist(item):
     anime = ['/lista-cartoni-animati-e-anime/']
-    documentari = [('Documentari', ['/lista-documentari/' , 'peliculas' , 'doc', 'tvshow'])]
+    documentari = [('Documentari bullet bold', ['/lista-documentari/' , 'peliculas' , 'doc', 'tvshow'])]
     search = ''
@@ -129,7 +126,7 @@ def episodios(item):
 def search(item, text):
     support.log("CERCA :" ,text, item)
-    findhost()
+
     item.url = "%s/?s=%s" % (host, text)
     try:
@@ -144,7 +141,7 @@ def search(item, text):
 def newest(categoria):
     support.log(categoria)
-    findhost()
+
     itemlist = []
     item = Item()
@@ -183,8 +180,8 @@ def findvideos(item):
     data = re.sub('\n|\t', ' ', data)
     data = re.sub(r'>\s+<', '> <', data)
     #support.log("DATA - HTML:\n", data)
-    url_video = scrapertoolsV2.find_single_match(data, r'<tr><td>(.+?)</td><tr>', -1)
-    url_serie = scrapertoolsV2.find_single_match(data, r'<link rel="canonical" href="([^"]+)"\s?/>')
+    url_video = scrapertools.find_single_match(data, r'<tr><td>(.+?)</td><tr>', -1)
+    url_serie = scrapertools.find_single_match(data, r'<link rel="canonical" href="([^"]+)"\s?/>')
     goseries = support.typo("Vai alla Serie:", ' bold')
     series = support.typo(item.contentSerieName, ' bold color kod')
     itemlist = support.server(item, data=url_video)
diff --git a/channels/serietvsubita.py b/channels/serietvsubita.py
index 659a90fb..1345fffa 100644
--- a/channels/serietvsubita.py
+++ b/channels/serietvsubita.py
@@ -12,8 +12,7 @@ from core.item import Item
 from core.support import log
 from platformcode import logger, config
-__channel__ = "serietvsubita"
-host = config.get_channel_url(__channel__)
+host = config.get_channel_url()
 headers = [['Referer', host]]
 IDIOMAS = {'Italiano': 'IT'}
diff --git a/channels/serietvu.py b/channels/serietvu.py
index 35afa4bb..c4c9bd2f 100644
--- a/channels/serietvu.py
+++ b/channels/serietvu.py
@@ -8,13 +8,12 @@
 """
 import re
-from core import support, httptools, scrapertoolsV2
+from core import support, httptools, scrapertools
 from core.item import Item
 from core.support import log
 from platformcode import config
-__channel__ = 'serietvu'
-host = config.get_channel_url(__channel__)
+host = config.get_channel_url()
 headers = [['Referer', host]]
 list_servers = ['speedvideo']
@@ -119,8 +118,8 @@ def findvideos(item):
     data = httptools.downloadpage(item.url, headers=headers).data
     data = re.sub('\n|\t', ' ', data)
     data = re.sub(r'>\s+<', '> <', data)
-    url_video = scrapertoolsV2.find_single_match(data, r'<div class="item"> <a data-id="[^"]+" data-href="([^"]+)" data-original="[^"]+"[^>]+> <div> <div class="title">Episodio \d+', -1)
-    url_serie = scrapertoolsV2.find_single_match(data, r'<link rel="canonical" href="([^"]+)"\s?/>')
+    url_video = scrapertools.find_single_match(data, r'<div class="item"> <a data-id="[^"]+" data-href="([^"]+)" data-original="[^"]+"[^>]+> <div> <div class="title">Episodio \d+', -1)
+    url_serie = scrapertools.find_single_match(data, r'<link rel="canonical" href="([^"]+)"\s?/>')
     goseries = support.typo(">> Vai alla Serie:", ' bold')
     series = support.typo(item.contentSerieName, ' bold color kod')
diff --git a/channels/streamingaltadefinizione.py b/channels/streamingaltadefinizione.py
index ce616fb3..0240c786 100644
--- a/channels/streamingaltadefinizione.py
+++ b/channels/streamingaltadefinizione.py
@@ -7,28 +7,26 @@ from core import support, httptools
 from core.item import Item
 from platformcode import config
-# __channel__ = "streamingaltadefinizione"
-# host = config.get_channel_url(__channel__)
-host = headers = ''
 list_servers = ['verystream', 'openload', 'wstream']
 list_quality = ['1080p', 'HD', 'DVDRIP', 'SD', 'CAM']
 def findhost():
-    global host, headers
     permUrl = httptools.downloadpage('https://www.popcornstream.info', follow_redirects=False).headers
     if 'google' in permUrl['location']:
+        host = permUrl['location'].replace('https://www.google.it/search?q=site:', '')
         if host[:4] != 'http':
             host = 'https://'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
-        else:
-            host = permUrl['location'].replace('https://www.google.it/search?q=site:', '')
     else:
         host = permUrl['location']
-    headers = [['Referer', host]]
+    return host
+
+host = config.get_channel_url(findhost)
+headers = [['Referer', host]]
@@
 @support.menu
 def mainlist(item):
-    findhost()
+
     film = ["/film/"]
     anime = ["/genere/anime/"]
     tvshow = ["/serietv/"]
@@ -54,17 +52,17 @@ def generos(item):
 def peliculas(item):
-    findhost()
+
     return support.dooplay_peliculas(item,
True if "/genere/" in item.url else False) def episodios(item): - findhost() + return support.dooplay_get_episodes(item) def findvideos(item): - findhost() + itemlist = [] for link in support.dooplay_get_links(item, host): if link['title'] != 'Guarda il trailer': diff --git a/channels/streamtime.py b/channels/streamtime.py index 9301a723..3aad1040 100644 --- a/channels/streamtime.py +++ b/channels/streamtime.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -from core import support, httptools, scrapertoolsV2 +from core import support, httptools, scrapertools from core.item import Item from platformcode import config, logger @@ -9,8 +9,7 @@ Nota per i tester: questo non è un canale 'tradizionale', essendo un canale tel la lista delle pagine non sarà affatto 'uniforme' (a seconda di come viene presentata la preview) """ -__channel__ = "streamtime" -host = config.get_channel_url(__channel__) +host = config.get_channel_url() headers = [['Referer', 'org.telegram.messenger']] list_servers = ['directo'] list_quality = ['default'] @@ -114,7 +113,7 @@ def episodios(item): stagioni[st] = nEp itemlist = [] - domain, id = scrapertoolsV2.find_single_match(url, r'(https?://[a-z0-9.-]+)/[^/]+/([^-/]+)') + domain, id = scrapertools.find_single_match(url, r'(https?://[a-z0-9.-]+)/[^/]+/([^-/]+)') for st in sorted(stagioni.keys()): season = st[1:] episode = stagioni[st] @@ -141,7 +140,7 @@ def episodios(item): def findvideos(item): # support.dbg() - domain = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + domain = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') if item.contentType == 'movie': id = item.url.split('/')[-1] url = domain + '/play_f.php?f=' + id diff --git a/channels/tantifilm.py b/channels/tantifilm.py index f2c3c796..9b17b2d8 100644 --- a/channels/tantifilm.py +++ b/channels/tantifilm.py @@ -5,25 +5,22 @@ import re -from core import scrapertoolsV2, httptools, tmdb, support +from core import scrapertools, httptools, tmdb, support from core.item import Item -from core.support import log +from core.support import menu, log from platformcode import logger from specials import autorenumber from platformcode import config, unify from lib.unshortenit import unshorten_only -host = 'https://www.tantifilm.eu' -headers = '' -def findhost(): - pass - # global host, headers - # permUrl = httptools.downloadpage('https://www.tantifilm.info/', follow_redirects=False).data - # host = scrapertoolsV2.find_single_match(permUrl, r'<h2 style="text-align: center;"><a href="([^"]+)">Il nuovo indirizzo di Tantifilm è:</a></h2>') - # if host.endswith('/'): - # host = host[:-1] - # headers = [['Referer', host]] +def findhost(): + permUrl = httptools.downloadpage('https://www.tantifilm.info/', follow_redirects=False).data + host = 'https://' + scrapertools.find_single_match(permUrl, r'Ora siamo ([A-Za-z0-9./]+)') + return host + +host = config.get_channel_url(findhost) +headers = [['Referer', host]] list_servers = ['verystream', 'openload', 'streamango', 'vidlox', 'youtube'] list_quality = ['default'] @@ -31,7 +28,7 @@ list_quality = ['default'] @support.menu def mainlist(item): log() - findhost() + #top = [(support.typo('Novità Film/Serie/Anime/Altro', 'bold'),['/film/'])] top = [('Novità Film/Serie/Anime/Altro', ['/film/', 'peliculas', 'all'])] @@ -59,7 +56,7 @@ def mainlist(item): @support.scrape def peliculas(item): log() - findhost() + if item.args == 'search': patron = r'<a href="(?P<url>[^"]+)" title="Permalink to\s(?P<title>[^"]+) 
\((?P<year>[^<]+)\).*?".*?<img[^s]+src="(?P<thumb>[^"]+)".*?<div class="calitate">\s*<p>(?P<quality>[^<]+)<\/p>' @@ -79,7 +76,7 @@ def peliculas(item): @support.scrape def episodios(item): log() - findhost() + if not item.data: data_check = httptools.downloadpage(item.url, headers=headers).data data_check = re.sub('\n|\t', ' ', data_check) @@ -87,7 +84,7 @@ def episodios(item): else: data_check = item.data patron_check = r'<iframe src="([^"]+)" scrolling="no" frameborder="0" width="626" height="550" allowfullscreen="true" webkitallowfullscreen="true" mozallowfullscreen="true">' - item.url = scrapertoolsV2.find_single_match(data_check, patron_check) + item.url = scrapertools.find_single_match(data_check, patron_check) patronBlock = r'Stagioni<\/a>.*?<ul class="nav navbar-nav">(?P<block>.*?)<\/ul>' patron = r'<a href="(?P<url>[^"]+)"\s*>\s*<i[^>]+><\/i>\s*(?P<episode>\d+)<\/a>' @@ -102,8 +99,8 @@ def episodios(item): season_data = httptools.downloadpage(item.url).data season_data = re.sub('\n|\t', ' ', season_data) season_data = re.sub(r'>\s+<', '> <', season_data) - block = scrapertoolsV2.find_single_match(season_data, 'Episodi.*?<ul class="nav navbar-nav">(.*?)</ul>') - episodes = scrapertoolsV2.find_multiple_matches(block, '<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>') + block = scrapertools.find_single_match(season_data, 'Episodi.*?<ul class="nav navbar-nav">(.*?)</ul>') + episodes = scrapertools.find_multiple_matches(block, '<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>') for url, episode in episodes: i = item.clone() i.action = 'findvideos' @@ -116,13 +113,9 @@ def episodios(item): #debug = True return locals() -def player_or_not(item): - - return item - def category(item): log() - findhost() + blacklist = ['Serie TV Altadefinizione', 'HD AltaDefinizione', 'Al Cinema', 'Serie TV', 'Miniserie', 'Programmi Tv', 'Live', 'Trailers', 'Serie TV Aggiornate', 'Aggiornamenti', 'Featured'] itemlist = support.scrape(item, '<li><a href="([^"]+)"><span></span>([^<]+)</a></li>', ['url', 'title'], headers, blacklist, patron_block='<ul class="table-list">(.*?)</ul>', action='peliculas') return support.thumb(itemlist) @@ -133,9 +126,9 @@ def anime(item): seasons = support.match(item, r'<div class="sp-body[^"]+">(.*?)<\/div>')[0] for season in seasons: - episodes = scrapertoolsV2.find_multiple_matches(season, r'<a.*?href="([^"]+)"[^>]+>([^<]+)<\/a>(.*?)<(:?br|\/p)') + episodes = scrapertools.find_multiple_matches(season, r'<a.*?href="([^"]+)"[^>]+>([^<]+)<\/a>(.*?)<(:?br|\/p)') for url, title, urls, none in episodes: - urls = scrapertoolsV2.find_multiple_matches(urls, '<a.*?href="([^"]+)"[^>]+>') + urls = scrapertools.find_multiple_matches(urls, '<a.*?href="([^"]+)"[^>]+>') for url2 in urls: url += url2 + '\n' @@ -160,7 +153,7 @@ def anime(item): def search(item, texto): log(texto) - findhost() + item.url = host + "/?s=" + texto try: @@ -189,8 +182,8 @@ def search(item, texto): ## for url, title, year, thumb, quality in matches: ## infoLabels = {} ## infoLabels['year'] = year -## title = scrapertoolsV2.decodeHtmlentities(title) -## quality = scrapertoolsV2.decodeHtmlentities(quality) +## title = scrapertools.decodeHtmlentities(title) +## quality = scrapertools.decodeHtmlentities(quality) ## longtitle = title + support.typo(quality,'_ [] color kod') ## itemlist.append( ## Item(channel=item.channel, @@ -218,7 +211,7 @@ def newest(categoria): matches = support.match(item, r'mediaWrapAlt recomended_videos"[^>]+>\s*<a href="([^"]+)" title="([^"]+)" 
rel="bookmark">\s*<img[^s]+src="([^"]+)"[^>]+>')[0] for url, title, thumb in matches: - title = scrapertoolsV2.decodeHtmlentities(title).replace("Permalink to ", "").replace("streaming", "") + title = scrapertools.decodeHtmlentities(title).replace("Permalink to ", "").replace("streaming", "") title = re.sub(r'\s\(\d+\)','',title) itemlist.append( Item(channel=item.channel, @@ -247,7 +240,7 @@ def findvideos(item): data = re.sub('\n|\t', ' ', data) data = re.sub(r'>\s+<', '> <', data) - check = scrapertoolsV2.find_single_match(data, r'<div class="category-film">\s+<h3>\s+(.*?)\s+</h3>\s+</div>') + check = scrapertools.find_single_match(data, r'<div class="category-film">\s+<h3>\s+(.*?)\s+</h3>\s+</div>') if 'sub' in check.lower(): item.contentLanguage = 'Sub-ITA' support.log("CHECK : ", check) @@ -262,7 +255,7 @@ def findvideos(item): return episodios(item) if 'protectlink' in data: - urls = scrapertoolsV2.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"') + urls = scrapertools.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"') support.log("SONO QUI: ", urls) for url in urls: url = url.decode('base64') @@ -270,11 +263,11 @@ def findvideos(item): url, c = unshorten_only(url) if 'nodmca' in url: page = httptools.downloadpage(url, headers=headers).data - url = '\t' + scrapertoolsV2.find_single_match(page,'<meta name="og:url" content="([^=]+)">') - if url: - listurl.add(url) + url = '\t' + scrapertools.find_single_match(page, '<meta name="og:url" content="([^=]+)">') + if url: + listurl.add(url) data += '\n'.join(listurl) - return support.server(item, data) # , headers=headers) + return support.server(item, data)#, headers=headers) # return itemlist ##def findvideos(item): @@ -284,7 +277,7 @@ def findvideos(item): ## data = item.url if item.contentType == "episode" else httptools.downloadpage(item.url, headers=headers).data ## ## if 'protectlink' in data: -## urls = scrapertoolsV2.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"') +## urls = scrapertools.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"') ## for url in urls: ## url = url.decode('base64') ## data += '\t' + url diff --git a/channels/toonitalia.py b/channels/toonitalia.py index 693861cc..7736f26d 100644 --- a/channels/toonitalia.py +++ b/channels/toonitalia.py @@ -5,8 +5,7 @@ from core import support -__channel__ = "toonitalia" -host = support.config.get_channel_url(__channel__) +host = support.config.get_channel_url() headers = [['Referer', host]] diff --git a/channels/vedohd.py b/channels/vedohd.py index 1c71a669..9ab91a48 100644 --- a/channels/vedohd.py +++ b/channels/vedohd.py @@ -3,13 +3,12 @@ # Canale per vedohd # ------------------------------------------------------------ -from core import scrapertoolsV2, httptools, support +from core import scrapertools, httptools, support from core.item import Item from platformcode import logger, config from specials import autoplay -__channel__ = "vedohd" -host = config.get_channel_url(__channel__) +host = config.get_channel_url() headers = "" IDIOMAS = {'Italiano': 'IT'} @@ -48,7 +47,7 @@ def findvideos(item): for link in support.dooplay_get_links(item, host): if link['title'] != 'Trailer': logger.info(link['title']) - server, quality = scrapertoolsV2.find_single_match(link['title'], '([^ ]+) ?(HD|3D)?') + server, quality = scrapertools.find_single_match(link['title'], '([^ ]+) ?(HD|3D)?') if quality: title = server + " [COLOR blue][" + quality + "][/COLOR]" else: diff --git a/channels/vvvvid.py b/channels/vvvvid.py index 8f6025ee..7c4975ef 100644 --- 
diff --git a/channels/vvvvid.py b/channels/vvvvid.py
index 8f6025ee..7c4975ef 100644
--- a/channels/vvvvid.py
+++ b/channels/vvvvid.py
@@ -8,8 +8,8 @@
 from core.item import Item
 from specials import autorenumber
 from lib.concurrent import futures

-__channel__ = "vvvvid"
-host = support.config.get_channel_url(__channel__)
+
+host = support.config.get_channel_url()

 # Creating persistent session
 current_session = requests.Session()
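Note for reviewers: the module-level requests.Session() kept above is deliberate; vvvvid talks to a JSON API, and a shared session carries cookies and pooled connections across all of the channel's calls. Roughly, with an invented helper:

    from lib import requests

    current_session = requests.Session()

    def get_json(url):
        # every call reuses the same cookie jar and connection pool
        return current_session.get(url).json()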
diff --git a/core/cloudflare.py b/core/cloudflare.py
deleted file mode 100644
index 529e65c6..00000000
--- a/core/cloudflare.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# -*- coding: utf-8 -*-
-# --------------------------------------------------------------------------------
-# Cloudflare decoder
-# --------------------------------------------------------------------------------
-
-import re
-import time
-import urllib
-
-import urlparse
-
-from platformcode import logger
-
-
-class Cloudflare:
-    def __init__(self, response):
-        self.timeout = 5
-        self.domain = urlparse.urlparse(response["url"])[1]
-        self.protocol = urlparse.urlparse(response["url"])[0]
-        self.js_data = {}
-        self.header_data = {}
-        if not "var s,t,o,p,b,r,e,a,k,i,n,g,f" in response["data"] or "chk_jschl" in response["url"]:
-            return
-        try:
-            self.js_data["data"] = response["data"]
-            self.js_data["auth_url"] = \
-                re.compile('<form id="challenge-form" action="([^"]+)" method="get">').findall(response["data"])[0]
-            self.js_data["params"] = {}
-            self.js_data["params"]["jschl_vc"] = \
-                re.compile('<input type="hidden" name="jschl_vc" value="([^"]+)"/>').findall(response["data"])[0]
-            self.js_data["params"]["pass"] = \
-                re.compile('<input type="hidden" name="pass" value="([^"]+)"/>').findall(response["data"])[0]
-            self.js_data["wait"] = int(re.compile("\}, ([\d]+)\);", re.MULTILINE).findall(response["data"])[0]) / 1000
-            self.js_data["params"]["s"] = \
-                re.compile('<input type="hidden" name="s" value="([^"]+)"').findall(response["data"])[0]
-        except:
-            logger.debug("Metodo #1 (javascript): NO disponible")
-            self.js_data = {}
-        if "refresh" in response["headers"]:
-            try:
-                self.header_data["wait"] = int(response["headers"]["refresh"].split(";")[0])
-                self.header_data["auth_url"] = response["headers"]["refresh"].split("=")[1].split("?")[0]
-                self.header_data["params"] = {}
-                self.header_data["params"]["pass"] = response["headers"]["refresh"].split("=")[2]
-            except:
-                logger.debug("Metodo #2 (headers): NO disponible")
-                self.header_data = {}
-
-    def solve_cf(self, body, domain):
-        js = re.search(
-            r"setTimeout\(function\(\){\s+(var s,t,o,p,b,r,e,a,k,i,n,g,f.+?\r?\n[\s\S]+?a\.value =.+?)\r?\n",
-            body
-        ).group(1)
-
-        js = re.sub(r"a\.value = ((.+).toFixed\(10\))?", r"\1", js)
-        js = re.sub(r'(e\s=\sfunction\(s\)\s{.*?};)', '', js, flags=re.DOTALL|re.MULTILINE)
-        js = re.sub(r"\s{3,}[a-z](?: = |\.).+", "", js).replace("t.length", str(len(domain)))
-        js = js.replace('; 121', '')
-        js = re.sub(r"[\n\\']", "", js)
-        jsEnv = """
-        var t = "{domain}";
-        var g = String.fromCharCode;
-        o = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
-        e = function(s) {{
-            s += "==".slice(2 - (s.length & 3));
-            var bm, r = "", r1, r2, i = 0;
-            for (; i < s.length;) {{
-                bm = o.indexOf(s.charAt(i++)) << 18 | o.indexOf(s.charAt(i++)) << 12 | (r1 = o.indexOf(s.charAt(i++))) << 6 | (r2 = o.indexOf(s.charAt(i++)));
-                r += r1 === 64 ? g(bm >> 16 & 255) : r2 === 64 ? g(bm >> 16 & 255, bm >> 8 & 255) : g(bm >> 16 & 255, bm >> 8 & 255, bm & 255);
-            }}
-            return r;
-        }};
-        function italics (str) {{ return '<i>' + this + '</i>'; }};
-        var document = {{
-            getElementById: function () {{
-                return {{'innerHTML': '{innerHTML}'}};
-            }}
-        }};
-        {js}
-        """
-        innerHTML = re.search('<div(?: [^<>]*)? id="([^<>]*?)">([^<>]*?)<\/div>', body, re.MULTILINE | re.DOTALL)
-        innerHTML = innerHTML.group(2).replace("'", r"\'") if innerHTML else ""
-        import js2py
-        from jsc import jsunc
-        js = jsunc(jsEnv.format(domain=domain, innerHTML=innerHTML, js=js))
-        def atob(s):
-            return base64.b64decode('{}'.format(s)).decode('utf-8')
-        js2py.disable_pyimport()
-        context = js2py.EvalJs({'atob': atob})
-        result = context.eval(js)
-        return float(result)
-
-    @property
-    def wait_time(self):
-        if self.js_data.get("wait", 0):
-            return self.js_data["wait"]
-        else:
-            return self.header_data.get("wait", 0)
-
-    @property
-    def is_cloudflare(self):
-        return self.header_data.get("wait", 0) > 0 or self.js_data.get("wait", 0) > 0
-
-    def get_url(self):
-        # Method #1 (javascript)
-        if self.js_data.get("wait", 0):
-            self.js_data["params"]["jschl_answer"] = self.solve_cf(self.js_data["data"], self.domain)
-            response = "%s://%s%s?%s" % (
-                self.protocol, self.domain, self.js_data["auth_url"], urllib.urlencode(self.js_data["params"]))
-            time.sleep(self.js_data["wait"])
-            return response
diff --git a/core/httptools.py b/core/httptools.py
index 136cc946..b6549df8 100755
--- a/core/httptools.py
+++ b/core/httptools.py
@@ -17,7 +17,7 @@
 from threading import Lock
 from core.jsontools import to_utf8
 from platformcode import config, logger
 from platformcode.logger import WebErrorException
-from core import scrapertoolsV2
+from core import scrapertools

 # Get the addon version
 __version = config.get_addon_version()

@@ -48,7 +48,7 @@ def get_user_agent():

 def get_url_headers(url, forced=False):
     domain = urlparse.urlparse(url)[1]
-    sub_dom = scrapertoolsV2.find_single_match(domain, r'\.(.*?\.\w+)')
+    sub_dom = scrapertools.find_single_match(domain, r'\.(.*?\.\w+)')
     if sub_dom and not 'google' in url:
         domain = sub_dom
     domain_cookies = cj._cookies.get("." + domain, {}).get("/", {})
@@ -144,34 +144,6 @@ def random_useragent():
     return default_headers["User-Agent"]


-def channel_proxy_list(url, forced_proxy=None):
-    import base64
-    import ast
-
-    try:
-        proxy_channel_bloqued_str = base64.b64decode(config.get_setting
-                                                     ('proxy_channel_bloqued')).decode('utf-8')
-        proxy_channel_bloqued = dict()
-        proxy_channel_bloqued = ast.literal_eval(proxy_channel_bloqued_str)
-    except:
-        logger.debug('Proxytools not initialized correctly')
-        return False
-
-    if not url.endswith('/'):
-        url += '/'
-    if scrapertoolsV2.find_single_match(url, r'(?:http.*\:)?\/\/(?:www\.)?([^\?|\/]+)(?:\?|\/)') \
-            in proxy_channel_bloqued:
-        if forced_proxy and forced_proxy not in ['Total', 'ProxyDirect', 'ProxyCF', 'ProxyWeb']:
-            if forced_proxy in proxy_channel_bloqued[scrapertoolsV2.find_single_match(url, r'(?:http.*\:)?\/\/(?:www\.)?([^\?|\/]+)(?:\?|\/)')]:
-                return True
-            else:
-                return False
-        if forced_proxy:
-            return True
-        if not 'OFF' in proxy_channel_bloqued[scrapertoolsV2.find_single_match(url, r'(?:http.*\:)?\/\/(?:www\.)?([^\?|\/]+)(?:\?|\/)')]:
-            return True
-
-    return False


 def show_infobox(info_dict):
     logger.info()

@@ -232,137 +204,6 @@ def show_infobox(info_dict):
     logger.info('%s%s%s' % (box['r_dn_corner'], box['fill'] * width, box['l_dn_corner']))
     return

-def check_proxy(url, **opt):
-    proxy_data = dict()
-    proxy_data['dict'] = {}
-    proxy = opt.get('proxy', True)
-    proxy_web = opt.get('proxy_web', False)
-    proxy_addr_forced = opt.get('proxy_addr_forced', None)
-    forced_proxy = opt.get('forced_proxy', None)
-
-    try:
-        if (proxy or proxy_web) and (forced_proxy or proxy_addr_forced or
-                                     channel_proxy_list(url, forced_proxy=forced_proxy)):
-            import proxytools
-            proxy_data['addr'], proxy_data['CF_addr'], proxy_data['web_name'], \
-                proxy_data['log'] = proxytools.get_proxy_addr(url, post=opt.get('post', None), forced_proxy=forced_proxy)
-
-            if proxy_addr_forced and proxy_data['log']:
-                proxy_data['log'] = scrapertoolsV2.find_single_match(str(proxy_addr_forced), r"{'http.*':\s*'(.*?)'}")
-
-            if proxy and proxy_data['addr']:
-                if proxy_addr_forced: proxy_data['addr'] = proxy_addr_forced
-                proxy_data['dict'] = proxy_data['addr']
-                proxy_data['stat'] = ', Proxy Direct ' + proxy_data['log']
-            elif proxy and proxy_data['CF_addr']:
-                if proxy_addr_forced: proxy_data['CF_addr'] = proxy_addr_forced
-                proxy_data['dict'] = proxy_data['CF_addr']
-                proxy_data['stat'] = ', Proxy CF ' + proxy_data['log']
-            elif proxy and proxy_addr_forced:
-                proxy_data['addr'] = proxy_addr_forced
-                proxy_data['dict'] = proxy_data['addr']
-                proxy_data['stat'] = ', Proxy Direct ' + proxy_data['log']
-            elif proxy and not proxy_data['addr'] and not proxy_data['CF_addr'] \
-                    and not proxy_addr_forced:
-                proxy = False
-                if not proxy_data['web_name']:
-                    proxy_data['addr'], proxy_data['CF_addr'], proxy_data['web_name'], \
-                        proxy_data['log'] = proxytools.get_proxy_addr(url, forced_proxy='Total')
-                if proxy_data['web_name']:
-                    proxy_web = True
-                else:
-                    proxy_web = False
-                    if proxy_data['addr']:
-                        proxy = True
-                        proxy_data['dict'] = proxy_data['addr']
-                        proxy_data['stat'] = ', Proxy Direct ' + proxy_data['log']
-
-            if proxy_web and proxy_data['web_name']:
-                if opt.get('post', None): proxy_data['log'] = '(POST) ' + proxy_data['log']
-                url, opt['post'], headers_proxy, proxy_data['web_name'] = \
-                    proxytools.set_proxy_web(url, proxy_data['web_name'], post=opt.get('post', None))
-                if proxy_data['web_name']:
-                    proxy_data['stat'] = ', Proxy Web ' + proxy_data['log']
-                    if headers_proxy:
-                        request_headers.update(dict(headers_proxy))
-            if proxy_web and not proxy_data['web_name']:
-                proxy_web = False
-                proxy_data['addr'], proxy_data['CF_addr'], proxy_data['web_name'], \
-                    proxy_data['log'] = proxytools.get_proxy_addr(url, forced_proxy='Total')
-                if proxy_data['CF_addr']:
-                    proxy = True
-                    proxy_data['dict'] = proxy_data['CF_addr']
-                    proxy_data['stat'] = ', Proxy CF ' + proxy_data['log']
-                elif proxy_data['addr']:
-                    proxy = True
-                    proxy_data['dict'] = proxy_data['addr']
-                    proxy_data['stat'] = ', Proxy Direct ' + proxy_data['log']
-
-    except:
-        import traceback
-        logger.error(traceback.format_exc())
-        opt['proxy'] = ''
-        opt['proxy_web'] = ''
-        proxy_data['stat'] = ''
-        proxy_data['addr'] = ''
-        proxy_data['CF_addr'] = ''
-        proxy_data['dict'] = {}
-        proxy_data['web_name'] = ''
-        proxy_data['log'] = ''
-        url = opt['url_save']
-    try:
-        proxy_data['addr']['https'] = str('https://'+ proxy_data['addr']['https'])
-    except:
-        pass
-    return url, proxy_data, opt
-
-
-def proxy_post_processing(url, proxy_data, response, opt):
-    opt['out_break'] = False
-    try:
-        if ', Proxy Web' in proxy_data.get('stat', ''):
-            import proxytools
-            response["data"] = proxytools.restore_after_proxy_web(response["data"],
-                                                                  proxy_data['web_name'], opt['url_save'])
-            if response["data"] == 'ERROR':
-                response['sucess'] = False
-            if response["code"] == 302:
-                proxy_data['stat'] = ', Proxy Direct'
-                opt['forced_proxy'] = 'ProxyDirect'
-                url = opt['url_save']
-                opt['post'] = opt['post_save']
-                response['sucess'] = False
-
-        if proxy_data.get('stat', '') and response['sucess'] == False and \
-                opt.get('proxy_retries_counter', 0) <= opt.get('proxy_retries', 1) and opt.get('count_retries_tot', 5) > 1:
-            import proxytools
-            if ', Proxy Direct' in proxy_data.get('stat', ''):
-                proxytools.get_proxy_list_method(proxy_init='ProxyDirect',
-                                                 error_skip=proxy_data['addr'], url_test=url)
-            elif ', Proxy CF' in proxy_data.get('stat', ''):
-                proxytools.get_proxy_list_method(proxy_init='ProxyCF',
-                                                 error_skip=proxy_data['CF_addr'])
-                url = opt['url_save']
-            elif ', Proxy Web' in proxy_data.get('stat', ''):
-                if channel_proxy_list(opt['url_save'], forced_proxy=proxy_data['web_name']):
-                    opt['forced_proxy'] = 'ProxyCF'
-                    url = opt['url_save']
-                    opt['post'] = opt['post_save']
-                else:
-                    proxytools.get_proxy_list_method(proxy_init='ProxyWeb',
-                                                     error_skip=proxy_data['web_name'])
-                    url = opt['url_save']
-                    opt['post'] = opt['post_save']
-
-        else:
-            opt['out_break'] = True
-    except:
-        import traceback
-        logger.error(traceback.format_exc())
-        opt['out_break'] = True
-
-    return response["data"], response['sucess'], url, opt


 def downloadpage(url, **opt):

@@ -410,29 +251,21 @@ def downloadpage(url, **opt):
     """

     load_cookies()
-
-    # if scrapertoolsV2.get_domain_from_url(url) in ['www.seriehd.moda', 'wstream.video', 'www.guardaserie.media', 'akvideo.stream','www.piratestreaming.top']:  # cloudflare urls
-    #     if opt.get('session', False):
-    #         session = opt['session']  # same session to speed up search
-    #     else:
-    #         from lib import cloudscraper
-    #         session = cloudscraper.create_scraper()
-    # else:
-    #     from lib import requests
-    #     session = requests.session()
-
-    if opt.get('session', False):
-        session = opt['session']  # same session to speed up search
-        logger.info('same session')
-    elif opt.get('use_requests', False):
-        from lib import requests
-        session = requests.session()
-    else:
+    if urlparse.urlparse(url).netloc in ['www.guardaserie.media', 'casacinema.space']:
         from lib import cloudscraper
         session = cloudscraper.create_scraper()
+    elif opt.get('session', False):
+        session = opt['session']  # same session to speed up search
+        logger.info('same session')
+    elif config.get_setting('resolver_dns') and not opt.get('use_requests', False):
+        from specials import resolverdns
+        session = resolverdns.session()
+    else:
+        from lib import requests
+        session = requests.session()

-    # Headers by default, if nothing is specified
     req_headers = default_headers.copy()
+    verify = opt.get('verify', True)

     # Headers passed as parameters
     if opt.get('headers', None) is not None:

@@ -445,148 +278,132 @@ def downloadpage(url, **opt):
         req_headers['User-Agent'] = random_useragent()
     url = urllib.quote(url, safe="%/:=&?~#+!$,;'@()*[]")

-    opt['proxy_retries_counter'] = 0
     opt['url_save'] = url
     opt['post_save'] = opt.get('post', None)

-    while opt['proxy_retries_counter'] <= opt.get('proxy_retries', 1):
-        response = {}
-        info_dict = []
-        payload = dict()
-        files = {}
-        file_name = ''
-        opt['proxy_retries_counter'] += 1
+    response = {}
+    info_dict = []
+    payload = dict()
+    files = {}
+    file_name = ''

-        session.verify = opt.get('verify', True)
+    session.verify = opt.get('verify', verify)

-        if opt.get('cookies', True):
-            session.cookies = cj
-        session.headers.update(req_headers)
+    if opt.get('cookies', True):
+        session.cookies = cj
+    session.headers.update(req_headers)

-        # Prepare the url in case you need a proxy, or if proxies are sent from the channel
-        # url, proxy_data, opt = check_proxy(url, **opt)
-        # if opt.get('proxies', None) is not None:
-        #     session.proxies = opt['proxies']
-        # elif proxy_data.get('dict', {}):
-        #     session.proxies = proxy_data['dict']
-        proxy_data = {'dict': {}}
+    proxy_data = {'dict': {}}

-        inicio = time.time()
+    inicio = time.time()

-        if opt.get('timeout', None) is None and HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT is not None:
-            opt['timeout'] = HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT
-        if opt['timeout'] == 0: opt['timeout'] = None
+    if opt.get('timeout', None) is None and HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT is not None:
+        opt['timeout'] = HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT
+    if opt['timeout'] == 0: opt['timeout'] = None

-        if len(url) > 0:
-            try:
-                if opt.get('post', None) is not None or opt.get('file', None) is not None:
-                    if opt.get('post', None) is not None:
-                        # Convert string post in dict
-                        try:
-                            json.loads(opt['post'])
-                            payload = opt['post']
-                        except:
-                            if not isinstance(opt['post'], dict):
-                                post = urlparse.parse_qs(opt['post'], keep_blank_values=1)
-                                payload = dict()
+    if len(url) > 0:
+        try:
+            if opt.get('post', None) is not None or opt.get('file', None) is not None:
+                if opt.get('post', None) is not None:
+                    # Convert string post in dict
+                    try:
+                        json.loads(opt['post'])
+                        payload = opt['post']
+                    except:
+                        if not isinstance(opt['post'], dict):
+                            post = urlparse.parse_qs(opt['post'], keep_blank_values=1)
+                            payload = dict()

-                                for key, value in post.items():
-                                    try:
-                                        payload[key] = value[0]
-                                    except:
-                                        payload[key] = ''
-                        else:
-                            payload = opt['post']
-
-                # Verify 'file' and 'file_name' options to upload a buffer or file
-                if opt.get('file', None) is not None:
-                    if os.path.isfile(opt['file']):
-                        if opt.get('file_name', None) is None:
-                            path_file, opt['file_name'] = os.path.split(opt['file'])
-                        files = {'file': (opt['file_name'], open(opt['file'], 'rb'))}
-                        file_name = opt['file']
-                    else:
-                        files = {'file': (opt.get('file_name', 'Default'), opt['file'])}
-                        file_name = opt.get('file_name', 'Default') + ', Buffer de memoria'
+                            for key, value in post.items():
+                                try:
+                                    payload[key] = value[0]
+                                except:
+                                    payload[key] = ''
+                        else:
+                            payload = opt['post']

-                info_dict = fill_fields_pre(url, opt, proxy_data, file_name)
-                if opt.get('only_headers', False):
-                    # Makes the request with HEAD method
-                    req = session.head(url, allow_redirects=opt.get('follow_redirects', True),
-                                       timeout=opt['timeout'])
+            # Verify 'file' and 'file_name' options to upload a buffer or file
+            if opt.get('file', None) is not None:
+                if os.path.isfile(opt['file']):
+                    if opt.get('file_name', None) is None:
+                        path_file, opt['file_name'] = os.path.split(opt['file'])
+                    files = {'file': (opt['file_name'], open(opt['file'], 'rb'))}
+                    file_name = opt['file']
                 else:
-                    # Makes the request with POST method
-                    req = session.post(url, data=payload, allow_redirects=opt.get('follow_redirects', True),
-                                       files=files, timeout=opt['timeout'])
+                    files = {'file': (opt.get('file_name', 'Default'), opt['file'])}
+                    file_name = opt.get('file_name', 'Default') + ', Buffer de memoria'

-            elif opt.get('only_headers', False):
-                info_dict = fill_fields_pre(url, opt, proxy_data, file_name)
+            info_dict = fill_fields_pre(url, opt, proxy_data, file_name)
+            if opt.get('only_headers', False):
                 # Makes the request with HEAD method
                 req = session.head(url, allow_redirects=opt.get('follow_redirects', True),
                                    timeout=opt['timeout'])
             else:
-                info_dict = fill_fields_pre(url, opt, proxy_data, file_name)
-                # Makes the request with GET method
-                req = session.get(url, allow_redirects=opt.get('follow_redirects', True),
-                                  timeout=opt['timeout'])
+                # Makes the request with POST method
+                req = session.post(url, data=payload, allow_redirects=opt.get('follow_redirects', True),
+                                   files=files, timeout=opt['timeout'])

-            except Exception as e:
-                from lib import requests
-                if not opt.get('ignore_response_code', False) and not proxy_data.get('stat', ''):
-                    req = requests.Response()
-                    response['data'] = ''
-                    response['sucess'] = False
-                    info_dict.append(('Success', 'False'))
-                    response['code'] = str(e)
-                    info_dict.append(('Response code', str(e)))
-                    info_dict.append(('Finalizado en', time.time() - inicio))
-                    if not opt.get('alfa_s', False):
-                        show_infobox(info_dict)
-                    return type('HTTPResponse', (), response)
-                else:
-                    req = requests.Response()
-                    req.status_code = str(e)
+        elif opt.get('only_headers', False):
+            info_dict = fill_fields_pre(url, opt, proxy_data, file_name)
+            # Makes the request with HEAD method
+            req = session.head(url, allow_redirects=opt.get('follow_redirects', True),
+                               timeout=opt['timeout'])
+        else:
+            info_dict = fill_fields_pre(url, opt, proxy_data, file_name)
+            # Makes the request with GET method
+            req = session.get(url, allow_redirects=opt.get('follow_redirects', True),
+                              timeout=opt['timeout'])
+        except Exception as e:
+            from lib import requests
+            if not opt.get('ignore_response_code', False) and not proxy_data.get('stat', ''):
+                response['data'] = ''
+                response['sucess'] = False
+                info_dict.append(('Success', 'False'))
+                response['code'] = str(e)
+                info_dict.append(('Response code', str(e)))
+                info_dict.append(('Finalizado en', time.time() - inicio))
+                if not opt.get('alfa_s', False):
+                    show_infobox(info_dict)
+                return type('HTTPResponse', (), response)
+            else:
+                req = requests.Response()
+                req.status_code = str(e)

-        else:
-            response['data'] = ''
-            response['sucess'] = False
-            response['code'] = ''
-            return type('HTTPResponse', (), response)
+    else:
+        response['data'] = ''
+        response['sucess'] = False
+        response['code'] = ''
+        return type('HTTPResponse', (), response)

-        response_code = req.status_code
+    response_code = req.status_code

-        response['data'] = req.content
-        response['url'] = req.url
-        if not response['data']:
-            response['data'] = ''
-        try:
-            response['json'] = to_utf8(req.json())
-        except:
-            response['json'] = dict()
-        response['code'] = response_code
-        response['headers'] = req.headers
-        response['cookies'] = req.cookies
+    response['data'] = req.content
+    response['url'] = req.url
+    if not response['data']:
+        response['data'] = ''
+    try:
+        response['json'] = to_utf8(req.json())
+    except:
+        response['json'] = dict()
+    response['code'] = response_code
+    response['headers'] = req.headers
+    response['cookies'] = req.cookies

-        info_dict, response = fill_fields_post(info_dict, req, response, req_headers, inicio)
+    info_dict, response = fill_fields_post(info_dict, req, response, req_headers, inicio)

-        if opt.get('cookies', True):
-            save_cookies(alfa_s=opt.get('alfa_s', False))
+    if opt.get('cookies', True):
+        save_cookies(alfa_s=opt.get('alfa_s', False))

-        # is_channel = inspect.getmodule(inspect.currentframe().f_back)
-        # is_channel = scrapertoolsV2.find_single_match(str(is_channel), "<module '(channels).*?'")
-        # if is_channel and isinstance(response_code, int):
-        #     if not opt.get('ignore_response_code', False) and not proxy_data.get('stat', ''):
-        #         if response_code > 399:
-        #             show_infobox(info_dict)
-        #             raise WebErrorException(urlparse.urlparse(url)[1])
+    # is_channel = inspect.getmodule(inspect.currentframe().f_back)
+    # is_channel = scrapertools.find_single_match(str(is_channel), "<module '(channels).*?'")
+    # if is_channel and isinstance(response_code, int):
+    #     if not opt.get('ignore_response_code', False) and not proxy_data.get('stat', ''):
+    #         if response_code > 399:
+    #             show_infobox(info_dict)
+    #             raise WebErrorException(urlparse.urlparse(url)[1])

-        if not 'api.themoviedb' in url and not opt.get('alfa_s', False):
-            show_infobox(info_dict)
-
-        # If there is a proxy error, refresh the list and retry the number indicated in proxy_retries
-        # response['data'], response['sucess'], url, opt = proxy_post_processing(url, proxy_data, response, opt)
-        # if opt.get('out_break', False):
-        #     break
+    if not 'api.themoviedb' in url and not opt.get('alfa_s', False):
+        show_infobox(info_dict)

     return type('HTTPResponse', (), response)
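Note for testers: with the proxy retry loop gone, downloadpage() now picks its session per request: cloudscraper for the hosts still behind Cloudflare, a caller-supplied session when one is passed, the DNS-overriding session from specials/resolverdns when the resolver_dns setting is enabled (the new DNS override method of this release), and plain requests otherwise. From a channel the choice is driven entirely by the options; a short usage sketch with invented URLs:

    from core import httptools
    from lib import requests

    # reuse one session across a whole search (the 'same session' fast path)
    s = requests.session()
    page = httptools.downloadpage('https://example.org/?s=foo', session=s)

    # skip the resolver_dns session and go through plain requests
    page = httptools.downloadpage('https://example.org/', use_requests=True)

    data, code = page.data, page.code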
diff --git a/core/proxytools.py b/core/proxytools.py
deleted file mode 100644
index f3eefb54..00000000
--- a/core/proxytools.py
+++ /dev/null
@@ -1 +0,0 @@
-import base64; exec(base64.b64decode('import marshal
exec(marshal.loads(base64.b64decode('YwAAAAAAAAAAIwAAAEAAAABzqgIAAGQAAFoAAGQBAGQCAGwBAFoBAGQBAGQCAGwCAFoCAGQBAGQCAGwDAFoDAGQBAGQCAGwEAFoEAGQBAGQCAGwFAFoFAGQBAGQCAGwGAFoGAGQBAGQCAGwHAFoHAGQBAGQCAGwIAFoIAGQBAGQCAGwJAFoJAGQBAGQDAGwKAG0LAFoLAG0MAFoMAG0NAFoNAAFkAQBkBABsDgBtDwBaDwABZAEAZAIAbBAAWhAAZREAWhIAaQcAZAUAZAYANmQHAGQIADZkBwBkCQA2ZAcAZAoANmQHAGQLADZkBwBkDAA2ZAcAZA0ANloTAGQOAGQPAGQQAGQRAGQSAGQTAGQUAGQVAGQWAGQXAGQYAGQZAGQaAGQbAGQcAGQdAGQeAGQfAGQgAGQhAGQiAGQjAGQkAGQlAGQmAGQnAGQoAGQpAGQqAGQrAGQsAGQtAGQuAGQvAGQwAGcjAFoUAGQOAGQXAGQPAGQxAGQVAGQSAGQWAGQaAGQYAGQbAGQeAGQfAGQcAGQyAGQzAGQ0AGQ1AGQiAGQ2AGQlAGcUAFoVAGkCAGRmAGQ9ADZkZwBkQgA2WhYAZD0AWhcAaQQAZEMAZAoANmREAGQGADZkRQBkCAA2ZEYAZA0ANloYAGRHAIQAAFoZAGRIAIQAAFoaAGURAGRJAGUSAGRKAGRLAIQEAFobAGRMAIQAAFocAGUSAGRNAIQBAFodAGQCAGROAIQBAFofAGRPAIQAAFogAGQCAGRQAIQBAFohAGQCAGQCAGRRAIQCAFoiAGRSAIQAAFojAGRTAIQAAFokAGcAAGRUAGRVAGRWAGRXAGRYAGRZAGRaAGRbAGRcAGRdAGRYAGReAGRYAGRVAGRfAGRgAGRcAGRhAGRiAGRjAGRYAGRhAGRkAGRlAGcYAGcCAFolAGQCAFMoaAAAAHPsFwAAClJlYWxpemEgZnVuY2lvbmVzIGRlIG9jdWx0YWNpw7NuIGRlIGxhIHVybCBkZSBsYXMgd2ViIGRlIGRlc3Rpbm8gbGlzdGFkYXMsIHBhcmEgYm9yZGVhciBsb3MgYmxvcXVlb3MganVkaWNpYWxlcyBvIGRlIGxhcyBvcGVyYWRvcmFzLiAgU2UgcHJldGVuZGUgcXVlIGVzdGUgcHJvY2VzbyBzZWEgbG8gbcOhcyBhdXRvbcOhdGljbyB5IHRyYW5zcGFyZW50ZSBwYXJhIGxvcyBjYW5hbGVzIGVuIGxvIHBvc2libGUuCgpFeGlzdGVuIGRvcyB0aXBvcyBkZSBwcm94aWVzIGdyYXR1aXRvczoKLQlQcm94eSBXZWIKLQlQcm94eSDigJxkaXJlY3Rv4oCdLiAgRGVudHJvIGRlIGVzdGUgZ3J1cG8gaGF5IGRpcmVjY2lvbmVzIHF1ZSBzb3BvcnRhciBDbG91ZEZsYXJlLgoKRW4gZWwgUHJveHkgV2ViLCBzZSBsbGFtYSBhIHVuYSB3ZWIgUHJveHkgZG9uZGUgc2UgbGUgcGFzYSBjb21vIFBvc3QgbGEgdXJsIGRlIGxhIHdlYiBkZSBkZXN0aW5vLCBhc8OtIGNvbW8gbG9zIHBhcsOhbWV0cm9zIHF1ZSBpbmRpY2FuIHF1ZSBOTyBlbmNyaXB0ZSBsYSB1cmwgbyBsb3MgZGF0b3MsIHkgcXVlIHPDrSB1c2UgY29va2llcy4KCkVuIGxvcyBkYXRvcyBkZSByZXNwdWVzdGEgaGF5IHF1ZSBzdXByaW1pciBkZSBsYXMgdXJscyB1bmEgY2FiZWNlcmEgeSB1bmEgY29sYSwgcXVlIHZhcsOtYW4gc2Vnw7puIGxhIHdlYiBQcm94eS4gIEVsIHJlc3VsdGFkbyBlcyB1bmEgcMOhZ2luYSBiYXN0YW50ZSBwYXJlY2lkYSBhIGxhIHF1ZSBzZSBvYnRlbmRyw61hIHNpbiB1c2FyIGVsIHByb3h5LCBhdW5xdWUgZW4gZWwgY2FuYWwgcXVlIGxvIHVzZSBzZSBkZWJlIHZlcmlmaWNhciBxdWUgbGFzIGV4cHJlc2lvbmVzIHJlZ2V4IGZ1bmNpb25hbiBzaW4gcHJvYmxlbWFzLgoKRWwgUHJveHkgV2ViIHBhcmVjZSBtw6FzIGVzdGFibGUgeSByw6FwaWRvLCBwZXJvIHRpZW5lIGVsIGluY29udmVuaWVudGUgcXVlIG5vIHNlIHZhbGUgd2VicyBxdWUgdXNlbiBDbG91ZGZsYXJlLgoKU2UgaGEgY3JlYWRvIHVuIERpY2Npb25hcmlvIGNvbiBsYXMgZW50cmFkYXMgdmVyaWZpY2FkYXMgZGUgUHJveHkgV2Vicy4gIEVuIGVzYXMgZW50cmFkYXMgc2UgZW5jdWVudHJhbiBsb3MgcGFyw6FtZXRyb3MgbmVjZXNhcmlvcyBwYXJhIGVudmlhciBsYSB1cmwgZGUgbGEgd2ViIGRlIGRlc3Rpbm8sIGFzw60gY29tbyBwYXJhIGNvbnZlcnRpciBsb3MgZGF0b3MgZGUgcmV0b3JubyBhIGFsZ28gdHJhbnNwYXJlbnRlIHBhcmEgZWwgY2FuYWwuICBIYWJyw6EgcXVlIGlyIGHDsWFkaWVuZG8geSBib3JyYW5kbyBXZWJzIFByb3h5IHNlZ8O6biBzdSByZW5kaW1pZW50byB5IGVzdGFiaWxpZGFkLgoKRWwgUHJveHkg4oCcZGlyZWN0b+KAnSwgZXMgdG90YWxtZW50ZSB0cmFuc3BhcmVudGUgcGFyYSBlbCBjYW5hbCwgcGVybWl0aWVuZG8gdXNhciBQb3N0LCB5IGVuIGFsZ3Vub3MgY2Fzb3MgbGxhbWFkYXMgYSB3ZWJzIHF1ZSB1c2FuIENsb3VkZmxhcmUuICBFbCBwcm9ibGVtYSBxdWUgdGllbmVuIGVzdG9zIFByb3hpZXMgZXMgc3UgZXh0cmVtYWRhIHZvbGF0aWxpZGFkIGVuIGxhIGRpc3BvbmliaWxpZGFkIHkgdGllbXBvIGRlIHJlc3B1ZXN0YS4KClNlIGhhIGNvbmZlY2Npb25hZG8gdW5hIGxpc3RhIGluaWNpYWwgZGUgUHJveGllcyBkaXJlY3RvcyB5IG90cmEgZGUgUHJveGllcyBDbG91ZEZsYXJlLCBwcmluY2lwYWxtZW50ZSBkZSBTaW5nYXB1ciB5IEhvbmcgS29uZywgcXVlIGhhbiBzaWRvIHByb2JhZG9zIHkgcXVlIHN1ZWxlbiBmdW5jaW9uYXIgY29uIHJlZ3VsYXJpZGFkLiAgQSBlc3RhIGxpc3RhIGluaWNpYWwgc2UgYcOxYWRlbiBkaW7DoW1pY2FtZW50ZSBvdHJvcyBkZSB3ZWIocykgcXVlIGxpc3RhbiBlc3RvcyBwcm94eSBncmF0dWl0b3MsIGNvbiBhbGd1bm9zIGNyaXRlcmlvcyBkZSBiw7pzcXVlZGEgZXhpZ2VudGVzIGRlIGRpc3Bvbm
liaWxpZGFkIHkgdGllbXBvIGRlIHJlc3B1ZXN0YS4KClNlIGhhIG9wdGFkbyBwb3IgdXNhciBwb3IgZGVmZWN0byBsb3MgUHJveGllcyDigJxkaXJlY3Rvc+KAnSwgZGVqYW5kbyBsb3MgUHJveHkgV2VicyBjb21vIGFsdGVybmF0aXZhIGF1dG9tw6F0aWNhIHBhcmEgZWwgY2FzbyBkZSBpbmRpc3BvbmliaWxpZGFkIGRlIFByb3hpZXMg4oCcZGlyZWN0b3PigJ0uCgpEZXNkZSBjdWFscXVpZXIgQ2FuYWwgc2UgcHVlZGVuIGhhY2VyIGxsYW1hZGFzIGEgSHR0cHRvb2xzIHBhcmEgcXVlIHNlYW4gZmlsdHJhZGFzIHBvciBhbGfDum4gdGlwbyBkZSBQcm94eS4gIExhcyBsbGFtYWRhcyBkZWJlbiBpbmNsdWlyIGxvcyBwYXLDoW1ldHJvcyAicHJveHk9VHJ1ZSBvIHByb3h5X3dlYj1UcnVlIiB5ICJmb3JjZWRfcHJveHk9VG90YWx8UHJveHlEaXJlY3R8UHJveHlDRnxQcm94eVdlYiIuICBDb24gbGEgb3BjacOzbiAiVG90YWwiIGFzdW1pcsOhICJQcm94eURpcmVjdCIgcGFyYSAicHJveHk9VHJ1ZSIKClRBQkxBUzoKQ29tbyB2YSBhIHNlciB1biBtw7NkdWxvIGRlIG11Y2hvIHVzbywgc2UgaGEgb3JnYW5pemFkbyBjb24gdGFibGFzIGVuIG1lbW9yaWEgZW4gdmV6IGRlIGVuIGFyY2hpdm9zIC5qc29uLCBwYXJhIG1pbmltaXphciBlbCBpbXBhY3RvIGVuIGVsIHJlbmRpbWllbnRvLiAgUG9yIG90cmEgcGFydGUsIGVzIHJlY29tZW5kYWJsZSBxdWUgZXN0ZSAucHkgKHkgcG9yIHRhbnRvIHN1cyB0YWJsYXMpIGVzdMOpIOKAnGVuY29kZWTigJ0gbyBtZWpvciBlbmNyaXB0YWRvIHBhcmEgZXZpdGFyIHF1ZSBsYXMgYWNjaW9uZXMgeSBkaXJlY2Npb25lcyBxdWUgYXF1w60gc2UgZGVzY3JpYmVuIHNlYW4gZsOhY2lsbWVudGUgbmV1dHJhbGl6YWJsZXMuCgotCWwxMTFsbCA9IGxpc3RhIGRlIHdlYnMgYmxvcXVlYWRhcyBhIHNlciB0cmF0YWRhcyBwb3IgUHJveHl0b29scy4gIFRpZW5lIGxpc3RhIGRlIGJsb3F1ZW9zIGdlb2dyw6FmaWNhLgotCWwxbDFsbCA9IGxpc3RhIGRlIFByb3hpZXMg4oCcZGlyZWN0b3PigJ0gaW5pY2lhbGVzLCB2ZXJpZmljYWRvcyB0YW50byBwYXJhIGh0dHAgY29tbyBwYXJhIGh0dHBzCi0gICBsMTExbDEgPSBsaXN0YSBkZSBQcm94aWVzIOKAnGRpcmVjdG9zIENsb29kRmxhcmXigJ0gaW5pY2lhbGVzLCB2ZXJpZmljYWRvcwotCWxsMTFsbCA9IGxpc3RhIGRlIFByb3h5IFdlYnMsIGNvbiBzdXMgcGFyw6FtZXRyb3MgZGUgdXNvCi0JbDExMTExID0gbGlzdGEgZGUgd2VicyBibG9xdWVhZGFzIGRvbmRlIHNlIGRpY2UgY29uIHF1w6kgdGlwbyBkZSBwcm94eSBlc3BlY2lmaWNvIHNlIHF1aWVyZSB0cmF0YXIuICBTaSBsYSB3ZWIgYmxvcXVlYWRhIG5vIGVzdMOhIGVuIGVzdGEgbGlzdGEsIHNlIHRyYXRhIGNvbiBsb3MgcHJveGllcyBwb3IgZGVmZWN0by4KCk3DiVRPRE9TOgotCWdldF9wcm94eV9saXN0X21vbml0b3I6IGVzIHVuIFNFUlZJQ0lPIHF1ZSBzZSBsYW56YSBhbCBpbmljaW8gZGUgQWxmYS4gU2kgZW4gbG9zIHNldHRpbmdzIHNlIGhhIGVzcGVjaWZpY2FkbyBxdWUgZWwgdXNvIGRlICJBY2Nlc28gQWx0ZXJuYXRpdm8gYSBsYSBXZWIiIGVzdMOhIGRlc2FjdGl2YWRvLCBzZSBhY3RpdmEgZWwgIk1vZG8gRGVtYW5kYSIgdXNhbmRvIGxhcyBkaXJlY2lvbmVzIGRlIFByb3hpZXMgcG9yIGRlZmVjdG8uICBTaSBlc3TDoSBhY3Rpdm8sIHNlIGFzdW1lIGVsICJNb2RvIEZvcnphZG8iIHkgc2UgZWplY3V0YXLDoSBwZXJpw7NkaWNhbWVudGUgKGNhZGEgMTIgaG9yYXMpLCBzaWVtcHJlIHF1ZSBubyBoYXlhIHJlcG9yZHVjY2lvbmVzIGFjdGl2YXMuICBFc3RlIHNlcnZpY2lvIHJlYWxpemEgbGFzIHNpZ3VpZW50ZSBmdW5jaW9uZXM6CglvCUlkZW50aWZpY2EgZWwgcGHDrXMgZGVsIHVzdWFyaW8geSBhY3RpdmEvZGVzYWN0aXZhIGVsIHByb3h5IGVuIGNhZGEgd2ViIGJsb3F1ZWRhZCBzZWfDum4gbGEgbGlzdGEgcGFpc2VzIGJsb3F1ZWFkb3MKCW8JQWxlYXRvcml6YSBsYXMgbGlzdGFzIGluaWNpYWxlcyBkZSBkaXJlY2Npb25lcyBwcm94eQogICAgbwlTaSBubyBoYXkgYmxvcXVlb3MgZW4gbGEgem9uYSBnZW9ncsOhZmljYSBkZWwgdXN1YXJpbywgYWJhbmRvbmEKLSAgIFNpIGVzdGFtb3MgZW4gIk1vZG8gRm9yemFkbyIsIGxsYW1hIGFsIG3DqXRvZG8gZ2V0X3Byb3h5X2xpc3RfbWV0aG9kLCBxdWUgcmVhbGl6YSBlc3RhcyB0YXJlYXMgZGUgaW5pY2lsYWl6YWNpw7NuIGRlIHRhYmxhczoKCW8JQ2FyZ2EgbGEgbGlzdGEgaW5pY2lhbCBkZSBQcm94aWVzIOKAnGRpcmVjdG9z4oCdIHkgbG9zIGFsZWF0b3JpemEKCW8JRGUgbGEgd2ViIOKAnEhpZGVNeS5uYW1l4oCdIG9idGllbmUgdW5hIGxpc3RhIGFkaWNpb25hbCBkZSBQcm94aWVzIOKAnGRpcmVjdG9z4oCdCglvCVVzYW5kbyBsYSB3ZWIgYmxvcXVlYWRhIOKAnG1lam9ydG9ycmVudC5jb23igJ0sIHNlIHZhbGlkYW4gbG9zIFByb3hpZXMg4oCcZGlyZWN0b3PigJ0gaGFzdGEgcXVlIHNlIGVuY3VlbnRyYSB1bm8gcXVlIHJlc3BvbmRlIGNvcnJlY3RhbWVudGUuICBFc3RlIHByb3h5IGVuY29udHJhZG8gcGFzYSBhIHNlciBlbCB1c2FkbyBwb3IgZGVmZWN0byBkdXJhbnRlIGVzdGUgcGVyaW9kbwogICAgbwlTaW1pbGFyIGEgUHJveGllcyDigJxkaXJlY3Rvc+KAnSwgZGUgbGEgbGlzdGEgaW5pY2lhbCBzZSBhbGVhd
G9yaXphIHkgc2UgdmVyaWZpY2EgdW5vIHF1ZSBmdW5jaW9uZQoJbwlTZSB2YWxpZGEgbGEgbGlzdGEgZGUgUHJveHkgV2VicyBoYXN0YSBxdWUgc2UgZW5jdWVudHJhIHVuYSBxdWUgcmVzcG9uZGEgY29ycmVjdGFtZW50ZS4gIEVzdGEgUHJveHkgV2ViIGVuY29udHJhZGEgcGFzYSBhIHNlciBsYSB1c2FkYSBwb3IgZGVmZWN0byBkdXJhbnRlIGVzdGUgcGVyaW9kby4gRXMgcHJlZmVyaWJsZSB1dGlsaXphciAiSGlkZS5tZSIgcG9yIHN1IHJlcHV0YWNpw7NuIHkgcG9ycXVlIHNvcG9ydGEgYmllbiBsYXMgbGxhbWFkYXMgY29uIFBPU1QgZGVzZGUgZWwgY2FuYWwuICBTaSBubyBlc3R1dmllcmEgZGlzcG9ub2JsZSwgdGUgdG9tYXLDrWEgb3RyYSwgcGVybyBsYXMgbGxhbWFkYXMgY29uIFBPU1Qgc2UgcmVhbGl6YXLDrWFuIHBvciAiUHJveHlEaXJlY3QiCglvCUVuIGxhIOKAnHdoaXRlbGlzdOKAnSBzZSBhbmFsaXphIHNpIGhheSBtw6FzIGRlIHVuYSBQcm94eSBhbHRlcm5hdGl2byBwb3Igd2ViIGJsb3F1ZWFkYS4gIFNpIGVzIGFzw60sIHNlIGFsZWF0b3JpemFuIGxhcyBlbnRyYWRhcyB5IHNlIGVzY29nZSB1bmEgcGFyYSBlc3RlIHBlcmlvZG8KCW8JTG9zIGRhdG9zIGRlIFByb3h5IOKAnGRpcmVjdG/igJ0gYWN0aXZvLCBsaXN0YSBkZSBQcm94aWVzIOKAnGRpcmVjdG9z4oCdLCBub21icmUgZGUgUHJveHkgV2ViIGFjdGl2bywgIHkgUHJveHkg4oCcd2hpdGVsaXN04oCdIGVuIHVzbyBzZSBndWFyZGFuIGNvbW8gcGFyw6FtZXRyb3MgZW4g4oCcc2V0dGluZ3MueG1sLCBlbmNvZGVkIEJhc2U2NOKAnSwgYXVucXVlIGVzdMOhIHByZXBhcmFkbyBwYXJhIGVuY3JpcHRhcmxvIGNvbiB1biBuaXZlbCBkZSBzZWd1cmlkYWQgbcOhcyBhbHRvLgotCXJhbmRvbWl6ZV9saXN0czogYWxlYXRvcml6YSBsYXMgbGlzdGFzIGluaWNpYWxlcyBkZSBkaXJlY2Npb25lcyBwcm94eQotCXNldF9wcm94eV93ZWI6IHByZXBhcmEgbG9zIHBhcsOhbWV0cm9zIHBhcmEgbGxhbWFyIGEgdW4gUHJveHkgV2ViCi0JcmVzdG9yZV9hZnRlcl9wcm94eV93ZWI6IHJldGlyYSBsb3MgZGF0b3MgZGVsIFByb3h5IFdlYiBkZSBsYSByZXNwdWVzdGEsIHBhcmEgaGFjZXJsbyB0cmFuc3BhcmVudGUgYWwgY2FuYWwKLQljaGFubmVsX3Byb3h5X2xpc3Q6IHZlcmlmaWNhIHNpIGxhIHdlYiBkZSBsYSB1cmwgZXN0w6EgYmxvcXVlYWRhIGVuIGVzYSBnZW9sb2NhbGl6YWNpw7NuCi0JZ2V0X3Byb3h5X2FkZHI6IHBhc2EgbG9zIGRhdG9zIGRlbCBQcm94eSDigJxkaXJlY3Rv4oCdLCBQcm94eSDigJxDbG91ZEZsYXJl4oCdIHkgUHJveHkgV2ViIHBvciBkZWZlY3RvLCBtb2RpZmljYWRvcyBjb24gbG9zIHZhbG9yZXMgZGUgbGEg4oCcd2hpdGVsaXN04oCdLCBzaSBsb3MgaGF5Ci0JZW5jcnlwdF9wcm94eTogY29kaWZpY2EgZW4gQmFzZTY0IGxvcyBkYXRvcyBwYXNhZG9zLCBjb24gcG90ZW5jaWFsIHBhcmEgZW5jcmlwdGFjacOzbgotCWRlY3J5cHRfcHJveHk6IGRlY29kaWZpY2EgZGVzZGUgQmFzZTY0IGxvcyBkYXRvcyBwYXNhZG9zCmn/////TigDAAAAdAYAAABjb25maWd0BgAAAGxvZ2dlcnQNAAAAcGxhdGZvcm10b29scygBAAAAdBEAAABXZWJFcnJvckV4Y2VwdGlvbnMFAAAARVMsUE9zFAAAAHd3dy5tZWpvcnRvcnJlbnQuY29tdAIAAABFU3MUAAAAd3d3Lm1lam9ydG9ycmVudC5vcmdzFAAAAHd3dy5tZWpvcnRvcnJlbnQubmV0cwgAAABnbnVsYS5udXMIAAAAZ251bGEuc2VzFAAAAHd3dy5lbGl0ZXRvcnJlbnQuYml6cxEAAABtZWpvcnRvcnJlbnQxLm5ldHMSAAAANjguMTgzLjE4My40NDoxMTExcxEAAAAxMTkuMjguMzEuMjk6ODg4OHMTAAAAMTU3LjIzMC40NS4xMjE6MTExMXMVAAAAMTI4LjE5OS4xNTIuMTY5OjMxMzMwcxEAAAA5MC4xNDUuMjIxLjE4Njo4MHMSAAAAMTU3LjIzMC4zMy4zNzoxMTExcxMAAAAxNzguMTI4LjEwMy4zOjMxMzMwcxMAAAAxMDQuMjQ4LjE1NC45NzoxMTExcxMAAAAxNTcuMjMwLjM0LjE5MDoxMTExcxQAAAAxMjguMTk5LjE1Ni4zNzozMTMzMHMVAAAAMTI4LjE5OS4xMzguMTM2OjMxMzMwcxMAAAAxMTEuMjIzLjc1LjE3ODo4MDgwcxUAAAAxMjguMTk5LjEzMi4xMjg6MzEzMzBzFQAAADEyOC4xOTkuMTM2LjE5NzozMTMzMHMVAAAAMTI4LjE5OS4xNTUuMTgyOjMxMzMwcxUAAAAxMjguMTk5LjE0Ny4yMDg6MzEzMzBzFQAAADEyOC4xOTkuMTQ0LjE3NDozMTMzMHMUAAAAMTI4LjE5OS4xNDguNDU6MzEzMzBzFQAAADEyOC4xOTkuMTY4LjEzMjozMTMzMHMUAAAAMTc4LjEyOC42My4xNTU6MzEzMzBzEwAAADUyLjE2My4yMDcuMTAwOjMxMjhzEwAAADEwMy40Mi4yMTMuMTc3OjgwODBzEwAAADEwMy40Mi4yMTMuMTc2OjgwODBzDgAAADEzLjcwLjI0LjE1OjgwcxEAAAAxNTAuMTA5LjU1LjE5MDo4M3MQAAAAMTU5LjEzOC4xLjE4NTo4MHMSAAAAMjE4LjEwMi4xMTkuNzo4MzgwcxIAAAAyMTguMTAyLjExOS43OjgzODNzEgAAADIxOC4xMDIuMTE5Ljc6ODM4MnMOAAAANDcuOTAuNTAuMTc6ODBzEgAAADIxOC4xMDIuMTE5Ljc6ODM4NXMSAAAAMjE4LjEwMi4xMTkuNzo4MTk3cxIAAAA1OS4xNDkuNjAuMjA5OjgzODBzEwAAADExMy4yNTIuMjIyLjczOjgzODBzEQAAADIxMC4wLjEyOC41ODo4MDgwcxAAAAA5My44OC43NS4zMTo4MDgwcxMAAAAxMjguMTk5LjIwNC42OjMxMzMwcxAAAAAxMy4yMjkuNjYu
MTU0OjgwcxEAAAAxMzkuOTkuNi4xNDI6MzEyOHMOAAAAMTU5LjY1LjEuMjY6ODBzEAAAADU0LjI1NC4xOTguNzE6ODBzOgAAAGh0dHBzOi8vbmwuaGlkZXByb3h5Lm1lL2luY2x1ZGVzL3Byb2Nlc3MucGhwP2FjdGlvbj11cGRhdGVzFwAAAGh0dHBzOi8vbmwuaGlkZXByb3h5Lm1ldAAAAABzCgAAAC9nby5waHA/dT1zDQAAACZiPTR8JmFtcDtiPTRzOAAAAHU9JXMmcHJveHlfZm9ybWRhdGFfc2VydmVyPW5sJmFsbG93Q29va2llcz0xJmVuY29kZVVSTD0wcwcAAABoaWRlLm1lczUAAABodHRwOi8vd2VicHJveHkudG8vaW5jbHVkZXMvcHJvY2Vzcy5waHA/YWN0aW9uPXVwZGF0ZXMSAAAAaHR0cDovL3dlYnByb3h5LnRvcw4AAAAvYnJvd3NlLnBocD91PXNGAAAAdT0lcyZlbmNvZGVVUkw9MCZlbmNvZGVQYWdlPTAmYWxsb3dDb29raWVzPW9uJnN0cmlwSlM9MCZzdHJpcE9iamVjdHM9MHMLAAAAd2VicHJveHkudG90BwAAAFByb3h5Q0ZzEAAAAFByb3h5V2ViOmhpZGUubWVzHAAAAFByb3h5V2ViOmhpZGUubWUsd2VicHJveHkudG90CAAAAFByb3h5V2ViYwAAAAABAAAAAwAAAEMAAABzVwAAAHQAAHQBAGQBAIQCAH0AAHknAHQCAGoDAGQCAHwAAIMAAWoEAIMAAAF0BQBqBgBkAwCDAQABV24aAAEBAXQHAGoIAHQJAGoKAIMAAIMBAAFuAQBYZAAAUygEAAAATmMCAAAAEAAAAAwAAABTAAAAc44DAABkAQBkAABsAAB9AgBkAQBkAABsAQB9AwB0AgB9BAB5MwB0AwBqBABkAgCDAQBzUAB8AQByRgB0BQBqBgBkAwCDAQABbgAAdAcAgwAAAW4AAFduJAABAQF8AQBybQB0BQBqBgBkAwCDAQABbgAAdAcAgwAAAW4BAFh8AwBqCABqCQB8AwBqCABqCgB0AwBqCwCDAABkBABkBQCDAwCDAQBysQBkBgB9BQB0DAB9BABuFQBkBwB9BQB8AQByxgB0DAB9BABuAAB0AwBqDQBkCAB0DgB8BQCDAQCDAgABZAkAfQYAZAoAfQcAdA8AahAAgwAAfQgAeUwAZAsAfQkAfAIAahEAfAYAZAwAdAwAZA0AdAwAZA4AZA8AZBAAdAIAZBEAfAQAgwEFahIAfQkAdBMAahQAfAkAZBIAgwIAfQcAV24gAAEBAXQFAGoGAHQVAGoWAIMAAIMBAAFkCgB9BwBuAQBYfAUAZAYAawIAc3UBfAEAcpEBdAUAahcAZBMAfAcAF2QUABd8CQAXgwEAAW4AAHQMAH0KAHhjAHwIAGoYAIMAAERdVQBcAgB9CwB9DAB8BwB8DABrBgBzyAFkFQB8DABrBgBy5QF8CABqGQBpAQBkFgB8CwA2gwEAAXQCAH0KAHGkAXwIAGoZAGkBAGQXAHwLADaDAQABcaQBV3wIAHIiAnQDAGoNAGQYAHQOAHQaAHwIAIMBAIMBAIMCAAFuFgB0AwBqDQBkGAB0DgBkCwCDAQCDAgABfAUAZAYAawIAc0oCfAEAcnsCdAUAahcAZBkAdBoAdAMAagQAZBoAgwEAgwEAF2QbABd0GgB8CgCDAQAXgwEAAW4AAHwFAGQGAGsCAHONAnwBAHKdAnQbAGQcAHwBAIMAAQFuAAB8CgBzrgJ0BwCDAAABZAAAU3QDAGoEAGQaAIMBAHPIAnQHAIMAAAFkAABTfAEAcvECdAUAahcAZB0AdBoAdAMAagQAZBoAgwEAgwEAF4MBAAFuAAB0AwBqHAB0AgCDAQBkHgAZZB8AawUAciUDZAEAZAAAbB0AfQ0AfA0Aah4AgwAAfQ4AbhcAdB8AZCAAfAAAZBwAfAEAgwACAWQAAFN4SwB8DgBqIACDAABziQN0IQBqIgCDAABzbQN0HwBkIAB8AABkHAB8AQCDAAIBbgAAZCMAfQ8AfA4AaiMAfA8AgwEAcj8DUHE/A3E/A1dkAABTKCQAAABOaf////90CgAAAHByb3h5X2FkZHJzDQAAAE5PIHByb3h5X2FkZHJ0CAAAAGNoYW5uZWxzcwkAAABjdXN0b20ucHl0AwAAAGRldnQEAAAAdXNlcnQJAAAAcHJveHlfZGV2cxoAAABodHRwczovL2dlb2lwLWRiLmNvbS9qc29uL1IEAAAAUgUAAAB0BQAAAHByb3h5dAkAAABwcm94eV93ZWJ0BwAAAHRpbWVvdXRpCgAAAHQOAAAAcmFuZG9tX2hlYWRlcnN0BgAAAGFsZmFfc3MYAAAAImNvdW50cnlfY29kZSI6IihbXiJdKykicwgAAABHZW9sb2M6IHMDAAAAIC8gdAMAAABBTEx0AgAAAE9OdAMAAABPRkZ0FQAAAHByb3h5X2NoYW5uZWxfYmxvcXVlZHMYAAAAQWx0ZXJuYXRpdmVfd2ViX2FjY2VzczogdBYAAABhbHRlcm5hdGl2ZV93ZWJfYWNjZXNzcxEAAAAgLyBQcm94eSBBY3Rpdm86IHQJAAAAZGVidWdnaW5ncxUAAABFbnRyYW5kbyBlbiBNb25pdG9yIDp0CwAAAG51bV92ZXJzaW9uaQ4AAAB0BAAAAHRlc3RpEA4AAGkMAAAAacCoAAAoJAAAAHQJAAAAaHR0cHRvb2xzdAIAAABvc3QEAAAAVHJ1ZVIAAAAAdAsAAABnZXRfc2V0dGluZ1IBAAAAdAUAAABlcnJvcnQPAAAAcmFuZG9taXplX2xpc3RzdAQAAABwYXRodAYAAABleGlzdHN0BAAAAGpvaW50EAAAAGdldF9ydW50aW1lX3BhdGh0BQAAAEZhbHNldAsAAABzZXRfc2V0dGluZ3QNAAAAZW5jcnlwdF9wcm94eXQGAAAAbDExMWxsdAQAAABjb3B5dAwAAABkb3dubG9hZHBhZ2V0BAAAAGRhdGF0DAAAAHNjcmFwZXJ0b29sc3QRAAAAZmluZF9zaW5nbGVfbWF0Y2h0CQAAAHRyYWNlYmFja3QKAAAAZm9ybWF0X2V4Y3QEAAAAaW5mb3QFAAAAaXRlbXN0BgAAAHVwZGF0ZXQDAAAAc3RydAsAAABsb2dnZXJfZGlzcHQMAAAAZ2V0X3BsYXRmb3JtdAQAAAB4Ym1jdAcAAABNb25pdG9ydBUAAABnZXRfcHJveHlfbGlzdF9tZXRob2R0DgAAAGFib3J0UmVxdWVzdGVkUgIAAAB0CgAAAGlzX3BsYXlpbmd0DAAAAHdhaXRGb3JBYm9ydCgQAAAAUhkAAABSFwAAAFIaAAAAUhsAAABSEQAAAFIMAAAAdAwAAABwcm94eV9nZW9sb2N0DAAAAGNvdW50cnlfY29kZVIVAAAAUioAAAB0DAAAAHByb3h5X2FjdGl2ZXQHAAAAY2hhbm5lbHQJAAAAY291bnRyaWVzUjUAAAB0BwA
AAG1vbml0b3J0BQAAAHRpbWVyKAAAAAAoAAAAAHMIAAAAPHN0cmluZz50FgAAAGdldF9wcm94eV9saXN0X21vbml0b3JiAAAAc34AAAAAAQwBDAIGAgMBDwEGABABDgEDAQYAEAELAioBBgEJAgYBBgAJARYDBgEGAQwBAwEGATABFgEDARMBCgESARwDBgEZARgBFAEJAhgBBgEfAhYCEgAxARIAEAMGAQcBBAMPAQcBBAIGACMCGQEMAQ8CEwEEAg8CDAEWAgYBDwF0BgAAAHRhcmdldGkFAAAAKAsAAABSJAAAAFIXAAAAdAkAAAB0aHJlYWRpbmd0BgAAAFRocmVhZHQFAAAAc3RhcnR0BAAAAHRpbWV0BQAAAHNsZWVwUgEAAABSHgAAAFItAAAAUi4AAAAoAQAAAFJCAAAAKAAAAAAoAAAAAHMIAAAAPHN0cmluZz50DgAAAGdldF9wcm94eV9saXN0YAAAAHMOAAAAAAIPUQMBFgERAQMBFwJjAAAAAAcAAAAGAAAAQwAAAHPNAQAAdAAAHn0AAHQBAGoCAHwAAIMBAAF0AwBqBABkAQB0BQB0BgB8AABkAgAZgwEAgwEAgwIAAXQDAGoEAGQDAHQFAHQGAHwAAIMBAIMBAIMCAAF0BwAefQAAdAEAagIAfAAAgwEAAXQDAGoEAGQEAHQFAHQGAHwAAGQCABmDAQCDAQCDAgABdAMAagQAZAUAdAUAdAYAfAAAgwEAgwEAgwIAAXQDAGoEAGQGAHQFAGQHAIMBAIMCAAF0CABqCQCDAAB9AQBnAAB9AgB44gB8AQBqCgCDAABEXdQAXAIAfQMAfQQAZAgAfQUAfAEAfAMAGX0GAGQJAHwGAGsGAHIYAWQJAH0FAHwGAGoLAGQJAGQIAIMCAH0GAG4AAGQKAHwGAGsGAHI/AWQKAH0FAHwGAGoLAGQKAGQIAIMCAH0GAG4AAHwGAGoMAGQLAIMBAH0CAHQNAHwCAIMBAGQMAGsEAHJwAXQBAGoCAHwCAIMBAAFuAAB8BQBykQF8BQB0BgB8AgBkAgAZgwEAF3wCAGQCADxuAAB8AQBqDgBpAQB8AgBkAgAZfAMANoMBAAFx1QBXdAMAagQAZA0AdAUAdAYAfAEAgwEAgwEAgwIAAWQAAFMoDgAAAE5SCAAAAGkAAAAAdAoAAABwcm94eV9saXN0dA0AAABwcm94eV9DRl9hZGRydA0AAABwcm94eV9DRl9saXN0dA4AAABwcm94eV93ZWJfbmFtZXMHAAAAaGlkZS5tZVIFAAAAcwgAAABQcm94eUNGOnMJAAAAUHJveHlXZWI6dAEAAAAsaQEAAAB0EAAAAHByb3h5X3doaXRlX2xpc3QoDwAAAHQGAAAAbDFsMWxsdAYAAAByYW5kb210BwAAAHNodWZmbGVSAAAAAFIlAAAAUiYAAABSMgAAAHQGAAAAbDExMWwxdAYAAABsMTExMTFSKAAAAFIwAAAAdAcAAAByZXBsYWNldAUAAABzcGxpdHQDAAAAbGVuUjEAAAAoBwAAAHQHAAAAcHJveGllc1JPAAAAdAsAAABwcm94eV90YWJsZXQHAAAAbGFiZWxfYXQHAAAAdmFsdWVfYXQJAAAAcHJveHlfdF9zdAcAAABwcm94eV90KAAAAAAoAAAAAHMIAAAAPHN0cmluZz5SHwAAALwAAABzOAAAAAADBwENASABHAMHAQ0BIAEcAxYDDAEGAhkBBgEKAQwBBgEVAQwBBgEVAQ8BEgEQAQYBGwEcAhwCdAUAAABUb3RhbGkFAAAAYwQAAAAoAAAAEwAAAEMAAABzWwsAAHwCAHIuAHQAAGoBAGQBAHQCAHwAAIMBABdkAgAXdAIAfAEAgwEAF4MBAAFuAABkAwBkAABsAwB9BAB0BAB9BQB5GQB0BQB0BgBqBwBkBACDAQCDAQB9BgBXbiMAAQEBZAUAfQYAdAYAaggAZAQAdAkAfAYAgwEAgwIAAW4BAFh8BgBkBgBrAgBzkQB8AgBymgB0CgB9BQBuAABnAAB9BwBkBwB9CABnAAB9CQBkBwB9CgBnAAB9CwBkBwB9DABkCAB9DQBkCQB9DgBkCgB9DwBkCwB9EAB0CwCDAAABZwAAfREAdAUAdAYAagcAZAwAgwEAgwEAfRIAdAwAag0AfBIAgwEAfREAfBEAHn0TAHwGAGQGAGsCAHMgAXwCAHI6AXQAAGoOAGQNAHQCAHwRAIMBABeDAQABbgAAfAYAZAYAawIAc0wBfAIAcnUBdAAAag4AZA4AdAIAdAUAdAYAagcAZA8AgwEAgwEAgwEAF4MBAAFuAABkEABkEQBnAgB9FAB4PgF8FABEXTYBfRUAed8AZAcAfRYAfAQAag8AZBIAfBUAFmQTAHQKAGQUAHQKAGQVAGQWAGQXAGQYAGQZAGQaAGQbAHQEAIMBBmoQAH0WAHwWAHJsAnQRAGoSAGQcAGQHAHwWAIMDAH0WAGQdAHQRAGoSAGQeAGQfAHwWAIMDABd9FgB8FgBqEwBkIACDAQByIQJ8FgBkIQAgfRYAbgAAZwAAfRcAdAwAag0AfBYAgwEAfRcAeDMAfBcARF0oAH0YAHwYAH0ZAHwZAHwRAGsHAHI9AnwRAGoUAHwZAIMBAAFxPQJxPQJXbgAAV24aAAEBAXQAAGoVAHQWAGoXAIMAAIMBAAFuAQBYfAYAZAYAawIAc5wCfAIAcogBdAAAag4AZCIAfBUAF2QjABd0AgB8FwCDAQAXgwEAAXGIAXGIAVd8AABy3wN5RgBkBwB9FgB8BABqDwBkJABkEwB0BABkFAB0CgBkFQBkJQBkFwBkGABkGQBkGgBkGwB0BABkJgBkJwCDAQdqEAB9FgBXbhoAAQEBdAAAahUAdBYAahcAgwAAgwEAAW4BAFh8FgBy3wN0EQBqEgBkKABkBwB8FgCDAwB9FgBkKQB9GgB0EQBqGAB8GgB0EQBqGQCDAgBqGgB8FgCDAQB9FwB4QAB8FwBEXTgAXAIAfRgAfRsAZCoAfBgAfBsAZgIAFn0ZAHwZAHwRAGsHAHJxA3wRAGoUAHwZAIMBAAFxcQNxcQNXfAYAZAYAawIAc78DfAIActwDdAAAag4AZCsAdAIAfBcAgwEAF4MBAAFx3ANx3wNuAAB8BgBkBgBrAgBz8QN8AgByCwR0AABqDgBkLAB0AgB8EQCDAQAXgwEAAW4AAHwBAGQtAGsCAHMjBHwBAGQuAGsCAHKCBnwGAGQGAGsCAHM1BHwCAHJJBHQAAGoOAGQvAHwBABeDAQABbgAAZwAAfRwAdBsAahsAgwAAfR0AeHcBdBwAZBYAdB0AfBEAgwEAfAMAgwMARF1dAX0eAHwIAHKLBHwAAAxyiwRQbgAAeKsAfBEAfB4AfB4AfAMAFyFEXZgAfRkAfBkAfAkAawYAcrUEcZ0EbgAAeWMAdB4Aah8AZDAAdCAAZDEAfBkAfA0AfA4AdAQAdAoAZC4AZBYAZCUAZDIAfAUAfB0AfAAAfAIAZg0AgwACfR8AdAQAfB8AXyEAfB8AaiIAgwAAAXwcAG
oUAHwfAIMBAAFXcZ0EAQEBdAAAahUAdBYAahcAgwAAgwEAAXGdBFhxnQRXeJUAZwAAfBwARF0YAH0gAHwgAGojAIMAAHJDBXwgAF4CAHFDBXLQBXlPAHwdAGokAHQEAGQlAIMCAH0IAHwJAGoUAHwIAIMBAAF0BgBqCABkMwB0CQB0AgB8CACDAQCDAQCDAgABfAAAc68FfBMAfQkAUG4AAFdxPAUEdBsAaiUAawoAcswFAQEBZAcAfQgAcTwFWHE8BVdxdARXfAgADHL7BXQdAHwJAIMBAGQWAGsEAHL7BXwJAGQDABl9CABuAAB8CAAMchsGfAEAZC0AawIAchsGdCYAZBYAGX0IAG4AAHQGAGoIAGQzAHQJAHQCAHwIAIMBAIMBAIMCAAF0BgBqCABkDAB0CQB0AgB8CQCDAQCDAQCDAgABfAYAZAYAawIAc2UGfAIAcoIGdAAAagEAZDQAdAIAfAgAgwEAF4MBAAFxggZuAAB8AQBkLQBrAgBzmgZ8AQBkJwBrAgByXgl8BgBkBgBrAgBzrAZ8AgBywAZ0AABqDgBkNQB8AQAXgwEAAW4AAHQFAHQGAGoHAGQPAIMBAIMBAH0SAHQMAGoNAHwSAIMBAH0hAHwhAB59EwB8AQBkLQBrAgByBwd8IQBqJwB8EQCDAQABbiUAfCEAaicAdAwAag0AdAUAdAYAagcAZAwAgwEAgwEAgwEAgwEAAWcAAH0iAHQbAGobAIMAAH0jAGQYAH0DAHh3AXQcAGQWAHQdAHwhAIMBAHwDAIMDAERdXQF9HgB8CgBydAd8AAAMcnQHUG4AAHirAHwhAHweAHweAHwDABchRF2YAH0ZAHwZAHwLAGsGAHKeB3GGB24AAHljAHQeAGofAGQwAHQgAGQxAHwZAHwPAHwQAHQEAHQKAGQnAGQWAGQlAGQaAHwFAHwjAHwAAHwCAGYNAIMAAn0fAHQEAHwfAF8hAHwfAGoiAIMAAAF8IgBqFAB8HwCDAQABV3GGBwEBAXQAAGoVAHQWAGoXAIMAAIMBAAFxhgdYcYYHV3iVAGcAAHwiAERdGAB9IAB8IABqIwCDAAByLAh8IABeAgBxLAhyuQh5TwB8IwBqJAB0BABkJQCDAgB9CgB8CwBqFAB8CgCDAQABdAYAaggAZDYAdAkAdAIAfAoAgwEAgwEAgwIAAXwAAHOYCHwTAH0LAFBuAABXcSUIBHQbAGolAGsKAHK1CAEBAWQHAH0KAHElCFhxJQhXcV0HV3wKAAxy5Ah0HQB8CwCDAQBkFgBrBABy5Ah8CwBkAwAZfQoAbgAAfAoAc/cIdCgAZBYAGX0KAG4AAHQGAGoIAGQ2AHQJAHQCAHwKAIMBAIMBAIMCAAF0BgBqCABkDwB0CQB0AgB8CwCDAQCDAQCDAgABfAYAZAYAawIAc0EJfAIAcl4JdAAAagEAZDcAdAIAfAoAgwEAF4MBAAFxXgluAABnAAB9JAB8AQBkLQBrAgBzfAl8AQBkOABrAgBySgt8BgBkBgBrAgBzjgl8AgByogl0AABqDgBkOQB8AQAXgwEAAW4AAHgnAHQpAGoqAIMAAERdGQBcAgB9JQB9JgB8JABqFAB8JQCDAQABca8JV3QrAHwkAIMBAH0kAHgUAXwkAERdDAF9JQB8JQB9DAB0BgBqCABkOgB0CQB0AgB8DACDAQCDAQCDAgABeUYAZAcAfRYAfAQAag8AfA0AZBMAdAoAZBQAdAQAZCYAZDgAZBUAZBYAZBcAZCUAZBkAZBoAZDsAfAUAgwEHahAAfRYAV24jAAEBAXQAAGoVAHQWAGoXAIMAAIMBAAFkBwB9DABx3wluAQBYfBYAcuUKdBEAahIAZCgAZAcAfBYAgwMAfRYAdCwAai0AfBYAfA4AgwIAfScAfCcAc9gKfAYAZAYAawIAc7gKfAIAcswKdAAAahUAZDwAfAwAF4MBAAFuAABkBwB9DABx3wlx6wp8AABz6wpQcesKcd8JZAcAfQwAcd8JV3wMAAxyGwt8AQBkLQBrAgByGwt0BgBqCABkOgB0CQBkPQCDAQCDAgABcUoLfAwADHJKC3wBAGQtAGsDAHJKC3QGAGoIAGQ6AHQJAGQHAIMBAIMCAAFxSgtuAAB0LgBkPgB8AgCDAAEBZAAAUyg/AAAATnMGAAAAVGVzdDogcw4AAAAsIFByb3h5X2luaXQ6IGn/////UgwAAABSCwAAAFIKAAAAUgUAAABzNgAAAGh0dHA6Ly93d3cubWVqb3J0b3JyZW50LmNvbS90b3JyZW50cy1kZS1wZWxpY3VsYXMuaHRtbHM0AAAAPGEgaHJlZj0iKCg/OlteIl0rKT8vcGVsaS1kZXNjYXJnYXItdG9ycmVudFteIl0rKSI+P3NDAAAAaHR0cDovL2dudWxhLm51L3BlbGljdWxhcy1vbmxpbmUvbGlzdGEtZGUtcGVsaWN1bGFzLW9ubGluZS1wYXJ0ZS0xL3NeAAAAPGEgY2xhc3M9Ik50b29sdGlwIiBocmVmPSIoW14iXSspIj4oW148XSspPHNwYW4+PGJyW148XSs8aW1nIHNyYz0iKFteIl0rKSI+PC9zcGFuPjwvYT4oLio/KTxiclJKAAAAcxsAAABUYWJsYSBpbmljaWFsIFByb3h5RGlyZWN0OiBzFwAAAFRhYmxhIGluaWNpYWwgUHJveHlDRjogUkwAAAB0AgAAAFNHdAIAAABIS3NLAAAAaHR0cHM6Ly93d3cucHJveHktbGlzdC5kb3dubG9hZC9hcGkvdjEvZ2V0P3R5cGU9aHR0cHMmYW5vbj1lbGl0ZSZjb3VudHJ5PSVzUg0AAABSDgAAAHQNAAAAcHJveHlfcmV0cmllc2kAAAAAdBEAAABjb3VudF9yZXRyaWVzX3RvdGkCAAAAUg8AAABpCgAAAFIQAAAAcxgAAABccnxcdHwmbmJzcDt8PGJyPnxcc3syLH10AQAAACdzAgAAAFxucwQAAAAnLCAncwMAAAAsICdp/f///3MaAAAAVGFibGEgcHJveHktbGlzdC5kb3dubG9hZCBzAgAAADogc1EAAABodHRwczovL2hpZGVteW5hLm1lL2VuL3Byb3h5LWxpc3QvP2NvdW50cnk9SEtOTFNHJm1heHRpbWU9MTAwMCZ0eXBlPXMmYW5vbj00I2xpc3RpAQAAAHQMAAAAZm9yY2VkX3Byb3h5UgYAAABzGwAAAFxufFxyfFx0fCZuYnNwO3w8YnI+fFxzezIsfXMoAAAAPHRkIGNsYXNzPXRkbD4oLio/KTxcL3RkPjx0ZD4oLio/KTxcL3RkPnMFAAAAJXM6JXNzEwAAAFRhYmxhIEhpZGVNeS5uYW1lOiBzGQAAAFRhYmxhIEFOVEVTIGRlbCB0ZXN0aW5nOiBSXgAAAHQLAAAAUHJveHlEaXJlY3RzEgAAAElOSVQgUHJveHlEaXJlY3Q6IFJDAAAAdAQAAABhcmdzaQcAAABSCAAAAHMSAAAAUHJveHlEaXJlY3QgYWRkcjogcw4AAABJTklUIFByb3h5Q0Y6I
FJLAAAAcw4AAABQcm94eUNGIGFkZHI6IFIHAAAAcw8AAABJTklUIFByb3h5V2ViOiBSTQAAAFIRAAAAcxAAAABQcm94eVdlYiBlcnJvcjogcwcAAABoaWRlLm1lUhcAAAAoLwAAAFIBAAAAUi8AAABSMgAAAFIaAAAAUhwAAAB0DQAAAGRlY3J5cHRfcHJveHlSAAAAAFIdAAAAUiUAAABSJgAAAFIkAAAAUh8AAAB0AwAAAGFzdHQMAAAAbGl0ZXJhbF9ldmFsdAUAAABkZWJ1Z1IpAAAAUioAAAB0AgAAAHJldAMAAABzdWJ0CAAAAGVuZHN3aXRodAYAAABhcHBlbmRSHgAAAFItAAAAUi4AAAB0BwAAAGNvbXBpbGV0BgAAAERPVEFMTHQHAAAAZmluZGFsbHQFAAAAUXVldWV0BQAAAHJhbmdlUlcAAABSRAAAAFJFAAAAdA8AAAB0ZXN0X3Byb3h5X2FkZHJ0BgAAAGRhZW1vblJGAAAAdAcAAABpc0FsaXZldAMAAABnZXR0BQAAAEVtcHR5UlAAAAB0BgAAAGV4dGVuZFJTAAAAdAYAAABsbDExbGxSMAAAAHQGAAAAc29ydGVkUisAAABSLAAAAFIzAAAAKCgAAABSGQAAAHQKAAAAcHJveHlfaW5pdFIXAAAAdAgAAABsb3RlX2xlblIaAAAAUhEAAABSDAAAAHQMAAAAcHJveGllc19zYXZlUggAAABSSgAAAFJLAAAAUkwAAABSTQAAAHQOAAAAcHJveHlfdXJsX3Rlc3R0EgAAAHByb3h5X3BhdHRlcm5fdGVzdHQRAAAAcHJveHlfQ0ZfdXJsX3Rlc3R0FQAAAHByb3h5X0NGX3BhdHRlcm5fdGVzdFJYAAAAdAsAAABwcm94aWVzX3N0cnQMAAAAcHJveGllc19pbml0dAwAAABjb3VudHJ5X2xpc3R0BwAAAGNvdW50cnlSKgAAAHQHAAAAbWF0Y2hlc3QFAAAAdmFyX2F0BwAAAHByb3h5X2F0BgAAAHBhdHJvbnQFAAAAdmFyX2N0DAAAAHRocmVhZHNfbGlzdHQJAAAAcHJveHlfcXVldAoAAABwcm94eV9sb3RldAwAAABwcm94eV90aHJlYWR0CAAAAHRocmVhZF94dAoAAABwcm94aWVzX2NmdA8AAAB0aHJlYWRzX2xpc3RfQ0Z0DAAAAHByb3h5X3F1ZV9DRlJZAAAAUloAAABSWwAAAHQJAAAAZGF0YV90ZXN0KAAAAAAoAAAAAHMIAAAAPHN0cmluZz5SNwAAAOYAAABzbAEAAAABBgAoAQwCBgEDARkBAwEGARoBEgAJAQYBBgEGAQYBBgEGAgYBBgIGAQYDBwMGARUBDwEHBRIAGgESACkDDAENAQMBBgE6AgYBFQEZAQ8BDQEGAQ8CDQEGAQwBGwEDARcBEgAmAwYCAwEGAUABAwEXAgYBFQEGAh4CEwEQAQwBFAISAB0dAwISABoDGAESABQBBgEMAh8BDQEEAhgBDAEGAQMBPwEJAQoBEQEDARsCKAEDARIBDQEcAQYBBgEIARABEgIZAQ0BEwENAhwBHAESAB0DGAESABQBFQEPAQcCDAEQAiUDBgEMAQYCHwINAQQCGAEMAQYBAwE/AQkBCgERAQMBGwIoAQMBEgENARwBBgEGAQgBEAESAhkBDQEGAQ0CHAEcARIAHQMGARgBEgAUAhkBEQIMAg0BBgEcAQMBBgFAAQMBEwEGAQcCBgEVARIBBgESABQBBgEGAgYBBwIKAhMBGQETARwCDQFjDQAAABIAAAASAAAAQwAAAHM6AQAAZAEAZAAAbAAAfQ0AdAEAagIAfAEAZAIAgwIAfQ4AfA4Acy0AZAMAfQ4AbgAAaQEAfAAAfA4ANn0PAHlMAGQEAH0QAHwNAGoDAHwBAGQFAHwDAGQGAHwEAGQHAHwFAGQIAHwPAGQJAHwGAGQKAHwHAGQLAHwIAGQMAHwJAIMBCGoEAH0QAFduGwABAQF0BQBqBgB0BwBqCACDAACDAQABZAQAU1h8EAByMAF8AgByCwF0CQBqCgBkDQBkBAB8EACDAwB9EAB0AQBqAgB8EAB8AgCDAgB9EQB8EQByAgF0CwB8CgB0DABqDACDAgByCAF8CgBqDQB8AACDAQABcQgBcS0BZAQAfQAAcTYBdAsAfAoAdAwAagwAgwIAcjYBfAoAag0AfAAAgwEAAXE2AW4GAGQEAH0AAHwAAFMoDgAAAE5p/////3MNAAAAKGh0dHAuKik6XC9cL3QEAAAAaHR0cFIFAAAAUg0AAABSDgAAAFJkAAAAdBEAAABwcm94eV9hZGRyX2ZvcmNlZFJhAAAAUmIAAABSDwAAAFIRAAAAcxsAAABcbnxccnxcdHwmbmJzcDt8PGJyPnxcc3syLH0oDgAAAFIaAAAAUisAAABSLAAAAFIpAAAAUioAAABSAQAAAFIeAAAAUi0AAABSLgAAAFJrAAAAUmwAAAB0CgAAAGlzaW5zdGFuY2VScgAAAHQDAAAAcHV0KBIAAABSCAAAAFJ/AAAAUoAAAABSDQAAAFIOAAAAUmQAAABSYQAAAFJiAAAAUg8AAABSEQAAAFKNAAAAUhkAAABSFwAAAFIaAAAAdAYAAABoZWFkZXJSiQAAAFIqAAAAUpQAAAAoAAAAACgAAAAAcwgAAAA8c3RyaW5nPlJ0AAAA4gEAAHMuAAAAAAMMAhIBBgAJAQ0CAwEGAUYBAwETAQUDBgEGARUBEgEGARIBEwIJAhIBEwIGAmMBAAAACAAAAAgAAABDAAAAc6wBAAB0AAB0AQBqAgBkAQCDAQCDAQB9AQB0AAB0AQBqAgBkAgCDAQCDAQB9AgB0AAB0AQBqAgBkAwCDAQCDAQB9AwB0AAB0AQBqAgBkBACDAQCDAQB9BAB0AAB0AQBqAgBkBQCDAQCDAQB9BQB0AAB0AQBqAgBkBgCDAQCDAQB9BgB0AAB0AQBqAgBkBwCDAQCDAQB9BwB8AQBkCABrAgBzpQB8AAByIgF0AwBqBABkCQB0BQB8AwCDAQAXZAoAF3QFAHwEAIMBABdkCwAXdAUAfAUAgwEAF2QMABd0BQB8BgCDAQAXZA0AF3QFAHwHAIMBABdkDgAXdAUAdAAAdAEAagIAZA8AgwEAgwEAgwEAF2QQABd0BQB8AgCDAQAXgwEAAW6GAHQDAGoGAHQFAHQHAGQJAHQFAHwDAIMBABdkCgAXdAUAfAQAgwEAF2QLABd0BQB8BQCDAQAXZAwAF3QFAHwGAIMBABdkDQAXdAUAfAcAgwEAF2QOABd0BQB0AAB0AQBqAgBkDwCDAQCDAQCDAQAXZBAAF3QFAHwCAIMBABeDAQCDAQCDAQABZAAAUygRAAAATlIMAAAAUhUAAABSCAAAAFJKAAAAUksAAABSTAAAAFJNAAAAUgoAAABzGgAAAFBST1hZIExpc3RzOiBQcm94eURpcmVjdDogcxUAAAAgLyBQcm94eURpcmVjdCBQb29sOiBzDAAAACAvIFByb3h5Q0Y6IHMRAAAAIC8g
UHJveHlDRiBQb29sOiBzDQAAACAvIFByb3h5V2ViOiBzFAAAACAvIFByb3h5IFdoaXRlbGlzdDogUk8AAABzFQAAACAvIEJsb3F1ZWQgQ2hhbm5lbHM6ICgIAAAAUmcAAABSAAAAAFIdAAAAUgEAAABSLwAAAFIyAAAAUmoAAABSJgAAACgIAAAAUhcAAABSDAAAAHQZAAAAcHJveHlfY2hhbm5lbF9ibG9xdWVkX3N0clIIAAAAUkoAAABSSwAAAFJMAAAAUk0AAAAoAAAAACgAAAAAcwgAAAA8c3RyaW5nPlIzAAAABQIAAHMWAAAAAAMVARUBFQEVARUBFQEVAhIBfQKGAmMDAAAACwAAAAQAAABDAAAAc24BAAB0AAB8AQAZZAEAGX0DAHQAAHwBABlkAgAZdAAAfAEAGWQDABkXfQQAdAAAfAEAGWQEABl9BQB0AAB8AQAZZAMAGX0GAHQAAHwBABlkBQAZfQcAdAAAfAEAGWQGABl9CAB0AQByggB0AgBqAwBkBwB8AwAXZAgAF3wEABeDAQABbgAAfAIAcp0AfAEAdAQAawMAcp0AZAkAfQEAbgAAZAkAfQkAfAEAcjABdAUAagYAfAAAgwEAfQAAfAIAcuIAfAcAagcAZAoAgwEAfQoAfAQAfAAAF3wKAGQBABkXfQAAbi4AfAgAfAAAFn0CAHwCAGoIAGQLAGQMAIMCAGoIAGQNAGQOAIMCAH0CAHwDAH0AAHwFAHIwAWkCAGQPAGQQADZ8BQBkEQA2fQkAcTABbgAAdAEAcl4BdAIAagkAfAAAZBIAF3wCABdkEgAXfAkAF2QSABd8AQAXgwEAAW4AAHwAAHwCAHwJAHwBAGYEAFMoEwAAAE5pAAAAAGkBAAAAaQMAAABpAgAAAGkEAAAAaQUAAABzDAAAAFBST1hZIFBPU1Q6IHMIAAAAIC8gR0VUOiBSBQAAAHQBAAAAfHQBAAAAW3MDAAAAJTVCdAEAAABdcwMAAAAlNURzIQAAAGFwcGxpY2F0aW9uL3gtd3d3LWZvcm0tdXJsZW5jb2RlZHMMAAAAQ29udGVudC1UeXBldAcAAABSZWZlcmVycwMAAAAgLyAoCgAAAFJ6AAAAUhcAAABSAQAAAFIvAAAAdAYAAABsbDExMTF0BgAAAHVybGxpYnQKAAAAcXVvdGVfcGx1c1JWAAAAUlUAAABSagAAACgLAAAAdAMAAAB1cmxSTQAAAHQEAAAAcG9zdHQTAAAAcHJveHlfc2l0ZV91cmxfcG9zdHQSAAAAcHJveHlfc2l0ZV91cmxfZ2V0dBIAAABwcm94eV9zaXRlX3JlZmVyZXJ0EQAAAHByb3h5X3NpdGVfaGVhZGVydA8AAABwcm94eV9zaXRlX3RhaWx0DwAAAHByb3h5X3NpdGVfcG9zdHQHAAAAaGVhZGVyc3QEAAAAdGFpbCgAAAAAKAAAAABzCAAAADxzdHJpbmc+dA0AAABzZXRfcHJveHlfd2ViGAIAAHMwAAAAAAIOARoBDgEOAQ4BDgIGABwBEgEJAQYCBgEPAQYBDwEVAgoBHgEGAQYBGgIGACgBYwMAAAAIAAAABAAAAEMAAABzBwEAAHQAAHwBABlkAQAZfQMAdAAAfAEAGWQCABl9BABkAwB8AABrBgByMgBkBAB9AAB8AABTdAEAagIAfAAAZAUAgwIADHIDAXwAAGoDAGQGAIMBAAxyAwF0BABqBQB8AACDAQB9AABnAAB9BQB8AwBqBgBkBwCDAQB9BQB4IAB8BQBEXRgAfQYAfAAAagcAfAYAZAgAgwIAfQAAcYAAV2cAAH0FAHwEAGoGAGQHAIMBAH0FAHggAHwFAERdGAB9BwB8AABqBwB8BwBkCACDAgB9AABxuABXfAAAagcAZAkAfAIAFmQKAIMCAH0AAHwAAGoHAGQLAHwCABZkDACDAgB9AABuAAB8AABTKA0AAABOaQMAAABpBAAAAHM2AAAASG90bGlua2luZyBkaXJlY3RseSB0byBwcm94aWVkIHBhZ2VzIGlzIG5vdCBwZXJtaXR0ZWQudAUAAABFUlJPUnMNAAAAXmRcZCs6Lio/XGQrOnQCAAAAUEtSmwAAAFIFAAAAcxAAAABocmVmPSIlc21hZ25ldDo/cw4AAABocmVmPSJtYWduZXQ6P3MQAAAAaHJlZj0nJXNtYWduZXQ6P3MOAAAAaHJlZj0nbWFnbmV0Oj8oCAAAAFJ6AAAAUisAAABSLAAAAHQKAAAAc3RhcnRzd2l0aFKgAAAAdAcAAAB1bnF1b3RlUlYAAABSVQAAACgIAAAAUioAAABSTQAAAFKiAAAAUqcAAABSqAAAAHQQAAAAcHJveHlfc2l0ZV90YWJsZXQMAAAAcHJveHlfaGVhZGVydAoAAABwcm94eV90YWlsKAAAAAAoAAAAAHMIAAAAPHN0cmluZz50FwAAAHJlc3RvcmVfYWZ0ZXJfcHJveHlfd2ViNgIAAHMkAAAAAAIOAQ4CDAEGAQQCIwEPAQYBDwENARYBBgEPAQ0BFgEWARkCYwIAAAAEAAAABQAAAEMAAABzvQAAAHQAAHQBAGoCAGQBAIMBAIMBAH0CAHQDAIMAAH0DAHwCAHI2AHQEAGoFAHwCAIMBAH0DAG4AAHwAAGoGAGQCAIMBAHNSAHwAAGQCADd9AABuAAB0BwBqCAB8AABkAwCDAgB8AwBrBgByuQB8AQBydAB0CQBTZAQAfAMAdAcAaggAfAAAZAMAgwIAGWsGAHK5AHQKAHKyAHQLAGoMAHQHAGoIAHwAAGQDAIMCAIMBAAFuAAB0CQBTbgAAdA0AUygFAAAATlIVAAAAdAEAAAAvcyQAAAAoPzpodHRwLio6XC9cLyk/KFteXD98XC9dKykoPzpcP3xcLylSEwAAACgOAAAAUmcAAABSAAAAAFIdAAAAdAQAAABkaWN0UmgAAABSaQAAAFJtAAAAUisAAABSLAAAAFIcAAAAUhcAAABSAQAAAFJqAAAAUiQAAAAoBAAAAFKiAAAAUmQAAABSmgAAAFIVAAAAKAAAAAAoAAAAAHMIAAAAPHN0cmluZz50EgAAAGNoYW5uZWxfcHJveHlfbGlzdE8CAABzHAAAAAACFQEJAQYBEgIPAQ0BGAEGAQQBHAEGABwBBwJjAwAAAA4AAAAEAAAAQwAAAHNCAwAAZAEAfQMAZAEAfQQAZAEAfQUAdAAAfQYAZAEAfQcAZAIAfQgAZAEAfQkAfAAAfQoAdAEAdAIAagMAZAMAgwEAgwEAfQMAdAEAdAIAagMAZAQAgwEAgwEAfQQAdAEAdAIAagMAZAUAgwEAgwEAfQUAdAEAdAIAagMAZAYAgwEAgwEAfQsAdAEAdAIAagMAZAcAgwEAgwEAfQwAdAQAagUAfAoAZAgAgwIAfQgAfAoAagYAZAkAgwEAc8cAfAoAZAkAN30KAG4AAHQEAGoFAHwKAGQKAIMCAH0JAHwJAHLCAXwFAHLCAXwCAAxywgF8CQB8BQBrBgBywgF0BwB9BgB0CACDAAB9DQB0CQBqCgB8BQC
DAQB9DQBkCwB8DQB8CQAZawYAcl4BZAEAfQMAZAEAfQQAZAwAfA0AfAkAGWsGAHK8AXwNAHwJABlqCwBkDQBkAQCDAgB9CwBxvAFxvwFkDgB8DQB8CQAZawYAcqYBZAEAfQMAZAEAfQsAZAwAfA0AfAkAGWsGAHK8AXwNAHwJABlqCwBkDwBkAQCDAgB9BABxvAFxvwF8DQB8CQAZfQMAZAEAfQQAZAEAfQsAccIBbgAAfAIAchwCfAIAZBAAawIActcBcTECfAIAZA4AawIAcvIBZAEAfQMAZAEAfQsAcTECfAIAZAsAawIAcg0CZAEAfQMAZAEAfQQAcTECZAEAfQQAZAEAfQsAbhUAfAYAczECZAEAfQQAZAEAfQsAbgAAfAsAckACfAAAfQcAbgAAfAMAclwCfAMAfQcAaQEAfAMAfAgANn0DAG4AAHwEAHJ4AnwEAH0HAGkBAHwEAHwIADZ9BABuAAB8AwAMctMCfAQADHLTAnwLAAxy0wJ0AQB0AgBqAwBkBACDAQCDAQB9BABkAQB9AwBkAQB9CwB8BAByygJ8BAB9BwBpAQB8BAB8CAA2fQQAcdMCZAEAfQQAbgAAfAwAZBEAawMAcugCZAEAfQcAbgAAdAwAcjIDdA0Aag4AZBIAdA8AfAMAgwEAF2QTABd0DwB8BACDAQAXZBQAF3QPAHwLAIMBABdkFQAXdA8AfAcAgwEAF4MBAAFuAAB8AwB8BAB8CwB8BwBmBABTKBYAAABOUgUAAABSlQAAAFIIAAAAUksAAABSTwAAAFJNAAAAUgwAAABzDQAAAChodHRwLiopOlwvXC9StQAAAHMkAAAAKD86aHR0cC4qOlwvXC8pPyhbXlw/fFwvXSspKD86XD98XC8pUgcAAAB0AQAAADpzCQAAAFByb3h5V2ViOlIGAAAAcwgAAABQcm94eUNGOlJeAAAAUgoAAABzBwAAAFByb3h5OiBzDQAAACAvIFByb3h5IENGOiBzDQAAACAvIFByb3h5V2ViOiBzDQAAACAvIFByb3h5TG9nOiAoEAAAAFIkAAAAUmcAAABSAAAAAFIdAAAAUisAAABSLAAAAFJtAAAAUhwAAABStgAAAFJoAAAAUmkAAABSVQAAAFIXAAAAUgEAAABSagAAAFIyAAAAKA4AAABSogAAAFKjAAAAUmQAAABSiQAAAHQKAAAAcHJveHlfQ0ZfYXQUAAAAcHJveHlfd2hpdGVfbGlzdF9zdHJ0BwAAAHByb3h5X3d0CQAAAHByb3h5X2xvZ1KZAAAAdAYAAABkb21haW50BQAAAHVybF9mUk0AAABSDAAAAFJPAAAAKAAAAAAoAAAAAHMIAAAAPHN0cmluZz50DgAAAGdldF9wcm94eV9hZGRyYgIAAHOMAAAAAAIGAQYBBgEGAQYBBgEGAQYCFQEVARUBFQEVAhIBDwENARICEwEMAQYBCQEPARABBgEGARABHAEQAQYBBgEQARwCCgEGAQwCBgEMAQMBDAEGAQkBDAEGAQkCBgEJAQYBBgEJAgYBCQEGAQYBEAEGAQYBEAEVARUBBgEGAQYBBgEQAgkBDAEJAgYARAFjAQAAAAEAAAADAAAAQwAAAHNRAAAAdAAAchYAdAEAagIAfAAAgwEAAW4AAHwAAHI3AHQDAGoEAHwAAGoFAGQBAIMBAIMBAH0AAG4AAHQAAHJNAHQBAGoCAHwAAIMBAAFuAAB8AABTKAIAAABOcwUAAAB1dGYtOCgGAAAAUhcAAABSAQAAAFJqAAAAdAYAAABiYXNlNjR0CQAAAGI2NGVuY29kZXQGAAAAZW5jb2RlKAEAAABSKgAAACgAAAAAKAAAAABzCAAAADxzdHJpbmc+UiYAAAC0AgAAcw4AAAAAAQYAEAIGARsCBgAQAWMBAAAAAQAAAAIAAABDAAAAc1EAAAB0AAByFgB0AQBqAgB8AACDAQABbgAAfAAAcjcAdAMAagQAfAAAgwEAagUAZAEAgwEAfQAAbgAAdAAAck0AdAEAagIAfAAAgwEAAW4AAHwAAFMoAgAAAE5zBQAAAHV0Zi04KAYAAABSFwAAAFIBAAAAUmoAAABSwAAAAHQJAAAAYjY0ZGVjb2RldAYAAABkZWNvZGUoAQAAAFIqAAAAKAAAAAAoAAAAAHMIAAAAPHN0cmluZz5SZwAAAL4CAABzDgAAAAABBgAQAgYBGwIGABABaWMAAABpRwAAAGl4AAAAaTEAAABpWgAAAGkyAAAAaWwAAABpdQAAAGlMAAAAaW4AAABpcAAAAGlWAAAAaXYAAABpbQAAAGlGAAAAaXMAAABpRQAAAGk9AAAAKAYAAABzOgAAAGh0dHBzOi8vbmwuaGlkZXByb3h5Lm1lL2luY2x1ZGVzL3Byb2Nlc3MucGhwP2FjdGlvbj11cGRhdGVzFwAAAGh0dHBzOi8vbmwuaGlkZXByb3h5Lm1lUgUAAABzCgAAAC9nby5waHA/dT1zDQAAACZiPTR8JmFtcDtiPTRzOAAAAHU9JXMmcHJveHlfZm9ybWRhdGFfc2VydmVyPW5sJmFsbG93Q29va2llcz0xJmVuY29kZVVSTD0wKAYAAABzNQAAAGh0dHA6Ly93ZWJwcm94eS50by9pbmNsdWRlcy9wcm9jZXNzLnBocD9hY3Rpb249dXBkYXRlcxIAAABodHRwOi8vd2VicHJveHkudG9SBQAAAHMOAAAAL2Jyb3dzZS5waHA/dT1zDQAAACZiPTR8JmFtcDtiPTRzRgAAAHU9JXMmZW5jb2RlVVJMPTAmZW5jb2RlUGFnZT0wJmFsbG93Q29va2llcz1vbiZzdHJpcEpTPTAmc3RyaXBPYmplY3RzPTAoJgAAAHQHAAAAX19kb2NfX1KgAAAAUsAAAABSawAAAFJHAAAAUkQAAABSLQAAAFJoAAAAUlEAAABScgAAAHQMAAAAcGxhdGZvcm1jb2RlUgAAAABSAQAAAFICAAAAdBMAAABwbGF0Zm9ybWNvZGUubG9nZ2VyUgMAAABSKwAAAFIkAAAAUhcAAABSJwAAAFJQAAAAUlMAAABSegAAAFKfAAAAUlQAAABSSQAAAFIfAAAAUjcAAABSdAAAAFIzAAAAdAQAAABOb25lUqwAAABStAAAAFK3AAAAUr8AAABSJgAAAFJnAAAAdAIAAABsMSgAAAAAKAAAAAAoAAAAAHMIAAAAPHN0cmluZz50CAAAADxtb2R1bGU+OQAAAHNGAAAABgIMAQwBDAEMAQwBDAEMAQwBDAIcARABDAIGAjcCbwJCAgMBAAEHAQABCgYGAiIDCVwJKhX8CSMMEwweCRkMEw9SCQoJCA==')))')) \ No newline at end of file diff --git a/core/scrapertools.py b/core/scrapertools.py index a0e5b2a1..5bb50a7b 100644 --- a/core/scrapertools.py +++ b/core/scrapertools.py @@ -1,27 +1,17 @@ # -*- 
coding: utf-8 -*- # -------------------------------------------------------------------------------- -# Scraper tools for reading and processing web elements +# Scraper tools v2 for reading and processing web elements # -------------------------------------------------------------------------------- import re import time -from core import httptools +import urlparse + +from core.entities import html5 from platformcode import logger -def get_header_from_response(url, header_to_get="", post=None, headers=None): - header_to_get = header_to_get.lower() - response = httptools.downloadpage(url, post=post, headers=headers, only_headers=True) - return response.headers.get(header_to_get) - - -def read_body_and_headers(url, post=None, headers=None, follow_redirects=False, timeout=None): - response = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=follow_redirects, - timeout=timeout) - return response.data, response.headers - - def printMatches(matches): i = 0 for match in matches: @@ -42,8 +32,37 @@ def find_multiple_matches(text, pattern): return re.findall(pattern, text, re.DOTALL) -def entityunescape(cadena): - return unescape(cadena) +def find_multiple_matches_groups(text, pattern): + r = re.compile(pattern) + return [m.groupdict() for m in r.finditer(text)] + + +# Convierte los codigos html "ñ" y lo reemplaza por "ñ" caracter unicode utf-8 +def decodeHtmlentities(data): + entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8})(;?)") + + def substitute_entity(match): + ent = match.group(2) + match.group(3) + res = "" + while not ent in html5 and not ent.endswith(";") and match.group(1) != "#": + # Excepción para cuando '&' se usa como argumento en la urls contenidas en los datos + try: + res = ent[-1] + res + ent = ent[:-1] + except: + break + + if match.group(1) == "#": + ent = unichr(int(ent.replace(";", ""))) + return ent.encode('utf-8') + else: + cp = html5.get(ent) + if cp: + return cp.decode("unicode-escape").encode('utf-8') + res + else: + return match.group() + + return entity_re.subn(substitute_entity, data)[0] def unescape(text): @@ -84,47 +103,6 @@ def unescape(text): # Convierte los codigos html "ñ" y lo reemplaza por "ñ" caracter unicode utf-8 -def decodeHtmlentities(string): - string = entitiesfix(string) - entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8});") - - def substitute_entity(match): - from htmlentitydefs import name2codepoint as n2cp - ent = match.group(2) - if match.group(1) == "#": - return unichr(int(ent)).encode('utf-8') - else: - cp = n2cp.get(ent) - - if cp: - return unichr(cp).encode('utf-8') - else: - return match.group() - - return entity_re.subn(substitute_entity, string)[0] - - -def entitiesfix(string): - # Las entidades comienzan siempre con el símbolo & , y terminan con un punto y coma ( ; ). 
-    string = string.replace("&aacute;", "á")
-    string = string.replace("&eacute;", "é")
-    string = string.replace("&iacute;", "í")
-    string = string.replace("&oacute;", "ó")
-    string = string.replace("&uacute;", "ú")
-    string = string.replace("&Aacute;", "Á")
-    string = string.replace("&Eacute;", "É")
-    string = string.replace("&Iacute;", "Í")
-    string = string.replace("&Oacute;", "Ó")
-    string = string.replace("&Uacute;", "Ú")
-    string = string.replace("&uuml;", "ü")
-    string = string.replace("&Uuml;", "Ü")
-    string = string.replace("&ntilde;", "ñ")
-    string = string.replace("&iquest;", "¿")
-    string = string.replace("&iexcl;", "¡")
-    string = string.replace(";;", ";")
-    return string
-
-
 def htmlclean(cadena):
     cadena = re.compile("<!--.*?-->", re.DOTALL).sub("", cadena)
@@ -226,7 +204,7 @@ def htmlclean(cadena):
     cadena = re.compile("<link[^>]*>", re.DOTALL).sub("", cadena)

     cadena = cadena.replace("\t", "")
-    cadena = entityunescape(cadena)
+    # cadena = entityunescape(cadena)
     return cadena


@@ -314,8 +292,8 @@ def remove_show_from_title(title, show):
     return title


+# scrapertools.get_filename_from_url(media_url)[-4:]
 def get_filename_from_url(url):
-    import urlparse
     parsed_url = urlparse.urlparse(url)
     try:
         filename = parsed_url.path
@@ -332,19 +310,18 @@ def get_filename_from_url(url):
     return filename


-# def get_domain_from_url(url):
-#     import urlparse
-#     parsed_url = urlparse.urlparse(url)
-#     try:
-#         filename = parsed_url.netloc
-#     except:
-#         # Si falla es porque la implementación de parsed_url no reconoce los atributos como "path"
-#         if len(parsed_url) >= 4:
-#             filename = parsed_url[1]
-#         else:
-#             filename = ""
-#
-#     return filename
+def get_domain_from_url(url):
+    parsed_url = urlparse.urlparse(url)
+    try:
+        filename = parsed_url.netloc
+    except:
+        # Si falla es porque la implementación de parsed_url no reconoce los atributos como "path"
+        if len(parsed_url) >= 4:
+            filename = parsed_url[1]
+        else:
+            filename = ""
+
+    return filename


 def get_season_and_episode(title):
@@ -365,22 +342,15 @@ def get_season_and_episode(title):
     @return: Numero de temporada y episodio en formato "1x01" o cadena vacia si no se han encontrado
     """
     filename = ""
-    # 4l3x87 - fix for series example 9-1-1
-    # original_title = title
-    # title = title.replace('9-1-1','')

-    patrons = ["(\d+)\s*[x-]\s*(\d+)", "(\d+)\s*×\s*(\d+)", "(?:s|t)(\d+)e(\d+)",
-               "(?:season|temp|stagione\w*)\s*(\d+)\s*(?:capitulo|epi|episode|episodio\w*)\s*(\d+)"]
+    patrons = ["(\d+)x(\d+)", "(?:s|t)(\d+)e(\d+)",
+               "(?:season|temp\w*)\s*(\d+)\s*(?:capitulo|epi\w*)\s*(\d+)"]

     for patron in patrons:
         try:
             matches = re.compile(patron, re.I).search(title)
-            if matches:
-                if len(matches.group(1)) == 1:
-                    filename = matches.group(1) + "x" + matches.group(2).zfill(2)
-                else:
-                    filename = matches.group(1).lstrip('0') + "x" + matches.group(2).zfill(2)
+            filename = matches.group(1) + "x" + matches.group(2).zfill(2)
             break
         except:
             pass
@@ -388,3 +358,27 @@ def get_season_and_episode(title):
     logger.info("'" + title + "' -> '" + filename + "'")

     return filename
+
+
+def get_sha1(cadena):
+    try:
+        import hashlib
+        devuelve = hashlib.sha1(cadena).hexdigest()
+    except:
+        import sha
+        import binascii
+        devuelve = binascii.hexlify(sha.new(cadena).digest())
+
+    return devuelve
+
+
+def get_md5(cadena):
+    try:
+        import hashlib
+        devuelve = hashlib.md5(cadena).hexdigest()
+    except:
+        import md5
+        import binascii
+        devuelve = binascii.hexlify(md5.new(cadena).digest())
+
+    return devuelve
diff --git a/core/scrapertoolsV2.py b/core/scrapertoolsV2.py
deleted file mode 100644
index a8e5b163..00000000
--- a/core/scrapertoolsV2.py
+++ /dev/null
@@ -1,346 +0,0 @@
-# -*- coding:
utf-8 -*- -# -------------------------------------------------------------------------------- -# Scraper tools v2 for reading and processing web elements -# -------------------------------------------------------------------------------- - -import re -import time - -import urlparse - -from core.entities import html5 -from platformcode import logger - - -def printMatches(matches): - i = 0 - for match in matches: - logger.info("%d %s" % (i, match)) - i = i + 1 - - -def find_single_match(data, patron, index=0): - try: - matches = re.findall(patron, data, flags=re.DOTALL) - return matches[index] - except: - return "" - - -# Parse string and extracts multiple matches using regular expressions -def find_multiple_matches(text, pattern): - return re.findall(pattern, text, re.DOTALL) - - -def find_multiple_matches_groups(text, pattern): - r = re.compile(pattern) - return [m.groupdict() for m in r.finditer(text)] - - -# Convierte los codigos html "ñ" y lo reemplaza por "ñ" caracter unicode utf-8 -def decodeHtmlentities(data): - entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8})(;?)") - - def substitute_entity(match): - ent = match.group(2) + match.group(3) - res = "" - while not ent in html5 and not ent.endswith(";") and match.group(1) != "#": - # Excepción para cuando '&' se usa como argumento en la urls contenidas en los datos - try: - res = ent[-1] + res - ent = ent[:-1] - except: - break - - if match.group(1) == "#": - ent = unichr(int(ent.replace(";", ""))) - return ent.encode('utf-8') - else: - cp = html5.get(ent) - if cp: - return cp.decode("unicode-escape").encode('utf-8') + res - else: - return match.group() - - return entity_re.subn(substitute_entity, data)[0] - - -def htmlclean(cadena): - cadena = re.compile("<!--.*?-->", re.DOTALL).sub("", cadena) - - cadena = cadena.replace("<center>", "") - cadena = cadena.replace("</center>", "") - cadena = cadena.replace("<cite>", "") - cadena = cadena.replace("</cite>", "") - cadena = cadena.replace("<em>", "") - cadena = cadena.replace("</em>", "") - cadena = cadena.replace("<u>", "") - cadena = cadena.replace("</u>", "") - cadena = cadena.replace("<li>", "") - cadena = cadena.replace("</li>", "") - cadena = cadena.replace("<turl>", "") - cadena = cadena.replace("</tbody>", "") - cadena = cadena.replace("<tr>", "") - cadena = cadena.replace("</tr>", "") - cadena = cadena.replace("<![CDATA[", "") - cadena = cadena.replace("<wbr>", "") - cadena = cadena.replace("<Br />", " ") - cadena = cadena.replace("<BR />", " ") - cadena = cadena.replace("<Br>", " ") - cadena = re.compile("<br[^>]*>", re.DOTALL).sub(" ", cadena) - - cadena = re.compile("<script.*?</script>", re.DOTALL).sub("", cadena) - - cadena = re.compile("<option[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</option>", "") - - cadena = re.compile("<button[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</button>", "") - - cadena = re.compile("<i[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</iframe>", "") - cadena = cadena.replace("</i>", "") - - cadena = re.compile("<table[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</table>", "") - - cadena = re.compile("<td[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</td>", "") - - cadena = re.compile("<div[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</div>", "") - - cadena = re.compile("<dd[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</dd>", "") - - cadena = re.compile("<b[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</b>", 
"") - - cadena = re.compile("<font[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</font>", "") - - cadena = re.compile("<strong[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</strong>", "") - - cadena = re.compile("<small[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</small>", "") - - cadena = re.compile("<span[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</span>", "") - - cadena = re.compile("<a[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</a>", "") - - cadena = re.compile("<p[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</p>", "") - - cadena = re.compile("<ul[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</ul>", "") - - cadena = re.compile("<h1[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</h1>", "") - - cadena = re.compile("<h2[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</h2>", "") - - cadena = re.compile("<h3[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</h3>", "") - - cadena = re.compile("<h4[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</h4>", "") - - cadena = re.compile("<!--[^-]+-->", re.DOTALL).sub("", cadena) - - cadena = re.compile("<img[^>]*>", re.DOTALL).sub("", cadena) - - cadena = re.compile("<object[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</object>", "") - cadena = re.compile("<param[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</param>", "") - cadena = re.compile("<embed[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("</embed>", "") - - cadena = re.compile("<title[^>]*>", re.DOTALL).sub("", cadena) - cadena = cadena.replace("", "") - - cadena = re.compile("]*>", re.DOTALL).sub("", cadena) - - cadena = cadena.replace("\t", "") - # cadena = entityunescape(cadena) - return cadena - - -def slugify(title): - # print title - - # Sustituye acentos y eñes - title = title.replace("Á", "a") - title = title.replace("É", "e") - title = title.replace("Í", "i") - title = title.replace("Ó", "o") - title = title.replace("Ú", "u") - title = title.replace("á", "a") - title = title.replace("é", "e") - title = title.replace("í", "i") - title = title.replace("ó", "o") - title = title.replace("ú", "u") - title = title.replace("À", "a") - title = title.replace("È", "e") - title = title.replace("Ì", "i") - title = title.replace("Ò", "o") - title = title.replace("Ù", "u") - title = title.replace("à", "a") - title = title.replace("è", "e") - title = title.replace("ì", "i") - title = title.replace("ò", "o") - title = title.replace("ù", "u") - title = title.replace("ç", "c") - title = title.replace("Ç", "C") - title = title.replace("Ñ", "n") - title = title.replace("ñ", "n") - title = title.replace("/", "-") - title = title.replace("&", "&") - - # Pasa a minúsculas - title = title.lower().strip() - - # Elimina caracteres no válidos - validchars = "abcdefghijklmnopqrstuvwxyz1234567890- " - title = ''.join(c for c in title if c in validchars) - - # Sustituye espacios en blanco duplicados y saltos de línea - title = re.compile("\s+", re.DOTALL).sub(" ", title) - - # Sustituye espacios en blanco por guiones - title = re.compile("\s", re.DOTALL).sub("-", title.strip()) - - # Sustituye espacios en blanco duplicados y saltos de línea - title = re.compile("\-+", re.DOTALL).sub("-", title) - - # Arregla casos especiales - if title.startswith("-"): - title = title[1:] - - if title == "": - title = "-" + str(time.time()) - - return title - - -def remove_htmltags(string): - 
return re.sub('<[^<]+?>', '', string) - - -def remove_show_from_title(title, show): - # print slugify(title)+" == "+slugify(show) - # Quita el nombre del programa del título - if slugify(title).startswith(slugify(show)): - - # Convierte a unicode primero, o el encoding se pierde - title = unicode(title, "utf-8", "replace") - show = unicode(show, "utf-8", "replace") - title = title[len(show):].strip() - - if title.startswith("-"): - title = title[1:].strip() - - if title == "": - title = str(time.time()) - - # Vuelve a utf-8 - title = title.encode("utf-8", "ignore") - show = show.encode("utf-8", "ignore") - - return title - - -# scrapertools.get_filename_from_url(media_url)[-4:] -def get_filename_from_url(url): - parsed_url = urlparse.urlparse(url) - try: - filename = parsed_url.path - except: - # Si falla es porque la implementación de parsed_url no reconoce los atributos como "path" - if len(parsed_url) >= 4: - filename = parsed_url[2] - else: - filename = "" - - if "/" in filename: - filename = filename.split("/")[-1] - - return filename - - -def get_domain_from_url(url): - parsed_url = urlparse.urlparse(url) - try: - filename = parsed_url.netloc - except: - # Si falla es porque la implementación de parsed_url no reconoce los atributos como "path" - if len(parsed_url) >= 4: - filename = parsed_url[1] - else: - filename = "" - - return filename - - -def get_season_and_episode(title): - """ - Retorna el numero de temporada y de episodio en formato "1x01" obtenido del titulo de un episodio - Ejemplos de diferentes valores para title y su valor devuelto: - "serie 101x1.strm", "s101e1.avi", "t101e1.avi" -> '101x01' - "Name TvShow 1x6.avi" -> '1x06' - "Temp 3 episodio 2.avi" -> '3x02' - "Alcantara season 13 episodie 12.avi" -> '13x12' - "Temp1 capitulo 14" -> '1x14' - "Temporada 1: El origen Episodio 9" -> '' (entre el numero de temporada y los episodios no puede haber otro texto) - "Episodio 25: titulo episodio" -> '' (no existe el numero de temporada) - "Serie X Temporada 1" -> '' (no existe el numero del episodio) - @type title: str - @param title: titulo del episodio de una serie - @rtype: str - @return: Numero de temporada y episodio en formato "1x01" o cadena vacia si no se han encontrado - """ - filename = "" - - patrons = ["(\d+)x(\d+)", "(?:s|t)(\d+)e(\d+)", - "(?:season|temp\w*)\s*(\d+)\s*(?:capitulo|epi\w*)\s*(\d+)"] - - for patron in patrons: - try: - matches = re.compile(patron, re.I).search(title) - if matches: - filename = matches.group(1) + "x" + matches.group(2).zfill(2) - break - except: - pass - - logger.info("'" + title + "' -> '" + filename + "'") - - return filename - - -def get_sha1(cadena): - try: - import hashlib - devuelve = hashlib.sha1(cadena).hexdigest() - except: - import sha - import binascii - devuelve = binascii.hexlify(sha.new(cadena).digest()) - - return devuelve - - -def get_md5(cadena): - try: - import hashlib - devuelve = hashlib.md5(cadena).hexdigest() - except: - import md5 - import binascii - devuelve = binascii.hexlify(md5.new(cadena).digest()) - - return devuelve diff --git a/core/servertools.py b/core/servertools.py index 17cb0dce..9a1ea592 100644 --- a/core/servertools.py +++ b/core/servertools.py @@ -506,8 +506,8 @@ def get_server_json(server_name): def get_server_host(server_name): - from core import scrapertoolsV2 - return [scrapertoolsV2.get_domain_from_url(pattern['url']) for pattern in get_server_json(server_name)['find_videos']['patterns']] + from core import scrapertools + return [scrapertools.get_domain_from_url(pattern['url']) for pattern 
in get_server_json(server_name)['find_videos']['patterns']]


 def get_server_controls_settings(server_name):
diff --git a/core/support.py b/core/support.py
index 7fac3ee5..a0f6954a 100755
--- a/core/support.py
+++ b/core/support.py
@@ -10,7 +10,7 @@ import urlparse
 import xbmcaddon

 from channelselector import thumb
-from core import httptools, scrapertoolsV2, servertools, tmdb, channeltools
+from core import httptools, scrapertools, servertools, tmdb, channeltools
 from core.item import Item
 from lib import unshortenit
 from platformcode import logger, config
@@ -21,7 +21,7 @@ def hdpass_get_servers(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data.replace('\n', '')
     patron = r'<iframe src="([^"]+)"[^>]+><\/iframe>'
-    url = scrapertoolsV2.find_single_match(data, patron).replace("?alta", "")
+    url = scrapertools.find_single_match(data, patron).replace("?alta", "")
     url = url.replace("&download=1", "")
     if 'https' not in url:
         url = 'https:' + url
@@ -37,20 +37,20 @@ def hdpass_get_servers(item):
     patron_mir = '<div class="row mobileMirrs">(.*?)</div>'
     patron_media = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed" value="([^"]+)">'

-    res = scrapertoolsV2.find_single_match(data, patron_res)
+    res = scrapertools.find_single_match(data, patron_res)

     itemlist = []
-    for res_url, res_video in scrapertoolsV2.find_multiple_matches(res, '<option value="([^"]+)">([^<]+?)</option>'):
+    for res_url, res_video in scrapertools.find_multiple_matches(res, '<option value="([^"]+)">([^<]+?)</option>'):
         data = httptools.downloadpage(urlparse.urljoin(url, res_url)).data.replace('\n', '')
-        mir = scrapertoolsV2.find_single_match(data, patron_mir)
+        mir = scrapertools.find_single_match(data, patron_mir)

-        for mir_url, srv in scrapertoolsV2.find_multiple_matches(mir, '<option value="([^"]+)">([^<]+?)</option>'):
+        for mir_url, srv in scrapertools.find_multiple_matches(mir, '<option value="([^"]+)">([^<]+?)</option>'):
             data = httptools.downloadpage(urlparse.urljoin(url, mir_url)).data.replace('\n', '')
-            for media_label, media_url in scrapertoolsV2.find_multiple_matches(data, patron_media):
+            for media_label, media_url in scrapertools.find_multiple_matches(data, patron_media):
                 itemlist.append(Item(channel=item.channel,
                                      action="play",
                                      fulltitle=item.fulltitle,
@@ -168,13 +168,13 @@ def scrapeLang(scraped, lang, longtitle):
     return language, longtitle

 def cleantitle(title):
-    cleantitle = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(title).replace('"', "'").replace('×', 'x').replace('–', '-')).strip()
+    cleantitle = scrapertools.htmlclean(scrapertools.decodeHtmlentities(title).replace('"', "'").replace('×', 'x').replace('–', '-')).strip()
     return cleantitle

 def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, typeContentDict, typeActionDict, blacklist, search, pag, function, lang):
     itemlist = []
-    log("scrapeBlock qui", block, patron)
-    matches = scrapertoolsV2.find_multiple_matches_groups(block, patron)
+    log("scrapeBlock qui")
+    matches = scrapertools.find_multiple_matches_groups(block, patron)
     log('MATCHES =', matches)

     if debug:
@@ -214,7 +214,7 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
             for kk in known_keys:
                 val = match[listGroups.index(kk)] if kk in listGroups else ''
                 if val and (kk == "url" or kk == 'thumb') and 'http' not in val:
-                    val = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
+                    val = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
                 scraped[kk] = val

             if scraped['season']:
@@ -227,7 +227,7 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
                     episode = ''
                 else:
                     episode = re.sub(r'\s-\s|-|x|–|&#215;|×', 'x', scraped['episode']) if scraped['episode'] else ''
-                second_episode = scrapertoolsV2.find_single_match(episode,'x\d+x(\d+)')
+                second_episode = scrapertools.find_single_match(episode, 'x\d+x(\d+)')
                 if second_episode:
                     episode = re.sub(r'(\d+x\d+)x\d+',r'\1-', episode) + second_episode.zfill(2)
                 #episode = re.sub(r'\s-\s|-|x|–|×', 'x', scraped['episode']) if scraped['episode'] else ''
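[Editor's note: the double-episode handling in the hunk above first collapses the various separators to 'x', then rewrites a trailing second episode number as a range. A minimal standalone sketch of those two regexes; the sample title is illustrative, not taken from the addon:

    # -*- coding: utf-8 -*-
    import re

    episode = '2x09x10'  # e.g. a scraped double episode
    second_episode = re.search(r'x\d+x(\d+)', episode)
    if second_episode:
        # '2x09x10' -> '2x09-' + '10' -> '2x09-10'
        episode = re.sub(r'(\d+x\d+)x\d+', r'\1-', episode) + second_episode.group(1).zfill(2)
    print episode  # -> 2x09-10
]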
@@ -257,18 +257,18 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
             if scraped["plot"]:
                 infolabels['plot'] = plot
             if scraped['duration']:
-                matches = scrapertoolsV2.find_multiple_matches(scraped['duration'],
+                matches = scrapertools.find_multiple_matches(scraped['duration'],
                                                                r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)')
                 for h, m in matches:
                     scraped['duration'] = int(h) * 60 + int(m)
                 if not matches:
-                    scraped['duration'] = scrapertoolsV2.find_single_match(scraped['duration'], r'(\d+)')
+                    scraped['duration'] = scrapertools.find_single_match(scraped['duration'], r'(\d+)')
                 infolabels['duration'] = int(scraped['duration']) * 60
             if scraped['genere']:
-                genres = scrapertoolsV2.find_multiple_matches(scraped['genere'], '[A-Za-z]+')
+                genres = scrapertools.find_multiple_matches(scraped['genere'], '[A-Za-z]+')
                 infolabels['genere'] = ", ".join(genres)
             if scraped["rating"]:
-                infolabels['rating'] = scrapertoolsV2.decodeHtmlentities(scraped["rating"])
+                infolabels['rating'] = scrapertools.decodeHtmlentities(scraped["rating"])

             AC = CT = ''
             if typeContentDict:
@@ -377,7 +377,18 @@ def scrape(func):
             log('PATRON= ', patron)

             if not data:
-                data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True, session=item.session).data.replace("'", '"')
+                page = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True, session=item.session)
+                # if url may be changed and channel has findhost to update
+                if (not page.data or scrapertools.get_domain_from_url(page.url) != scrapertools.get_domain_from_url(item.url)) and 'findhost' in func.__globals__:
+                    host = func.__globals__['findhost']()
+                    parse = list(urlparse.urlparse(item.url))
+                    from core import jsontools
+                    jsontools.update_node(host, func.__module__.split('.')[-1], 'url')
+                    parse[1] = scrapertools.get_domain_from_url(host)
+                    item.url = urlparse.urlunparse(parse)
+                    page = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True,
+                                                  session=item.session)
+                data = page.data.replace("'", '"')
                 data = re.sub('\n|\t', ' ', data)
                 data = re.sub(r'>\s+<', '> <', data)
                 # replace all ' with " and eliminate newline, so we don't need to worry about
@@ -385,7 +396,7 @@
             if patronBlock:
                 if debugBlock:
                     regexDbg(item, patronBlock, headers, data)
-                blocks = scrapertoolsV2.find_multiple_matches_groups(data, patronBlock)
+                blocks = scrapertools.find_multiple_matches_groups(data, patronBlock)
                 block = ""
                 for bl in blocks:
                     # log(len(blocks),bl)
@@ -434,7 +445,7 @@
             if anime:
                 if function == 'episodios' or item.action == 'episodios': autorenumber.renumber(itemlist, item, 'bold')
                 else: autorenumber.renumber(itemlist)
-            if anime and autorenumber.check(item) == False and not scrapertoolsV2.find_single_match(itemlist[0].title, r'(\d+.\d+)'):
+            if anime and autorenumber.check(item) == False and not scrapertools.find_single_match(itemlist[0].title, r'(\d+.\d+)'):
                 pass
             else:
                 if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
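[Editor's note: the findhost recovery added above only swaps the network location of the saved URL and writes the new host back into the channel's json. The urlparse round-trip it relies on can be sketched as follows; swap_domain and the sample URLs are illustrative, not part of the addon:

    import urlparse  # Python 2 module, as used throughout the addon

    def swap_domain(url, new_host):
        # keep scheme, path and query; replace only the domain
        parse = list(urlparse.urlparse(url))
        parse[1] = urlparse.urlparse(new_host).netloc
        return urlparse.urlunparse(parse)

    print swap_domain('https://oldhost.example/serie/lista?p=2', 'https://newhost.example')
    # -> https://newhost.example/serie/lista?p=2
]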
@@ -462,7 +473,7 @@ def dooplay_get_links(item, host):

     data = httptools.downloadpage(item.url).data.replace("'", '"')
     patron = r'<li id="player-option-\d+" class="dooplay_player_option" data-type="([^"]+)" data-post="([^"]+)" data-nume="([^"]+)"><span class="title">([^<>]+)</span>(?:<span class="server">([^<>]+))?'
-    matches = scrapertoolsV2.find_multiple_matches(data, patron)
+    matches = scrapertools.find_multiple_matches(data, patron)

     ret = []

@@ -474,7 +485,7 @@ def dooplay_get_links(item, host):
                 "type": type
             })
             dataAdmin = httptools.downloadpage(host + '/wp-admin/admin-ajax.php', post=postData,headers={'Referer': item.url}).data
-            link = scrapertoolsV2.find_single_match(dataAdmin, "<iframe src=\"([^\"]+)\"")
+            link = scrapertools.find_single_match(dataAdmin, "<iframe src=\"([^\"]+)\"")
-            match = scrapertoolsV2.find_single_match(data, r'<iframe src="([^"]+)"') if not match else match
+            match = scrapertools.find_single_match(data, r'<iframe src="([^"]+)"') if not match else match
             if not match:
                 from lib import jsunpack
                 try:
-                    data = scrapertoolsV2.find_single_match(data.replace('\n', ''), r"(eval\s?\(function\(p,a,c,k,e,d.*?)")
+                    data = scrapertools.find_single_match(data.replace('\n', ''), r"(eval\s?\(function\(p,a,c,k,e,d.*?)")
                     data = jsunpack.unpack(data)
                     logger.debug("##### play /link/ unpack ##\n%s\n##" % data)
                 except:
                     logger.debug("##### The content is yet unpacked ##\n%s\n##" % data)
                     data = scrapertools.find_single_match(data, r'var link(?:\s)?=(?:\s)?"([^"]+)";')
                 data, c = unshortenit.unwrap_30x_only(data)
             else:
                 data = match
@@ -626,8 +637,8 @@ def menu(func):
     item = args['item']

     host = func.__globals__['host']
-    list_servers = func.__globals__['list_servers']
-    list_quality = func.__globals__['list_quality']
+    list_servers = func.__globals__['list_servers'] if 'list_servers' in func.__globals__ else 'directo'
+    list_quality = func.__globals__['list_quality'] if 'list_quality' in func.__globals__ else 'default'
     filename = func.__module__.split('.')[1]
     global_search = False
     # listUrls = ['film', 'filmSub', 'tvshow', 'tvshowSub', 'anime', 'animeSub', 'search', 'top', 'topSub']
@@ -744,7 +755,7 @@ def typo(string, typography=''):
     if 'submenu' in string:
         string = u"\u2022\u2022 ".encode('utf-8') + re.sub(r'\ssubmenu','',string)
     if 'color' in string:
-        color = scrapertoolsV2.find_single_match(string,'color ([a-z]+)')
+        color = scrapertools.find_single_match(string, 'color ([a-z]+)')
        if color == 'kod' or color == '': color = kod_color
        string = '[COLOR '+ color +']' + re.sub(r'\scolor\s([a-z]+)','',string) + '[/COLOR]'
     if 'bold' in string:
@@ -776,13 +787,13 @@ def match(item, patron='', patronBlock='', headers='', url='', post=''):
     log('DATA= ', data)

     if patronBlock:
-        block = scrapertoolsV2.find_single_match(data, patronBlock)
+        block = scrapertools.find_single_match(data, patronBlock)
         log('BLOCK= ',block)
     else:
         block = data

     if patron:
-        matches = scrapertoolsV2.find_multiple_matches(block, patron)
+        matches = scrapertools.find_multiple_matches(block, patron)
         log('MATCHES= ',matches)

     return matches, block
@@ -890,12 +901,12 @@ def nextPage(itemlist, item, data='', patron='', function_or_level=1, next_page=
     # If the call is direct, leave it blank
     action = inspect.stack()[function_or_level][3] if type(function_or_level) == int else function_or_level
     if next_page == '':
-        next_page = scrapertoolsV2.find_single_match(data, patron)
+        next_page = scrapertools.find_single_match(data, patron)

     if next_page != "":
         if resub: next_page = re.sub(resub[0], resub[1], next_page)
         if 'http' not in next_page:
-            next_page = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + next_page
+            next_page = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') + next_page
         next_page = re.sub('&amp;', '&', next_page)
         log('NEXT= ', next_page)
         itemlist.append(
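[Editor's note: the controls() hunk below implements the new hide-servers-on-autoplay option from the changelog: instead of always returning the server list, it inspects the call stack and only returns it when autoplay is off or the call comes from the downloads/videolibrary paths. The stack test boils down to this inspect idiom; a toy sketch, not addon code:

    import inspect

    def called_from(name):
        # frame records are (frame, filename, lineno, function, context, index);
        # check the filenames of the two nearest callers
        return any(name in frame[1] for frame in inspect.stack()[1:3])
]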
@@ -970,6 +981,7 @@ def controls(itemlist, item, AutoPlay=True, CheckLinks=True, down_load=True):
     channel_node = autoplay_node.get(item.channel, {})
     settings_node = channel_node.get('settings', {})
     AP = get_setting('autoplay') or settings_node['active']
+    APS = get_setting('autoplay_server_list')

     if CL and not AP:
         if get_setting('checklinks', item.channel):
@@ -982,15 +994,27 @@ def controls(itemlist, item, AutoPlay=True, CheckLinks=True, down_load=True):
             checklinks_number = get_setting('checklinks_number')
             itemlist = servertools.check_list_links(itemlist, checklinks_number)

-    if AutoPlay == True and inspect.stack()[4][3] != 'start_download':
+    if AutoPlay == True and not 'downloads' in inspect.stack()[3][1] + inspect.stack()[4][1]:
         autoplay.start(itemlist, item)

     if item.contentChannel != 'videolibrary':
         videolibrary(itemlist, item, function_level=3)
     if get_setting('downloadenabled') and down_load == True:
         download(itemlist, item, function_level=3)
-    return itemlist
+
+    VL = False
+    try:
+        if 'downloads' in inspect.stack()[3][1] + inspect.stack()[4][1] or \
+                inspect.stack()[4][3] == 'play_from_library' or \
+                inspect.stack()[5][3] == 'play_from_library' or \
+                'videolibrary' in inspect.stack()[3][1] or \
+                'videolibrary' in inspect.stack()[4][1]:
+            VL = True
+    except:
+        pass
+    if not AP or VL or not APS:
+        return itemlist


 def filterLang(item, itemlist):
-    import channeltools
+    # import channeltools
     list_language = channeltools.get_lang(item.channel)
     if len(list_language) > 1:
         from specials import filtertools
diff --git a/default.py b/default.py
index 314f38af..e52e6d5c 100644
--- a/default.py
+++ b/default.py
@@ -5,17 +5,19 @@
 import os
 import sys
+from threading import Thread

 import xbmc

 from platformcode import config, logger
-import ssl
-logger.info(ssl.OPENSSL_VERSION)

 logger.info("init...")

 librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib'))
 sys.path.insert(0, librerias)

+if not config.dev_mode():
+    from platformcode import updater
+    Thread(target=updater.timer).start()

 from platformcode import launcher
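[Editor's note: lib/doh.py, added next, is the core of the new DNS override: domain names are resolved through Cloudflare's DNS-over-HTTPS JSON API, bypassing ISP resolvers that blackhole the video hosts. A minimal usage sketch; the domain is illustrative and doh.query is the function defined below:

    from lib import doh

    ips = doh.query('example.org')  # e.g. ['93.184.216.34'], or None/[] on failure
    if ips:
        print 'DNS override: example.org -> %s' % ips[0]
]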
diff --git a/lib/doh.py b/lib/doh.py
new file mode 100644
index 00000000..93d8b12e
--- /dev/null
+++ b/lib/doh.py
@@ -0,0 +1,77 @@
+# https://github.com/stamparm/python-doh
+from __future__ import print_function
+
+import json
+import re
+import socket
+import ssl
+import subprocess
+import sys
+
+PY3 = sys.version_info >= (3, 0)
+
+if hasattr(ssl, "_create_unverified_context"):
+    ssl._create_default_https_context = ssl._create_unverified_context
+    DOH_SERVER = "1.1.1.1"  # Note: to prevent potential blocking of service based on DNS name
+else:
+    DOH_SERVER = "cloudflare-dns.com"  # Alternative servers: doh.securedns.eu, doh-de.blahdns.com, doh-jp.blahdns.com
+
+if PY3:
+    import urllib.request
+    _urlopen = urllib.request.urlopen
+    _Request = urllib.request.Request
+else:
+    import urllib2
+    _urlopen = urllib2.urlopen
+    _Request = urllib2.Request
+
+def query(name, type='A', server=DOH_SERVER, path="/dns-query", fallback=True, verbose=False):
+    """
+    Returns domain name query results retrieved by using DNS over HTTPS protocol
+    # Reference: https://developers.cloudflare.com/1.1.1.1/dns-over-https/json-format/
+    >>> query("one.one.one.one", fallback=False)
+    ['1.0.0.1', '1.1.1.1']
+    >>> query("one", "NS")
+    ['a.nic.one.', 'b.nic.one.', 'c.nic.one.', 'd.nic.one.']
+    """
+
+    retval = None
+
+    try:
+        req = _Request("https://%s%s?name=%s&type=%s" % (server, path, name, type), headers={"Accept": "application/dns-json"})
+        content = _urlopen(req).read().decode()
+        reply = json.loads(content)
+
+        if "Answer" in reply:
+            answer = reply["Answer"]
+            retval = [_["data"] for _ in answer]
+        else:
+            retval = []
+    except Exception as ex:
+        if verbose:
+            print("Exception occurred: '%s'" % ex)
+
+    if retval is None and fallback:
+        if type == 'A':
+            try:
+                retval = socket.gethostbyname_ex(name)[2]
+            except (socket.error, IndexError):
+                pass
+
+        if retval is None:
+            process = subprocess.Popen(("nslookup", "-q=%s" % type, name), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+            content = (process.communicate()[0] or "").decode().replace("\r", "")
+
+            if "\n\n" in content and "can't" not in content.lower():
+                answer = content.split("\n\n", 1)[-1]
+                retval = re.findall(r"(?m)^%s.+= ([^=,\n]+)$" % re.escape(name), answer) or re.findall(r"Address: (.+)", answer)
+
+                if not retval:
+                    match = re.search(r"Addresses: ([\s\d.]+)", answer)
+                    if match:
+                        retval = re.split(r"\s+", match.group(1).strip())
+
+    if not PY3 and retval:
+        retval = [_.encode() for _ in retval]
+
+    return retval
\ No newline at end of file
diff --git a/lib/gktools.py b/lib/gktools.py
deleted file mode 100644
index 55574e21..00000000
--- a/lib/gktools.py
+++ /dev/null
@@ -1,315 +0,0 @@
-# -*- coding: utf-8 -*-
-
-'''
-gktools son un conjunto de funciones para ayudar a resolver enlaces a videos con "protección GK".
-Lo de protección gk dudo que exista, le he llamado así pq los primeros ejemplos vistos se eran gkpluginsphp y gkpedia.
-
-Características "GK" :
-- Utiliza una cookie __cfduid
-- Calcula un token criptográfico en función de un texto y una clave
-- El texto se saca del html (por ejemplo de meta name="google-site-verification", pero puede ser más complejo)
-- La clave para encriptar se calcula en js ofuscados que carga el html
-- Se llama a otra url con una serie de parámetros, como el token, y de allí se obtienen los videos finales.
-
-Howto:
-1- descargar página
-2- extraer datos y calcular los necesarios
-3- descargar segunda página con el token calculado
-4- extraer videos
-
-El paso 2 es con diferencia el más variable y depende mucho de cada web/servidor!
-Desofuscando los js se pueden ver los datos propios que necesita cada uno -(el texto a encriptar, la clave a usar, la url dónde hay que llamar y los parámetros) - -Ver ejemplos en el código de los canales animeyt y pelispedia - - -Created for Alfa-addon by Alfa Developers Team 2018 -''' - -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -import os, base64, json, hashlib, urlparse -from core import httptools -from core import scrapertools -from platformcode import logger -from aadecode import decode as aadecode - -# Descarga página y captura la petición de cookie -# ----------------------------------------------- -def get_data_and_cookie(item, ck_name='__cfduid'): - - headers = {'Referer': item.referer} - resp = httptools.downloadpage(item.url, headers=headers, cookies=False) - # ~ with open('gk_play1.html', 'w') as f: f.write(resp.data); f.close() - - ck_value = '' - if ck_name != '': - for h in resp.headers: - ck = scrapertools.find_single_match(resp.headers[h], '%s=([^;]*)' % ck_name) - if ck: - ck_value = ck - break - - return resp.data, ck_value - - -# Descarga página usando una cookie concreta -# ------------------------------------------ -def get_data_with_cookie(url, ck_value='', referer='', ck_name='__cfduid'): - - headers = {'Cookie': ck_name+'='+ck_value} - if referer != '': headers['referer'] = referer - data = httptools.downloadpage(url, headers=headers, cookies=False).data - # ~ with open('gk_play2.html', 'w') as f: f.write(data); f.close() - - return data - - -# Descarga json usando una cookie concreta -# ---------------------------------------- -def get_data_json(url, post, ck_value='', referer='', ck_name='__cfduid'): - - headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': ck_name+'='+ck_value} - if referer != '': headers['referer'] = referer - - data = httptools.downloadpage(url, post=post, headers=headers, cookies=False).data - # ~ with open('gk_play3.html', 'w') as f: f.write(data); f.close() - - return data - - -# Obtiene link de una llamada javascript Play() o de la url -# --------------------------------------------------------- -def get_play_link_id(data, url): - - playparms = scrapertools.find_single_match(data, 'Play\("([^"]*)","([^"]*)","([^"]*)"') - if playparms: - link = playparms[0] - subtitle = '' if playparms[1] == '' or playparms[2] == '' else playparms[2] + playparms[1] + '.srt' - else: - subtitle = '' - link = scrapertools.find_single_match(data, 'Play\("([^"]*)"') - if not link: - link = scrapertools.find_single_match(url, 'id=([^;]*)') - - return link, subtitle - - -# Extraer enlaces a videos de datos json -# -------------------------------------- -def extraer_enlaces_json(data, referer, subtitle=''): - itemlist = [] - - # Ejemplos: - # {"Animeyt":[{"file":"https:\/\/storage.googleapis.com\/my-project-yt-195318.appspot.com\/slow.mp4","type":"mp4","label":"1080p"}]} - # {"link":[{"link":"http:\/\/video8.narusaku.tv\/static\/720p\/2.1208982.2039540?md5=B64FKYNbFuWvxkGcSbtz2Q&expires=1528839657","label":"720p","type":"mp4"},{"link":"http:\/\/video5.narusaku.tv\/static\/480p\/2.1208982.2039540?md5=yhLG_3VghEUSd5YlCXOTBQ&expires=1528839657","label":"480p","type":"mp4","default":true},{"link":"http:\/\/video3.narusaku.tv\/static\/360p\/2.1208982.2039540?md5=vC0ZJkxRwV1rVBdeF7D4iA&expires=1528839657","label":"360p","type":"mp4"},{"link":"http:\/\/video2.narusaku.tv\/static\/240p\/2.1208982.2039540?md5=b-y_-rgrLMW7hJwFQSD8Tw&expires=1528839657","label":"240p","type":"mp4"}]} - # 
{"link":"https:\/\/storage.googleapis.com\/cloudflare-caching-pelispedia.appspot.com\/cache\/16050.mp4","type":"mp4"} - # {"Harbinger":[{"Harbinger":"...","type":"...","label":"..."}], ...} - - data = data.replace('"Harbinger"', '"file"') - - # Intentar como json - # ------------------ - try: - json_data = json.loads(data) - enlaces = analizar_enlaces_json(json_data) - for enlace in enlaces: - url = enlace['link'] if 'link' in enlace else enlace['file'] - if not url.startswith('http'): url = aadecode(base64.b64decode(url)) # necesario para "Harbinger" - if not url.startswith('http'): url = decode_rijndael(url) # post-"Harbinger" en algunos casos - tit = '' - if 'type' in enlace: tit += '[%s]' % enlace['type'] - if 'label' in enlace: tit += '[%s]' % enlace['label'] - if tit == '': tit = '.mp4' - - itemlist.append([tit, corregir_url(url, referer), 0, subtitle]) - - # Sino, intentar como texto - # ------------------------- - except: - matches = scrapertools.find_multiple_matches(data, '"link"\s*:\s*"([^"]*)"\s*,\s*"label"\s*:\s*"([^"]*)"\s*,\s*"type"\s*:\s*"([^"]*)"') - if matches: - for url, lbl, typ in matches: - itemlist.append(['[%s][%s]' % (typ, lbl), corregir_url(url, referer), 0, subtitle]) - else: - url = scrapertools.find_single_match(data, '"link"\s*:\s*"([^"]*)"') - if url: - itemlist.append(['.mp4', corregir_url(url, referer), 0, subtitle]) - - - return itemlist - - -# Función recursiva que busca videos en un diccionario -# ---------------------------------------------------- -def analizar_enlaces_json(d): - itemlist = [] - found = {} - for k, v in d.iteritems(): - if k in ['file','link','type','label'] and not isinstance(v, list): - found[k] = v - - if isinstance(v, list): - for l in v: - if isinstance(l, dict): itemlist += analizar_enlaces_json(l) - - if 'file' in found or 'link' in found: - itemlist.append(found) - - return itemlist - - -# Correcciones en las urls finales obtenidas -# ------------------------------------------ -def corregir_url(url, referer): - url = url.replace('\/', '/') - if 'chomikuj.pl/' in url: url += "|Referer=%s" % referer - return url - - - -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - -# Conversion tipo hexa que hay en el js -# ------------------------------------- -def toHex(txt): - ret = '' - for i in range(len(txt)): - ret += str(hex(ord(txt[i]))).replace('x','')[-2:] - return ret - - -# Subrutinas de encriptación -# -------------------------- - -def md5_dominio(url): # sutorimux/kubechi - h = hashlib.md5(urlparse.urlparse(url).netloc) - return h.hexdigest() - - -def transforma_gsv(gsv, valor): - llista = range(256) - a = 0 - for i in range(256): - a = (a + llista[i] + ord(gsv[i % len(gsv)]) ) % 256 - b = llista[i] - llista[i] = llista[a] - llista[a] = b - - ret = '' - a = 0; b= 0 - for i in range(len(valor)): - a = (a + 1) % 256 - b = (b + llista[a]) % 256 - c = llista[a] - llista[a] = llista[b] - llista[b] = c - ret += chr(ord(valor[i]) ^ llista[(llista[a] + llista[b]) % 256]) - - return base64.b64encode(ret) - - - -# Codificar/Decodificar con Rijndael -# ---------------------------------- - -def encode_rijndael(msg, IV, key): - import rijndael - return rijndael.cbc_encrypt(msg, IV, key) - - -def decode_rijndael(txt, preIV='b3512f4972d314da9', key='3e1a854e7d5835ab99d99a29afec8bbb'): - import rijndael - msg = base64.b64decode(txt[:-15]) - IV = preIV + txt[-15:] - deco = rijndael.cbc_decrypt(msg, IV, key) - return deco.replace(chr(0), '') - - -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - -# 
Generar un token válido a partir de un texto y una clave -# -------------------------------------------------------- - -# gsv: google-site-verification, obtenido de '> 0x2] - segon = _0x382d28[((valors[0] & 0x3) << 0x4) | (valors[1] >> 0x4)] - tercer = _0x382d28[((valors[1] & 0xf) << 0x2) | (valors[2] >> 0x6)] - quart = _0x382d28[valors[2] & 0x3f] - retorn += primer + segon + tercer + quart - - valors = [0, 0, 0] - cicle = 0 - - return retorn - - -def obtener_cripto(password, plaintext): - salt = os.urandom(8) - - paddingLength = len(plaintext) % 16 - if paddingLength == 0: - paddedPlaintext = plaintext - else: - dif = 16 - paddingLength - paddedPlaintext = plaintext + chr(dif)*dif - - kdf = evpKDF(password, salt) - iv = kdf['iv'] - - try: # Intentar con librería AES del sistema - from Crypto.Cipher import AES - cipherSpec = AES.new(kdf['key'], AES.MODE_CBC, iv) - except: # Si falla intentar con librería del addon - import jscrypto - cipherSpec = jscrypto.new(kdf['key'], jscrypto.MODE_CBC, iv) - ciphertext = cipherSpec.encrypt(paddedPlaintext) - - return json.dumps({'ct': base64.b64encode(ciphertext), 'iv': iv.encode("hex"), 's': salt.encode("hex")}, sort_keys=True, separators=(',', ':')) - - -def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md5"): - target_key_size = key_size + iv_size - derived_bytes = "" - number_of_derived_words = 0 - block = None - hasher = hashlib.new(hash_algorithm) - while number_of_derived_words < target_key_size: - if block is not None: - hasher.update(block) - - hasher.update(passwd) - hasher.update(salt) - block = hasher.digest() - hasher = hashlib.new(hash_algorithm) - - for i in range(1, iterations): - hasher.update(block) - block = hasher.digest() - hasher = hashlib.new(hash_algorithm) - - derived_bytes += block[0: min(len(block), (target_key_size - number_of_derived_words) * 4)] - - number_of_derived_words += len(block)/4 - - return { - "key": derived_bytes[0: key_size * 4], - "iv": derived_bytes[key_size * 4:] - } diff --git a/lib/jjdecode.py b/lib/jjdecode.py deleted file mode 100644 index be91f887..00000000 --- a/lib/jjdecode.py +++ /dev/null @@ -1,312 +0,0 @@ -# -*- coding: utf-8 -*- -#!/usr/bin/env python -# -# Python version of the jjdecode function written by Syed Zainudeen -# http://csc.cs.utm.my/syed/images/files/jjdecode/jjdecode.html -# -# +NCR/CRC! [ReVeRsEr] - crackinglandia@gmail.com -# Thanks to Jose Miguel Esparza (@EternalTodo) for the final push to make it work! 
-# - -import re - -class JJDecoder(object): - - def __init__(self, jj_encoded_data): - self.encoded_str = jj_encoded_data - - - def clean(self): - return re.sub('^\s+|\s+$', '', self.encoded_str) - - - def checkPalindrome(self, Str): - startpos = -1 - endpos = -1 - gv, gvl = -1, -1 - - index = Str.find('"\'\\"+\'+",') - - if index == 0: - startpos = Str.find('$$+"\\""+') + 8 - endpos = Str.find('"\\"")())()') - gv = Str[Str.find('"\'\\"+\'+",')+9:Str.find('=~[]')] - gvl = len(gv) - else: - gv = Str[0:Str.find('=')] - gvl = len(gv) - startpos = Str.find('"\\""+') + 5 - endpos = Str.find('"\\"")())()') - - return (startpos, endpos, gv, gvl) - - - def decode(self): - - self.encoded_str = self.clean() - startpos, endpos, gv, gvl = self.checkPalindrome(self.encoded_str) - - if startpos == endpos: - raise Exception('No data!') - - data = self.encoded_str[startpos:endpos] - - b = ['___+', '__$+', '_$_+', '_$$+', '$__+', '$_$+', '$$_+', '$$$+', '$___+', '$__$+', '$_$_+', '$_$$+', '$$__+', '$$_$+', '$$$_+', '$$$$+'] - - str_l = '(![]+"")[' + gv + '._$_]+' - str_o = gv + '._$+' - str_t = gv + '.__+' - str_u = gv + '._+' - - str_hex = gv + '.' - - str_s = '"' - gvsig = gv + '.' - - str_quote = '\\\\\\"' - str_slash = '\\\\\\\\' - - str_lower = '\\\\"+' - str_upper = '\\\\"+' + gv + '._+' - - str_end = '"+' - - out = '' - while data != '': - # l o t u - if data.find(str_l) == 0: - data = data[len(str_l):] - out += 'l' - continue - elif data.find(str_o) == 0: - data = data[len(str_o):] - out += 'o' - continue - elif data.find(str_t) == 0: - data = data[len(str_t):] - out += 't' - continue - elif data.find(str_u) == 0: - data = data[len(str_u):] - out += 'u' - continue - - # 0123456789abcdef - if data.find(str_hex) == 0: - data = data[len(str_hex):] - - for i in range(len(b)): - if data.find(b[i]) == 0: - data = data[len(b[i]):] - out += '%x' % i - break - continue - - # start of s block - if data.find(str_s) == 0: - data = data[len(str_s):] - - # check if "R - if data.find(str_upper) == 0: # r4 n >= 128 - data = data[len(str_upper):] # skip sig - ch_str = '' - for i in range(2): # shouldn't be more than 2 hex chars - # gv + "."+b[ c ] - if data.find(gvsig) == 0: - data = data[len(gvsig):] - for k in range(len(b)): # for every entry in b - if data.find(b[k]) == 0: - data = data[len(b[k]):] - ch_str = '%x' % k - break - else: - break - - out += chr(int(ch_str, 16)) - continue - - elif data.find(str_lower) == 0: # r3 check if "R // n < 128 - data = data[len(str_lower):] # skip sig - - ch_str = '' - ch_lotux = '' - temp = '' - b_checkR1 = 0 - for j in range(3): # shouldn't be more than 3 octal chars - if j > 1: # lotu check - if data.find(str_l) == 0: - data = data[len(str_l):] - ch_lotux = 'l' - break - elif data.find(str_o) == 0: - data = data[len(str_o):] - ch_lotux = 'o' - break - elif data.find(str_t) == 0: - data = data[len(str_t):] - ch_lotux = 't' - break - elif data.find(str_u) == 0: - data = data[len(str_u):] - ch_lotux = 'u' - break - - # gv + "."+b[ c ] - if data.find(gvsig) == 0: - temp = data[len(gvsig):] - for k in range(8): # for every entry in b octal - if temp.find(b[k]) == 0: - if int(ch_str + str(k), 8) > 128: - b_checkR1 = 1 - break - - ch_str += str(k) - data = data[len(gvsig):] # skip gvsig - data = data[len(b[k]):] - break - - if b_checkR1 == 1: - if data.find(str_hex) == 0: # 0123456789abcdef - data = data[len(str_hex):] - # check every element of hex decode string for a match - for i in range(len(b)): - if data.find(b[i]) == 0: - data = data[len(b[i]):] - ch_lotux = '%x' % i - 
break - break - else: - break - - out += chr(int(ch_str,8)) + ch_lotux - continue - - else: # "S ----> "SR or "S+ - # if there is, loop s until R 0r + - # if there is no matching s block, throw error - - match = 0; - n = None - - # searching for matching pure s block - while True: - n = ord(data[0]) - if data.find(str_quote) == 0: - data = data[len(str_quote):] - out += '"' - match += 1 - continue - elif data.find(str_slash) == 0: - data = data[len(str_slash):] - out += '\\' - match += 1 - continue - elif data.find(str_end) == 0: # reached end off S block ? + - if match == 0: - raise '+ no match S block: ' + data - data = data[len(str_end):] - break # step out of the while loop - elif data.find(str_upper) == 0: # r4 reached end off S block ? - check if "R n >= 128 - if match == 0: - raise 'no match S block n>128: ' + data - data = data[len(str_upper):] # skip sig - - ch_str = '' - ch_lotux = '' - - for j in range(10): # shouldn't be more than 10 hex chars - if j > 1: # lotu check - if data.find(str_l) == 0: - data = data[len(str_l):] - ch_lotux = 'l' - break - elif data.find(str_o) == 0: - data = data[len(str_o):] - ch_lotux = 'o' - break - elif data.find(str_t) == 0: - data = data[len(str_t):] - ch_lotux = 't' - break - elif data.find(str_u) == 0: - data = data[len(str_u):] - ch_lotux = 'u' - break - - # gv + "."+b[ c ] - if data.find(gvsig) == 0: - data = data[len(gvsig):] # skip gvsig - for k in range(len(b)): # for every entry in b - if data.find(b[k]) == 0: - data = data[len(b[k]):] - ch_str += '%x' % k - break - else: - break # done - out += chr(int(ch_str, 16)) - break # step out of the while loop - elif data.find(str_lower) == 0: # r3 check if "R // n < 128 - if match == 0: - raise 'no match S block n<128: ' + data - - data = data[len(str_lower):] # skip sig - - ch_str = '' - ch_lotux = '' - temp = '' - b_checkR1 = 0 - - for j in range(3): # shouldn't be more than 3 octal chars - if j > 1: # lotu check - if data.find(str_l) == 0: - data = data[len(str_l):] - ch_lotux = 'l' - break - elif data.find(str_o) == 0: - data = data[len(str_o):] - ch_lotux = 'o' - break - elif data.find(str_t) == 0: - data = data[len(str_t):] - ch_lotux = 't' - break - elif data.find(str_u) == 0: - data = data[len(str_u):] - ch_lotux = 'u' - break - - # gv + "."+b[ c ] - if data.find(gvsig) == 0: - temp = data[len(gvsig):] - for k in range(8): # for every entry in b octal - if temp.find(b[k]) == 0: - if int(ch_str + str(k), 8) > 128: - b_checkR1 = 1 - break - - ch_str += str(k) - data = data[len(gvsig):] # skip gvsig - data = data[len(b[k]):] - break - - if b_checkR1 == 1: - if data.find(str_hex) == 0: # 0123456789abcdef - data = data[len(str_hex):] - # check every element of hex decode string for a match - for i in range(len(b)): - if data.find(b[i]) == 0: - data = data[len(b[i]):] - ch_lotux = '%x' % i - break - else: - break - out += chr(int(ch_str, 8)) + ch_lotux - break # step out of the while loop - elif (0x21 <= n and n <= 0x2f) or (0x3A <= n and n <= 0x40) or ( 0x5b <= n and n <= 0x60 ) or ( 0x7b <= n and n <= 0x7f ): - out += data[0] - data = data[1:] - match += 1 - continue - print 'No match : ' + data - break - return out diff --git a/lib/js2py/base.py b/lib/js2py/base.py index 67c80d59..cf1eca08 100644 --- a/lib/js2py/base.py +++ b/lib/js2py/base.py @@ -5,6 +5,7 @@ import re from .translators.friendly_nodes import REGEXP_CONVERTER from .utils.injector import fix_js_args from types import FunctionType, ModuleType, GeneratorType, BuiltinFunctionType, MethodType, BuiltinMethodType +from math import 
floor, log10 import traceback try: import numpy @@ -603,15 +604,7 @@ class PyJs(object): elif typ == 'Boolean': return Js('true') if self.value else Js('false') elif typ == 'Number': #or self.Class=='Number': - if self.is_nan(): - return Js('NaN') - elif self.is_infinity(): - sign = '-' if self.value < 0 else '' - return Js(sign + 'Infinity') - elif isinstance(self.value, - long) or self.value.is_integer(): # dont print .0 - return Js(unicode(int(self.value))) - return Js(unicode(self.value)) # accurate enough + return Js(unicode(js_dtoa(self.value))) elif typ == 'String': return self else: #object @@ -1046,7 +1039,7 @@ def PyJsComma(a, b): return b -from .internals.simplex import JsException as PyJsException +from .internals.simplex import JsException as PyJsException, js_dtoa import pyjsparser pyjsparser.parser.ENABLE_JS2PY_ERRORS = lambda msg: MakeError('SyntaxError', msg) diff --git a/lib/js2py/evaljs.py b/lib/js2py/evaljs.py index 3f5eeee5..ef4d7d95 100644 --- a/lib/js2py/evaljs.py +++ b/lib/js2py/evaljs.py @@ -116,36 +116,52 @@ def eval_js(js): def eval_js6(js): + """Just like eval_js but with experimental support for js6 via babel.""" return eval_js(js6_to_js5(js)) def translate_js6(js): + """Just like translate_js but with experimental support for js6 via babel.""" return translate_js(js6_to_js5(js)) class EvalJs(object): """This class supports continuous execution of javascript under same context. - >>> js = EvalJs() - >>> js.execute('var a = 10;function f(x) {return x*x};') - >>> js.f(9) + >>> ctx = EvalJs() + >>> ctx.execute('var a = 10;function f(x) {return x*x};') + >>> ctx.f(9) 81 - >>> js.a + >>> ctx.a 10 context is a python dict or object that contains python variables that should be available to JavaScript For example: - >>> js = EvalJs({'a': 30}) - >>> js.execute('var x = a') - >>> js.x + >>> ctx = EvalJs({'a': 30}) + >>> ctx.execute('var x = a') + >>> ctx.x 30 + You can enable JS require function via enable_require. 
With this feature enabled you can use js modules + from npm, for example: + >>> ctx = EvalJs(enable_require=True) + >>> ctx.execute("var esprima = require('esprima');") + >>> ctx.execute("esprima.parse('var a = 1')") + You can run interactive javascript console with console method!""" - def __init__(self, context={}): + def __init__(self, context={}, enable_require=False): self.__dict__['_context'] = {} exec (DEFAULT_HEADER, self._context) self.__dict__['_var'] = self._context['var'].to_python() + + if enable_require: + def _js_require_impl(npm_module_name): + from .node_import import require + from .base import to_python + return require(to_python(npm_module_name), context=self._context) + setattr(self._var, 'require', _js_require_impl) + if not isinstance(context, dict): try: context = context.__dict__ diff --git a/lib/js2py/internals/constructors/jsfunction.py b/lib/js2py/internals/constructors/jsfunction.py index 9728fb38..738554a0 100644 --- a/lib/js2py/internals/constructors/jsfunction.py +++ b/lib/js2py/internals/constructors/jsfunction.py @@ -7,7 +7,7 @@ from ..byte_trans import ByteCodeGenerator, Code def Function(this, args): # convert arguments to python list of strings - a = map(to_string, tuple(args)) + a = list(map(to_string, tuple(args))) _body = u';' _args = () if len(a): @@ -42,6 +42,7 @@ def executable_code(code_str, space, global_context=True): space.byte_generator.emit('LABEL', skip) space.byte_generator.emit('NOP') space.byte_generator.restore_state() + space.byte_generator.exe.compile( start_loc=old_tape_len ) # dont read the code from the beginning, dont be stupid! @@ -71,5 +72,5 @@ def _eval(this, args): def log(this, args): - print ' '.join(map(to_string, args)) + print(' '.join(map(to_string, args))) return undefined diff --git a/lib/js2py/internals/prototypes/jsstring.py b/lib/js2py/internals/prototypes/jsstring.py index b56246e2..be38802e 100644 --- a/lib/js2py/internals/prototypes/jsstring.py +++ b/lib/js2py/internals/prototypes/jsstring.py @@ -4,7 +4,7 @@ from __future__ import unicode_literals import re from ..conversions import * from ..func_utils import * -from jsregexp import RegExpExec +from .jsregexp import RegExpExec DIGS = set(u'0123456789') WHITE = u"\u0009\u000A\u000B\u000C\u000D\u0020\u00A0\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u2028\u2029\u202F\u205F\u3000\uFEFF" diff --git a/lib/js2py/node_import.py b/lib/js2py/node_import.py index a49a1f51..605c4b3a 100644 --- a/lib/js2py/node_import.py +++ b/lib/js2py/node_import.py @@ -1,6 +1,6 @@ __all__ = ['require'] import subprocess, os, codecs, glob -from .evaljs import translate_js +from .evaljs import translate_js, DEFAULT_HEADER import six DID_INIT = False DIRNAME = os.path.dirname(os.path.abspath(__file__)) @@ -15,7 +15,7 @@ def _init(): 'node -v', shell=True, cwd=DIRNAME ) == 0, 'You must have node installed! 
run: brew install node' assert subprocess.call( - 'cd %s;npm install babel-core babel-cli babel-preset-es2015 babel-polyfill babelify browserify' + 'cd %s;npm install babel-core babel-cli babel-preset-es2015 babel-polyfill babelify browserify browserify-shim' % repr(DIRNAME), shell=True, cwd=DIRNAME) == 0, 'Could not link required node_modules' @@ -46,12 +46,18 @@ GET_FROM_GLOBALS_FUNC = ''' ''' +def _get_module_py_name(module_name): + return module_name.replace('-', '_') -def require(module_name, include_polyfill=False, update=False): +def _get_module_var_name(module_name): + return _get_module_py_name(module_name).rpartition('/')[-1] + + +def _get_and_translate_npm_module(module_name, include_polyfill=False, update=False): assert isinstance(module_name, str), 'module_name must be a string!' - py_name = module_name.replace('-', '_') + py_name = _get_module_py_name(module_name) module_filename = '%s.py' % py_name - var_name = py_name.rpartition('/')[-1] + var_name = _get_module_var_name(module_name) if not os.path.exists(os.path.join(PY_NODE_MODULES_PATH, module_filename)) or update: _init() @@ -77,7 +83,7 @@ def require(module_name, include_polyfill=False, update=False): # convert the module assert subprocess.call( - '''node -e "(require('browserify')('./%s').bundle(function (err,data) {fs.writeFile('%s', require('babel-core').transform(data, {'presets': require('babel-preset-es2015')}).code, ()=>{});}))"''' + '''node -e "(require('browserify')('./%s').bundle(function (err,data) {if (err) {console.log(err);throw new Error(err);};fs.writeFile('%s', require('babel-core').transform(data, {'presets': require('babel-preset-es2015')}).code, ()=>{});}))"''' % (in_file_name, out_file_name), shell=True, cwd=DIRNAME, @@ -88,7 +94,8 @@ def require(module_name, include_polyfill=False, update=False): "utf-8") as f: js_code = f.read() os.remove(os.path.join(DIRNAME, out_file_name)) - + if len(js_code) < 50: + raise RuntimeError("Candidate JS bundle too short - likely browserify issue.") js_code += GET_FROM_GLOBALS_FUNC js_code += ';var %s = getFromGlobals(%s);%s' % ( var_name, repr(module_name), var_name) @@ -107,7 +114,32 @@ def require(module_name, include_polyfill=False, update=False): os.path.join(PY_NODE_MODULES_PATH, module_filename), "r", "utf-8") as f: py_code = f.read() + return py_code - context = {} + +def require(module_name, include_polyfill=False, update=False, context=None): + """ + Installs the provided npm module, exports a js bundle via browserify, converts to ECMA 5.1 via babel and + finally translates the generated JS bundle to Python via Js2Py. + Returns a pure python object that behaves like the installed module. Nice! + + :param module_name: Name of the npm module to require. For example 'esprima'. + :param include_polyfill: Whether the babel-polyfill should be included as part of the translation. May be needed + for some modules that use unsupported features. + :param update: Whether to force update the translation. Otherwise uses a cached version if exists. + :param context: Optional context in which the translated module should be executed in. If provided, the + header (js2py imports) will be skipped as it is assumed that the context already has all the necessary imports. + :return: The JsObjectWrapper containing the translated module object. Can be used like a standard python object. 
+ """ + py_code = _get_and_translate_npm_module(module_name, include_polyfill=include_polyfill, update=update) + # this is a bit hacky but we need to strip the default header from the generated code... + if context is not None: + if not py_code.startswith(DEFAULT_HEADER): + # new header version? retranslate... + assert not update, "Unexpected header." + py_code = _get_and_translate_npm_module(module_name, include_polyfill=include_polyfill, update=True) + assert py_code.startswith(DEFAULT_HEADER), "Unexpected header." + py_code = py_code[len(DEFAULT_HEADER):] + context = {} if context is None else context exec (py_code, context) - return context['var'][var_name].to_py() + return context['var'][_get_module_var_name(module_name)].to_py() diff --git a/lib/js2py/prototypes/jsfunction.py b/lib/js2py/prototypes/jsfunction.py index f9598a31..2ed417e0 100644 --- a/lib/js2py/prototypes/jsfunction.py +++ b/lib/js2py/prototypes/jsfunction.py @@ -6,8 +6,6 @@ if six.PY3: xrange = range unicode = str -# todo fix apply and bind - class FunctionPrototype: def toString(): @@ -41,6 +39,7 @@ class FunctionPrototype: return this.call(obj, args) def bind(thisArg): + arguments_ = arguments target = this if not target.is_callable(): raise this.MakeError( @@ -48,5 +47,5 @@ class FunctionPrototype: if len(arguments) <= 1: args = () else: - args = tuple([arguments[e] for e in xrange(1, len(arguments))]) + args = tuple([arguments_[e] for e in xrange(1, len(arguments_))]) return this.PyJsBoundFunction(target, thisArg, args) diff --git a/lib/js2py/test_internals.py b/lib/js2py/test_internals.py deleted file mode 100644 index 12cf4ad7..00000000 --- a/lib/js2py/test_internals.py +++ /dev/null @@ -1,9 +0,0 @@ -from internals import byte_trans -from internals import seval -import pyjsparser - -x = r''' -function g() {var h123 = 11; return [function g1() {return h123}, new Function('return h123')]} -g()[1]() -''' -print seval.eval_js_vm(x) diff --git a/lib/js2py/translators/translating_nodes.py b/lib/js2py/translators/translating_nodes.py index b9eea29f..371c8ede 100644 --- a/lib/js2py/translators/translating_nodes.py +++ b/lib/js2py/translators/translating_nodes.py @@ -155,7 +155,7 @@ def limited(func): inf = float('inf') -def Literal(type, value, raw, regex=None, comments=None): +def Literal(type, value, raw, regex=None): if regex: # regex return 'JsRegExp(%s)' % repr(compose_regex(value)) elif value is None: # null @@ -165,12 +165,12 @@ def Literal(type, value, raw, regex=None, comments=None): return 'Js(%s)' % repr(value) if value != inf else 'Js(float("inf"))' -def Identifier(type, name, comments=None): +def Identifier(type, name): return 'var.get(%s)' % repr(name) @limited -def MemberExpression(type, computed, object, property, comments=None): +def MemberExpression(type, computed, object, property): far_left = trans(object) if computed: # obj[prop] type accessor # may be literal which is the same in every case so we can save some time on conversion @@ -183,12 +183,12 @@ def MemberExpression(type, computed, object, property, comments=None): return far_left + '.get(%s)' % prop -def ThisExpression(type, comments=None): +def ThisExpression(type): return 'var.get(u"this")' @limited -def CallExpression(type, callee, arguments, comments=None): +def CallExpression(type, callee, arguments): arguments = [trans(e) for e in arguments] if callee['type'] == 'MemberExpression': far_left = trans(callee['object']) @@ -210,38 +210,47 @@ def CallExpression(type, callee, arguments, comments=None): # ========== ARRAYS ============ -def 
ArrayExpression(type, elements, comments=None): # todo fix null inside problem +def ArrayExpression(type, elements): # todo fix null inside problem return 'Js([%s])' % ', '.join(trans(e) if e else 'None' for e in elements) # ========== OBJECTS ============= -def ObjectExpression(type, properties, comments=None): - name = inline_stack.require('Object') +def ObjectExpression(type, properties): + name = None elems = [] after = '' for p in properties: if p['kind'] == 'init': elems.append('%s:%s' % Property(**p)) - elif p['kind'] == 'set': - k, setter = Property( - **p - ) # setter is just a lval referring to that function, it will be defined in InlineStack automatically - after += '%s.define_own_property(%s, {"set":%s, "configurable":True, "enumerable":True})\n' % ( - name, k, setter) - elif p['kind'] == 'get': - k, getter = Property(**p) - after += '%s.define_own_property(%s, {"get":%s, "configurable":True, "enumerable":True})\n' % ( - name, k, getter) else: - raise RuntimeError('Unexpected object propery kind') - obj = '%s = Js({%s})\n' % (name, ','.join(elems)) - inline_stack.define(name, obj + after) - return name + if name is None: + name = inline_stack.require('Object') + if p['kind'] == 'set': + k, setter = Property( + **p + ) # setter is just a lval referring to that function, it will be defined in InlineStack automatically + after += '%s.define_own_property(%s, {"set":%s, "configurable":True, "enumerable":True})\n' % ( + name, k, setter) + elif p['kind'] == 'get': + k, getter = Property(**p) + after += '%s.define_own_property(%s, {"get":%s, "configurable":True, "enumerable":True})\n' % ( + name, k, getter) + else: + raise RuntimeError('Unexpected object propery kind') + definition = 'Js({%s})' % ','.join(elems) + if name is None: + return definition + body = '%s = %s\n' % (name, definition) + body += after + body += 'return %s\n' % name + code = 'def %s():\n%s' % (name, indent(body)) + inline_stack.define(name, code) + return name + '()' -def Property(type, kind, key, computed, value, method, shorthand, comments=None): +def Property(type, kind, key, computed, value, method, shorthand): if shorthand or computed: raise NotImplementedError( 'Shorthand and Computed properties not implemented!') @@ -256,7 +265,7 @@ def Property(type, kind, key, computed, value, method, shorthand, comments=None) @limited -def UnaryExpression(type, operator, argument, prefix, comments=None): +def UnaryExpression(type, operator, argument, prefix): a = trans( argument, standard=True ) # unary involve some complex operations so we cant use line shorteners here @@ -271,7 +280,7 @@ def UnaryExpression(type, operator, argument, prefix, comments=None): @limited -def BinaryExpression(type, operator, left, right, comments=None): +def BinaryExpression(type, operator, left, right): a = trans(left) b = trans(right) # delegate to our friends @@ -279,7 +288,7 @@ def BinaryExpression(type, operator, left, right, comments=None): @limited -def UpdateExpression(type, operator, argument, prefix, comments=None): +def UpdateExpression(type, operator, argument, prefix): a = trans( argument, standard=True ) # also complex operation involving parsing of the result so no line length reducing here @@ -287,7 +296,7 @@ def UpdateExpression(type, operator, argument, prefix, comments=None): @limited -def AssignmentExpression(type, operator, left, right, comments=None): +def AssignmentExpression(type, operator, left, right): operator = operator[:-1] if left['type'] == 'Identifier': if operator: @@ -319,12 +328,12 @@ six @limited -def 
SequenceExpression(type, expressions, comments=None): +def SequenceExpression(type, expressions): return reduce(js_comma, (trans(e) for e in expressions)) @limited -def NewExpression(type, callee, arguments, comments=None): +def NewExpression(type, callee, arguments): return trans(callee) + '.create(%s)' % ', '.join( trans(e) for e in arguments) @@ -332,7 +341,7 @@ def NewExpression(type, callee, arguments, comments=None): @limited def ConditionalExpression( type, test, consequent, - alternate, comments=None): # caused plenty of problems in my home-made translator :) + alternate): # caused plenty of problems in my home-made translator :) return '(%s if %s else %s)' % (trans(consequent), trans(test), trans(alternate)) @@ -340,49 +349,49 @@ def ConditionalExpression( # =========== STATEMENTS ============= -def BlockStatement(type, body, comments=None): +def BlockStatement(type, body): return StatementList( body) # never returns empty string! In the worst case returns pass\n -def ExpressionStatement(type, expression, comments=None): +def ExpressionStatement(type, expression): return trans(expression) + '\n' # end expression space with new line -def BreakStatement(type, label, comments=None): +def BreakStatement(type, label): if label: return 'raise %s("Breaked")\n' % (get_break_label(label['name'])) else: return 'break\n' -def ContinueStatement(type, label, comments=None): +def ContinueStatement(type, label): if label: return 'raise %s("Continued")\n' % (get_continue_label(label['name'])) else: return 'continue\n' -def ReturnStatement(type, argument, comments=None): +def ReturnStatement(type, argument): return 'return %s\n' % (trans(argument) if argument else "var.get('undefined')") -def EmptyStatement(type, comments=None): +def EmptyStatement(type): return 'pass\n' -def DebuggerStatement(type, comments=None): +def DebuggerStatement(type): return 'pass\n' -def DoWhileStatement(type, body, test, comments=None): +def DoWhileStatement(type, body, test): inside = trans(body) + 'if not %s:\n' % trans(test) + indent('break\n') result = 'while 1:\n' + indent(inside) return result -def ForStatement(type, init, test, update, body, comments=None): +def ForStatement(type, init, test, update, body): update = indent(trans(update)) if update else '' init = trans(init) if init else '' if not init.endswith('\n'): @@ -398,7 +407,7 @@ def ForStatement(type, init, test, update, body, comments=None): return result -def ForInStatement(type, left, right, body, each, comments=None): +def ForInStatement(type, left, right, body, each): res = 'for PyJsTemp in %s:\n' % trans(right) if left['type'] == "VariableDeclaration": addon = trans(left) # make sure variable is registered @@ -417,7 +426,7 @@ def ForInStatement(type, left, right, body, each, comments=None): return res -def IfStatement(type, test, consequent, alternate, comments=None): +def IfStatement(type, test, consequent, alternate): # NOTE we cannot do elif because function definition inside elif statement would not be possible! IF = 'if %s:\n' % trans(test) IF += indent(trans(consequent)) @@ -427,7 +436,7 @@ def IfStatement(type, test, consequent, alternate, comments=None): return IF + ELSE -def LabeledStatement(type, label, body, comments=None): +def LabeledStatement(type, label, body): # todo consider using smarter approach! 
inside = trans(body) defs = '' @@ -448,7 +457,7 @@ def LabeledStatement(type, label, body, comments=None): return defs + inside -def StatementList(lis, comments=None): +def StatementList(lis): if lis: # ensure we don't return empty string because it may ruin indentation! code = ''.join(trans(e) for e in lis) return code if code else 'pass\n' @@ -456,7 +465,7 @@ def StatementList(lis, comments=None): return 'pass\n' -def PyimportStatement(type, imp, comments=None): +def PyimportStatement(type, imp): lib = imp['name'] jlib = 'PyImport_%s' % lib code = 'import %s as %s\n' % (lib, jlib) @@ -471,7 +480,7 @@ def PyimportStatement(type, imp, comments=None): return code -def SwitchStatement(type, discriminant, cases, comments=None): +def SwitchStatement(type, discriminant, cases): #TODO there will be a problem with continue in a switch statement.... FIX IT code = 'while 1:\n' + indent('SWITCHED = False\nCONDITION = (%s)\n') code = code % trans(discriminant) @@ -491,12 +500,12 @@ def SwitchStatement(type, discriminant, cases, comments=None): return code -def ThrowStatement(type, argument, comments=None): +def ThrowStatement(type, argument): return 'PyJsTempException = JsToPyException(%s)\nraise PyJsTempException\n' % trans( argument) -def TryStatement(type, block, handler, handlers, guardedHandlers, finalizer, comments=None): +def TryStatement(type, block, handler, handlers, guardedHandlers, finalizer): result = 'try:\n%s' % indent(trans(block)) # complicated catch statement... if handler: @@ -516,13 +525,13 @@ def TryStatement(type, block, handler, handlers, guardedHandlers, finalizer, com return result -def LexicalDeclaration(type, declarations, kind, comments=None): +def LexicalDeclaration(type, declarations, kind): raise NotImplementedError( 'let and const not implemented yet but they will be soon! Check github for updates.' 
) -def VariableDeclarator(type, id, init, comments=None): +def VariableDeclarator(type, id, init): name = id['name'] # register the name if not already registered Context.register(name) @@ -531,21 +540,21 @@ def VariableDeclarator(type, id, init, comments=None): return '' -def VariableDeclaration(type, declarations, kind, comments=None): +def VariableDeclaration(type, declarations, kind): code = ''.join(trans(d) for d in declarations) return code if code else 'pass\n' -def WhileStatement(type, test, body, comments=None): +def WhileStatement(type, test, body): result = 'while %s:\n' % trans(test) + indent(trans(body)) return result -def WithStatement(type, object, body, comments=None): +def WithStatement(type, object, body): raise NotImplementedError('With statement not implemented!') -def Program(type, body, comments=None): +def Program(type, body): inline_stack.reset() code = ''.join(trans(e) for e in body) # here add hoisted elements (register variables and define functions) @@ -559,7 +568,7 @@ def Program(type, body, comments=None): def FunctionDeclaration(type, id, params, defaults, body, generator, - expression, comments=None): + expression): if generator: raise NotImplementedError('Generators not supported') if defaults: @@ -610,7 +619,7 @@ def FunctionDeclaration(type, id, params, defaults, body, generator, def FunctionExpression(type, id, params, defaults, body, generator, - expression, comments=None): + expression): if generator: raise NotImplementedError('Generators not supported') if defaults: diff --git a/lib/js2py/utils/injector.py b/lib/js2py/utils/injector.py index dd714a48..ea236d5e 100644 --- a/lib/js2py/utils/injector.py +++ b/lib/js2py/utils/injector.py @@ -115,7 +115,16 @@ def append_arguments(code_obj, new_locals): code_obj.co_freevars, code_obj.co_cellvars) # Done modifying codestring - make the code object - return types.CodeType(*args) + if hasattr(code_obj, "replace"): + # Python 3.8+ + return code_obj.replace( + co_argcount=co_argcount + new_locals_len, + co_nlocals=code_obj.co_nlocals + new_locals_len, + co_code=code, + co_names=names, + co_varnames=varnames) + else: + return types.CodeType(*args) def instructions(code_obj): diff --git a/lib/jsc.py b/lib/jsc.py deleted file mode 100644 index 8d1baf79..00000000 --- a/lib/jsc.py +++ /dev/null @@ -1,83 +0,0 @@ -MAPPING = { - 'a': '(false+"")[1]', - 'b': '([]["entries"]()+"")[2]', - 'c': '([]["fill"]+"")[3]', - 'd': '(undefined+"")[2]', - 'e': '(true+"")[3]', - 'f': '(false+"")[0]', - 'g': '(false+[0]+String)[20]', - 'h': '(+(101))["to"+String["name"]](21)[1]', - 'i': '([false]+undefined)[10]', - 'j': '([]["entries"]()+"")[3]', - 'k': '(+(20))["to"+String["name"]](21)', - 'l': '(false+"")[2]', - 'm': '(Number+"")[11]', - 'n': '(undefined+"")[1]', - 'o': '(true+[]["fill"])[10]', - 'p': '(+(211))["to"+String["name"]](31)[1]', - 'q': '(+(212))["to"+String["name"]](31)[1]', - 'r': '(true+"")[1]', - 's': '(false+"")[3]', - 't': '(true+"")[0]', - 'u': '(undefined+"")[0]', - 'v': '(+(31))["to"+String["name"]](32)', - 'w': '(+(32))["to"+String["name"]](33)', - 'x': '(+(101))["to"+String["name"]](34)[1]', - 'y': '(NaN+[Infinity])[10]', - 'z': '(+(35))["to"+String["name"]](36)', - 'A': '(+[]+Array)[10]', - 'B': '(+[]+Boolean)[10]', - 'C': 'Function("return escape")()(("")["italics"]())[2]', - 'D': 'Function("return escape")()([]["fill"])["slice"]("-1")', - 'E': '(RegExp+"")[12]', - 'F': '(+[]+Function)[10]', - 'G': '(false+Function("return Date")()())[30]', - 'I': '(Infinity+"")[0]', - 'M': '(true+Function("return 
Date")()())[30]', - 'N': '(NaN+"")[0]', - 'O': '(NaN+Function("return{}")())[11]', - 'R': '(+[]+RegExp)[10]', - 'S': '(+[]+String)[10]', - 'T': '(NaN+Function("return Date")()())[30]', - 'U': '(NaN+Function("return{}")()["to"+String["name"]]["call"]())[11]', - ' ': '(NaN+[]["fill"])[11]', - '"': '("")["fontcolor"]()[12]', - '%': 'Function("return escape")()([]["fill"])[21]', - '&': '("")["link"](0+")[10]', - '(': '(undefined+[]["fill"])[22]', - ')': '([0]+false+[]["fill"])[20]', - '+': '(+(+!+[]+(!+[]+[])[!+[]+!+[]+!+[]]+[+!+[]]+[+[]]+[+[]])+[])[2]', - ',': '([]["slice"]["call"](false+"")+"")[1]', - '-': '(+(.+[0000000001])+"")[2]', - '.': '(+(+!+[]+[+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+[!+[]+!+[]]+[+[]])+[])[+!+[]]', - '/': '(false+[0])["italics"]()[10]', - ':': '(RegExp()+"")[3]', - ';': '("")["link"](")[14]', - '<': '("")["italics"]()[0]', - '=': '("")["fontcolor"]()[11]', - '>': '("")["italics"]()[2]', - '?': '(RegExp()+"")[2]', - '[': '([]["entries"]()+"")[0]', - ']': '([]["entries"]()+"")[22]', - '{': '(true+[]["fill"])[20]', - '}': '([]["fill"]+"")["slice"]("-1")' -} - -SIMPLE = { - 'false': '![]', - 'true': '!![]', - 'undefined': '[][[]]', - 'NaN': '+[![]]', - 'Infinity': '+(+!+[]+(!+[]+[])[!+[]+!+[]+!+[]]+[+!+[]]+[+[]]+[+[]]+[+[]])' # +"1e1000" -} - -def jsunc(jscString): - - for key in sorted(MAPPING, key=lambda k: len(MAPPING[k]), reverse=True): - if MAPPING.get(key) in jscString: - jscString = jscString.replace(MAPPING.get(key), '"{}"'.format(key)) - - for key in sorted(SIMPLE, key=lambda k: len(SIMPLE[k]), reverse=True): - if SIMPLE.get(key) in jscString: - jscString = jscString.replace(SIMPLE.get(key), '{}'.format(key)) - return jscString \ No newline at end of file diff --git a/lib/jscrypto.py b/lib/jscrypto.py deleted file mode 100644 index 7d8e8da3..00000000 --- a/lib/jscrypto.py +++ /dev/null @@ -1,550 +0,0 @@ -# -*- coding: utf-8 -*- - -import StringIO -import binascii -import hashlib -from array import array - - -def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md5"): - target_key_size = key_size + iv_size - derived_bytes = "" - number_of_derived_words = 0 - block = None - hasher = hashlib.new(hash_algorithm) - while number_of_derived_words < target_key_size: - if block is not None: - hasher.update(block) - - hasher.update(passwd) - hasher.update(salt) - block = hasher.digest() - hasher = hashlib.new(hash_algorithm) - - for i in range(1, iterations): - hasher.update(block) - block = hasher.digest() - hasher = hashlib.new(hash_algorithm) - - derived_bytes += block[0: min(len(block), (target_key_size - number_of_derived_words) * 4)] - - number_of_derived_words += len(block) / 4 - - return { - "key": derived_bytes[0: key_size * 4], - "iv": derived_bytes[key_size * 4:] - } - - -class PKCS7Encoder(object): - ''' - RFC 2315: PKCS#7 page 21 - Some content-encryption algorithms assume the - input length is a multiple of k octets, where k > 1, and - let the application define a method for handling inputs - whose lengths are not a multiple of k octets. For such - algorithms, the method shall be to pad the input at the - trailing end with k - (l mod k) octets all having value k - - (l mod k), where l is the length of the input. In other - words, the input is padded at the trailing end with one of - the following strings: - - 01 -- if l mod k = k-1 - 02 02 -- if l mod k = k-2 - . - . - . - k k ... k k -- if l mod k = 0 - - The padding can be removed unambiguously since all input is - padded and no padding string is a suffix of another. 
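
In runnable form, the rule quoted above is short; here is a minimal Python 3
sketch (operating on bytes, unlike the Python 2 str handling in this class):

    def pkcs7_pad(data, k=16):
        # append k - (len mod k) bytes, each equal to that count
        val = k - (len(data) % k)
        return data + bytes([val]) * val

    def pkcs7_unpad(padded, k=16):
        # the final byte states how many padding bytes to strip
        val = padded[-1]
        if not 1 <= val <= k:
            raise ValueError('Input is not padded or padding is corrupt')
        return padded[:-val]

    # a 16-byte input falls in the "l mod k = 0" case: a full block of padding
    assert pkcs7_unpad(pkcs7_pad(b'16 byte block!!!')) == b'16 byte block!!!'
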
This - padding method is well-defined if and only if k < 256; - methods for larger k are an open issue for further study. - ''' - - def __init__(self, k=16): - self.k = k - - ## @param text The padded text for which the padding is to be removed. - # @exception ValueError Raised when the input padding is missing or corrupt. - def decode(self, text): - ''' - Remove the PKCS#7 padding from a text string - ''' - nl = len(text) - val = int(binascii.hexlify(text[-1]), 16) - if val > self.k: - raise ValueError('Input is not padded or padding is corrupt') - - l = nl - val - return text[:l] - - ## @param text The text to encode. - def encode(self, text): - ''' - Pad an input string according to PKCS#7 - ''' - l = len(text) - output = StringIO.StringIO() - val = self.k - (l % self.k) - for _ in xrange(val): - output.write('%02x' % val) - return text + binascii.unhexlify(output.getvalue()) - - -# Pyaes file -# Globals mandated by PEP 272: -# http://www.python.org/dev/peps/pep-0272/ -MODE_ECB = 1 -MODE_CBC = 2 -# MODE_CTR = 6 - -block_size = 16 -key_size = None - - -def new(key, mode, IV=None): - if mode == MODE_ECB: - return ECBMode(AES(key)) - elif mode == MODE_CBC: - if IV is None: - raise ValueError, "CBC mode needs an IV value!" - - return CBCMode(AES(key), IV) - else: - raise NotImplementedError - - -#### AES cipher implementation - -class AES(object): - block_size = 16 - - def __init__(self, key): - self.setkey(key) - - def setkey(self, key): - """Sets the key and performs key expansion.""" - - self.key = key - self.key_size = len(key) - - if self.key_size == 16: - self.rounds = 10 - elif self.key_size == 24: - self.rounds = 12 - elif self.key_size == 32: - self.rounds = 14 - else: - raise ValueError, "Key length must be 16, 24 or 32 bytes" - - self.expand_key() - - def expand_key(self): - """Performs AES key expansion on self.key and stores in self.exkey""" - - # The key schedule specifies how parts of the key are fed into the - # cipher's round functions. "Key expansion" means performing this - # schedule in advance. Almost all implementations do this. 
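
As a quick check on the stop condition used below, (rounds + 1) * block_size,
the expanded-key sizes work out as follows (illustrative arithmetic only):

    # one 16-byte subkey per round, plus one for the initial AddRoundKey
    expected = {16: 176, 24: 208, 32: 240}
    for key_bytes, rounds in ((16, 10), (24, 12), (32, 14)):
        assert (rounds + 1) * 16 == expected[key_bytes]
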
- # - # Here's a description of AES key schedule: - # http://en.wikipedia.org/wiki/Rijndael_key_schedule - - # The expanded key starts with the actual key itself - exkey = array('B', self.key) - - # extra key expansion steps - if self.key_size == 16: - extra_cnt = 0 - elif self.key_size == 24: - extra_cnt = 2 - else: - extra_cnt = 3 - - # 4-byte temporary variable for key expansion - word = exkey[-4:] - # Each expansion cycle uses 'i' once for Rcon table lookup - for i in xrange(1, 11): - - #### key schedule core: - # left-rotate by 1 byte - word = word[1:4] + word[0:1] - - # apply S-box to all bytes - for j in xrange(4): - word[j] = aes_sbox[word[j]] - - # apply the Rcon table to the leftmost byte - word[0] = word[0] ^ aes_Rcon[i] - #### end key schedule core - - for z in xrange(4): - for j in xrange(4): - # mix in bytes from the last subkey - word[j] ^= exkey[-self.key_size + j] - exkey.extend(word) - - # Last key expansion cycle always finishes here - if len(exkey) >= (self.rounds + 1) * self.block_size: - break - - # Special substitution step for 256-bit key - if self.key_size == 32: - for j in xrange(4): - # mix in bytes from the last subkey XORed with S-box of - # current word bytes - word[j] = aes_sbox[word[j]] ^ exkey[-self.key_size + j] - exkey.extend(word) - - # Twice for 192-bit key, thrice for 256-bit key - for z in xrange(extra_cnt): - for j in xrange(4): - # mix in bytes from the last subkey - word[j] ^= exkey[-self.key_size + j] - exkey.extend(word) - - self.exkey = exkey - - def add_round_key(self, block, round): - """AddRoundKey step in AES. This is where the key is mixed into plaintext""" - - offset = round * 16 - exkey = self.exkey - - for i in xrange(16): - block[i] ^= exkey[offset + i] - - # print 'AddRoundKey:', block - - def sub_bytes(self, block, sbox): - """SubBytes step, apply S-box to all bytes - - Depending on whether encrypting or decrypting, a different sbox array - is passed in. - """ - - for i in xrange(16): - block[i] = sbox[block[i]] - - # print 'SubBytes :', block - - def shift_rows(self, b): - """ShiftRows step. Shifts 2nd row to left by 1, 3rd row by 2, 4th row by 3 - - Since we're performing this on a transposed matrix, cells are numbered - from top to bottom:: - - 0 4 8 12 -> 0 4 8 12 -- 1st row doesn't change - 1 5 9 13 -> 5 9 13 1 -- row shifted to left by 1 (wraps around) - 2 6 10 14 -> 10 14 2 6 -- shifted by 2 - 3 7 11 15 -> 15 3 7 11 -- shifted by 3 - """ - - b[1], b[5], b[9], b[13] = b[5], b[9], b[13], b[1] - b[2], b[6], b[10], b[14] = b[10], b[14], b[2], b[6] - b[3], b[7], b[11], b[15] = b[15], b[3], b[7], b[11] - - # print 'ShiftRows :', b - - def shift_rows_inv(self, b): - """Similar to shift_rows above, but performed in inverse for decryption.""" - - b[5], b[9], b[13], b[1] = b[1], b[5], b[9], b[13] - b[10], b[14], b[2], b[6] = b[2], b[6], b[10], b[14] - b[15], b[3], b[7], b[11] = b[3], b[7], b[11], b[15] - - # print 'ShiftRows :', b - - def mix_columns(self, block): - """MixColumns step. 
Mixes the values in each column""" - - # Cache global multiplication tables (see below) - mul_by_2 = gf_mul_by_2 - mul_by_3 = gf_mul_by_3 - - # Since we're dealing with a transposed matrix, columns are already - # sequential - for i in xrange(4): - col = i * 4 - - # v0, v1, v2, v3 = block[col : col+4] - v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2], - block[col + 3]) - - block[col] = mul_by_2[v0] ^ v3 ^ v2 ^ mul_by_3[v1] - block[col + 1] = mul_by_2[v1] ^ v0 ^ v3 ^ mul_by_3[v2] - block[col + 2] = mul_by_2[v2] ^ v1 ^ v0 ^ mul_by_3[v3] - block[col + 3] = mul_by_2[v3] ^ v2 ^ v1 ^ mul_by_3[v0] - - # print 'MixColumns :', block - - def mix_columns_inv(self, block): - """Similar to mix_columns above, but performed in inverse for decryption.""" - - # Cache global multiplication tables (see below) - mul_9 = gf_mul_by_9 - mul_11 = gf_mul_by_11 - mul_13 = gf_mul_by_13 - mul_14 = gf_mul_by_14 - - # Since we're dealing with a transposed matrix, columns are already - # sequential - for i in xrange(4): - col = i * 4 - - v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2], - block[col + 3]) - # v0, v1, v2, v3 = block[col:col+4] - - block[col] = mul_14[v0] ^ mul_9[v3] ^ mul_13[v2] ^ mul_11[v1] - block[col + 1] = mul_14[v1] ^ mul_9[v0] ^ mul_13[v3] ^ mul_11[v2] - block[col + 2] = mul_14[v2] ^ mul_9[v1] ^ mul_13[v0] ^ mul_11[v3] - block[col + 3] = mul_14[v3] ^ mul_9[v2] ^ mul_13[v1] ^ mul_11[v0] - - # print 'MixColumns :', block - - def encrypt_block(self, block): - """Encrypts a single block. This is the main AES function""" - - # For efficiency reasons, the state between steps is transmitted via a - # mutable array, not returned. - self.add_round_key(block, 0) - - for round in xrange(1, self.rounds): - self.sub_bytes(block, aes_sbox) - self.shift_rows(block) - self.mix_columns(block) - self.add_round_key(block, round) - - self.sub_bytes(block, aes_sbox) - self.shift_rows(block) - # no mix_columns step in the last round - self.add_round_key(block, self.rounds) - - def decrypt_block(self, block): - """Decrypts a single block. This is the main AES decryption function""" - - # For efficiency reasons, the state between steps is transmitted via a - # mutable array, not returned. - self.add_round_key(block, self.rounds) - - # count rounds down from 15 ... 1 - for round in xrange(self.rounds - 1, 0, -1): - self.shift_rows_inv(block) - self.sub_bytes(block, aes_inv_sbox) - self.add_round_key(block, round) - self.mix_columns_inv(block) - - self.shift_rows_inv(block) - self.sub_bytes(block, aes_inv_sbox) - self.add_round_key(block, 0) - # no mix_columns step in the last round - - -#### ECB mode implementation - -class ECBMode(object): - """Electronic CodeBook (ECB) mode encryption. - - Basically this mode applies the cipher function to each block individually; - no feedback is done. NB! 
This is insecure for almost all purposes - """ - - def __init__(self, cipher): - self.cipher = cipher - self.block_size = cipher.block_size - - def ecb(self, data, block_func): - """Perform ECB mode with the given function""" - - if len(data) % self.block_size != 0: - raise ValueError, "Plaintext length must be multiple of 16" - - block_size = self.block_size - data = array('B', data) - - for offset in xrange(0, len(data), block_size): - block = data[offset: offset + block_size] - block_func(block) - data[offset: offset + block_size] = block - - return data.tostring() - - def encrypt(self, data): - """Encrypt data in ECB mode""" - - return self.ecb(data, self.cipher.encrypt_block) - - def decrypt(self, data): - """Decrypt data in ECB mode""" - - return self.ecb(data, self.cipher.decrypt_block) - - -#### CBC mode - -class CBCMode(object): - """Cipher Block Chaining (CBC) mode encryption. This mode avoids content leaks. - - In CBC encryption, each plaintext block is XORed with the ciphertext block - preceding it; decryption is simply the inverse. - """ - - # A better explanation of CBC can be found here: - # http://en.wikipedia.org/wiki/Block_cipher_modes_of_operation#Cipher-block_chaining_.28CBC.29 - - def __init__(self, cipher, IV): - self.cipher = cipher - self.block_size = cipher.block_size - self.IV = array('B', IV) - - def encrypt(self, data): - """Encrypt data in CBC mode""" - - block_size = self.block_size - if len(data) % block_size != 0: - raise ValueError, "Plaintext length must be multiple of 16" - - data = array('B', data) - IV = self.IV - - for offset in xrange(0, len(data), block_size): - block = data[offset: offset + block_size] - - # Perform CBC chaining - for i in xrange(block_size): - block[i] ^= IV[i] - - self.cipher.encrypt_block(block) - data[offset: offset + block_size] = block - IV = block - - self.IV = IV - return data.tostring() - - def decrypt(self, data): - """Decrypt data in CBC mode""" - - block_size = self.block_size - if len(data) % block_size != 0: - raise ValueError, "Ciphertext length must be multiple of 16" - - data = array('B', data) - IV = self.IV - - for offset in xrange(0, len(data), block_size): - ctext = data[offset: offset + block_size] - block = ctext[:] - self.cipher.decrypt_block(block) - - # Perform CBC chaining - # for i in xrange(block_size): - # data[offset + i] ^= IV[i] - for i in xrange(block_size): - block[i] ^= IV[i] - data[offset: offset + block_size] = block - - IV = ctext - # data[offset : offset+block_size] = block - - self.IV = IV - return data.tostring() - - -#### - -def galois_multiply(a, b): - """Galois Field multiplicaiton for AES""" - p = 0 - while b: - if b & 1: - p ^= a - a <<= 1 - if a & 0x100: - a ^= 0x1b - b >>= 1 - - return p & 0xff - - -# Precompute the multiplication tables for encryption -gf_mul_by_2 = array('B', [galois_multiply(x, 2) for x in range(256)]) -gf_mul_by_3 = array('B', [galois_multiply(x, 3) for x in range(256)]) -# ... for decryption -gf_mul_by_9 = array('B', [galois_multiply(x, 9) for x in range(256)]) -gf_mul_by_11 = array('B', [galois_multiply(x, 11) for x in range(256)]) -gf_mul_by_13 = array('B', [galois_multiply(x, 13) for x in range(256)]) -gf_mul_by_14 = array('B', [galois_multiply(x, 14) for x in range(256)]) - -#### - -# The S-box is a 256-element array, that maps a single byte value to another -# byte value. 
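
Once the two lookup tables below are built, their inverse relationship can be
verified directly (a one-line sketch):

    # aes_inv_sbox undoes aes_sbox for every possible byte value
    assert all(aes_inv_sbox[aes_sbox[v]] == v for v in range(256))
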
Since it's designed to be reversible, each value occurs only once -# in the S-box -# -# More information: http://en.wikipedia.org/wiki/Rijndael_S-box - -aes_sbox = array('B', - '637c777bf26b6fc53001672bfed7ab76' - 'ca82c97dfa5947f0add4a2af9ca472c0' - 'b7fd9326363ff7cc34a5e5f171d83115' - '04c723c31896059a071280e2eb27b275' - '09832c1a1b6e5aa0523bd6b329e32f84' - '53d100ed20fcb15b6acbbe394a4c58cf' - 'd0efaafb434d338545f9027f503c9fa8' - '51a3408f929d38f5bcb6da2110fff3d2' - 'cd0c13ec5f974417c4a77e3d645d1973' - '60814fdc222a908846eeb814de5e0bdb' - 'e0323a0a4906245cc2d3ac629195e479' - 'e7c8376d8dd54ea96c56f4ea657aae08' - 'ba78252e1ca6b4c6e8dd741f4bbd8b8a' - '703eb5664803f60e613557b986c11d9e' - 'e1f8981169d98e949b1e87e9ce5528df' - '8ca1890dbfe6426841992d0fb054bb16'.decode('hex') - ) - -# This is the inverse of the above. In other words: -# aes_inv_sbox[aes_sbox[val]] == val - -aes_inv_sbox = array('B', - '52096ad53036a538bf40a39e81f3d7fb' - '7ce339829b2fff87348e4344c4dee9cb' - '547b9432a6c2233dee4c950b42fac34e' - '082ea16628d924b2765ba2496d8bd125' - '72f8f66486689816d4a45ccc5d65b692' - '6c704850fdedb9da5e154657a78d9d84' - '90d8ab008cbcd30af7e45805b8b34506' - 'd02c1e8fca3f0f02c1afbd0301138a6b' - '3a9111414f67dcea97f2cfcef0b4e673' - '96ac7422e7ad3585e2f937e81c75df6e' - '47f11a711d29c5896fb7620eaa18be1b' - 'fc563e4bc6d279209adbc0fe78cd5af4' - '1fdda8338807c731b11210592780ec5f' - '60517fa919b54a0d2de57a9f93c99cef' - 'a0e03b4dae2af5b0c8ebbb3c83539961' - '172b047eba77d626e169146355210c7d'.decode('hex') - ) - -# The Rcon table is used in AES's key schedule (key expansion) -# It's a pre-computed table of exponentation of 2 in AES's finite field -# -# More information: http://en.wikipedia.org/wiki/Rijndael_key_schedule - -aes_Rcon = array('B', - '8d01020408102040801b366cd8ab4d9a' - '2f5ebc63c697356ad4b37dfaefc59139' - '72e4d3bd61c29f254a943366cc831d3a' - '74e8cb8d01020408102040801b366cd8' - 'ab4d9a2f5ebc63c697356ad4b37dfaef' - 'c5913972e4d3bd61c29f254a943366cc' - '831d3a74e8cb8d01020408102040801b' - '366cd8ab4d9a2f5ebc63c697356ad4b3' - '7dfaefc5913972e4d3bd61c29f254a94' - '3366cc831d3a74e8cb8d010204081020' - '40801b366cd8ab4d9a2f5ebc63c69735' - '6ad4b37dfaefc5913972e4d3bd61c29f' - '254a943366cc831d3a74e8cb8d010204' - '08102040801b366cd8ab4d9a2f5ebc63' - 'c697356ad4b37dfaefc5913972e4d3bd' - '61c29f254a943366cc831d3a74e8cb'.decode('hex') - ) diff --git a/lib/jsinterpreter.py b/lib/jsinterpreter.py deleted file mode 100644 index feecdc6f..00000000 --- a/lib/jsinterpreter.py +++ /dev/null @@ -1,249 +0,0 @@ -# -*- coding: utf-8 -*- - -import json -import operator -import re - - -_OPERATORS = [ - ('|', operator.or_), - ('^', operator.xor), - ('&', operator.and_), - ('>>', operator.rshift), - ('<<', operator.lshift), - ('-', operator.sub), - ('+', operator.add), - ('%', operator.mod), - ('/', operator.truediv), - ('*', operator.mul), -] - -_ASSIGN_OPERATORS = [] -for op, opfunc in _OPERATORS: - _ASSIGN_OPERATORS.append([op + '=', opfunc]) -_ASSIGN_OPERATORS.append(('=', lambda cur, right: right)) - -_NAME_RE = r'[a-zA-Z_$][a-zA-Z_$0-9]*' - - -class JSInterpreter(object): - def __init__(self, code, objects=None): - if objects is None: - objects = {} - self.code = code - self._functions = {} - self._objects = objects - - def interpret_statement(self, stmt, local_vars, allow_recursion=100): - - should_abort = False - stmt = stmt.lstrip() - stmt_m = re.match(r'var\s', stmt) - if stmt_m: - expr = stmt[len(stmt_m.group(0)):] - else: - return_m = re.match(r'return(?:\s+|$)', stmt) - if return_m: - expr = 
stmt[len(return_m.group(0)):] - should_abort = True - else: - # Try interpreting it as an expression - expr = stmt - - v = self.interpret_expression(expr, local_vars, allow_recursion) - return v, should_abort - - def interpret_expression(self, expr, local_vars, allow_recursion): - expr = expr.strip() - - if expr == '': # Empty expression - return None - - if expr.startswith('('): - parens_count = 0 - for m in re.finditer(r'[()]', expr): - if m.group(0) == '(': - parens_count += 1 - else: - parens_count -= 1 - if parens_count == 0: - sub_expr = expr[1:m.start()] - sub_result = self.interpret_expression( - sub_expr, local_vars, allow_recursion) - remaining_expr = expr[m.end():].strip() - if not remaining_expr: - return sub_result - else: - expr = json.dumps(sub_result) + remaining_expr - break - - for op, opfunc in _ASSIGN_OPERATORS: - m = re.match(r'''(?x) - (?P%s)(?:\[(?P[^\]]+?)\])? - \s*%s - (?P.*)$''' % (_NAME_RE, re.escape(op)), expr) - if not m: - continue - right_val = self.interpret_expression( - m.group('expr'), local_vars, allow_recursion - 1) - - if m.groupdict().get('index'): - lvar = local_vars[m.group('out')] - idx = self.interpret_expression( - m.group('index'), local_vars, allow_recursion) - assert isinstance(idx, int) - cur = lvar[idx] - val = opfunc(cur, right_val) - lvar[idx] = val - return val - else: - cur = local_vars.get(m.group('out')) - val = opfunc(cur, right_val) - local_vars[m.group('out')] = val - return val - - if expr.isdigit(): - return int(expr) - - var_m = re.match( - r'(?!if|return|true|false)(?P%s)$' % _NAME_RE, - expr) - if var_m: - return local_vars[var_m.group('name')] - - try: - return json.loads(expr) - except ValueError: - pass - - m = re.match( - r'(?P%s)\.(?P[^(]+)(?:\(+(?P[^()]*)\))?$' % _NAME_RE, - expr) - if m: - variable = m.group('var') - member = m.group('member') - arg_str = m.group('args') - - if variable in local_vars: - obj = local_vars[variable] - else: - if variable not in self._objects: - self._objects[variable] = self.extract_object(variable) - obj = self._objects[variable] - - if arg_str is None: - # Member access - if member == 'length': - return len(obj) - return obj[member] - - assert expr.endswith(')') - # Function call - if arg_str == '': - argvals = tuple() - else: - argvals = [] - for v in arg_str.split(','): - argvals.extend([self.interpret_expression(v, local_vars, allow_recursion)]) - - if member == 'split': - assert argvals == ('',) - return list(obj) - if member == 'join': - assert len(argvals) == 1 - return argvals[0].join(obj) - if member == 'reverse': - assert len(argvals) == 0 - obj.reverse() - return obj - if member == 'slice': - assert len(argvals) == 1 - return obj[argvals[0]:] - if member == 'splice': - assert isinstance(obj, list) - index, howMany = argvals - res = [] - for i in range(index, min(index + howMany, len(obj))): - res.append(obj.pop(index)) - return res - - return obj[member](argvals) - - m = re.match( - r'(?P%s)\[(?P.+)\]$' % _NAME_RE, expr) - if m: - val = local_vars[m.group('in')] - idx = self.interpret_expression( - m.group('idx'), local_vars, allow_recursion - 1) - return val[idx] - - for op, opfunc in _OPERATORS: - m = re.match(r'(?P.+?)%s(?P.+)' % re.escape(op), expr) - if not m: - continue - x, abort = self.interpret_statement( - m.group('x'), local_vars, allow_recursion - 1) - y, abort = self.interpret_statement( - m.group('y'), local_vars, allow_recursion - 1) - return opfunc(x, y) - - m = re.match( - r'^(?P%s)\((?P[a-zA-Z0-9_$,]+)\)$' % _NAME_RE, expr) - if m: - fname = m.group('func') - 
argvals = [] - for v in m.group('args').split(','): - if v.isdigit(): - argvals.append([int(v)]) - else: - argvals.append([local_vars[v]]) - - if fname not in self._functions: - self._functions[fname] = self.extract_function(fname) - return self._functions[fname](argvals) - - - def extract_object(self, objname): - obj = {} - obj_m = re.search( - (r'(?:var\s+)?%s\s*=\s*\{' % re.escape(objname)) + - r'\s*(?P([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\}(?:,\s*)?)*)' + - r'\}\s*;', - self.code) - fields = obj_m.group('fields') - # Currently, it only supports function definitions - fields_m = re.finditer( - r'(?P[a-zA-Z$0-9]+)\s*:\s*function' - r'\((?P[a-z,]+)\){(?P[^}]+)}', - fields) - for f in fields_m: - argnames = f.group('args').split(',') - obj[f.group('key')] = self.build_function(argnames, f.group('code')) - - return obj - - def extract_function(self, funcname): - func_m = re.search( - r'''(?x) - (?:function\s+%s|[{;,]\s*%s\s*=\s*function|var\s+%s\s*=\s*function)\s* - \((?P[^)]*)\)\s* - \{(?P[^}]+)\}''' % ( - re.escape(funcname), re.escape(funcname), re.escape(funcname)), - self.code) - argnames = func_m.group('args').split(',') - - return self.build_function(argnames, func_m.group('code')) - - def call_function(self, funcname, *args): - f = self.extract_function(funcname) - return f(args) - - def build_function(self, argnames, code): - def resf(args): - local_vars = dict(zip(argnames, args)) - for stmt in code.split(';'): - res, abort = self.interpret_statement(stmt, local_vars) - if abort: - break - return res - return resf diff --git a/lib/pafy/__init__.py b/lib/pafy/__init__.py deleted file mode 100644 index 97f848d2..00000000 --- a/lib/pafy/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from .pafy import get_playlist -from .pafy import new -from .pafy import set_api_key -from .pafy import dump_cache -from .pafy import load_cache -from .pafy import get_categoryname -from .pafy import __version__ -from .pafy import __author__ -from .pafy import __license__ -import sys - -if "test" not in sys.argv[0]: - del pafy - -del sys diff --git a/lib/pafy/pafy.py b/lib/pafy/pafy.py deleted file mode 100644 index 17fa8918..00000000 --- a/lib/pafy/pafy.py +++ /dev/null @@ -1,1618 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -pafy.py. - -Python library to download YouTube content and retrieve metadata - -https://github.com/np1/pafy - -Copyright (C) 2013-2014 np1 - -This program is free software: you can redistribute it and/or modify it under -the terms of the GNU Lesser General Public License as published by the Free -Software Foundation, either version 3 of the License, or (at your option) any -later version. - -This program is distributed in the hope that it will be useful, but WITHOUT ANY -WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A -PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public License along -with this program. If not, see . 
- -""" - - -from __future__ import unicode_literals - -__version__ = "0.3.74" -__author__ = "np1" -__license__ = "LGPLv3" - - -import re -import os -import sys -import time -import json -import logging -import hashlib -import tempfile -from xml.etree import ElementTree - - -early_py_version = sys.version_info[:2] < (2, 7) - -if sys.version_info[:2] >= (3, 0): - # pylint: disable=E0611,F0401,I0011 - from urllib.request import build_opener - from urllib.error import HTTPError, URLError - from urllib.parse import parse_qs, unquote_plus, urlencode - uni, pyver = str, 3 - -else: - from urllib2 import build_opener, HTTPError, URLError - from urllib import unquote_plus, urlencode - from urlparse import parse_qs - uni, pyver = unicode, 2 - - -if os.environ.get("pafydebug") == "1": - logging.basicConfig(level=logging.DEBUG) - -dbg = logging.debug - - -def parseqs(data): - """ parse_qs, return unicode. """ - if type(data) == uni: - return parse_qs(data) - - elif pyver == 3: - data = data.decode("utf8") - data = parse_qs(data) - - else: - data = parse_qs(data) - out = {} - - for k, v in data.items(): - k = k.decode("utf8") - out[k] = [x.decode("utf8") for x in v] - data = out - - return data - - -def fetch_decode(url, encoding=None): - """ Fetch url and decode. """ - try: - req = g.opener.open(url) - except HTTPError as e: - if e.getcode() == 503: - time.sleep(.5) - return fetch_decode(url, encoding) - else: - raise e - - ct = req.headers['content-type'] - - if encoding: - return req.read().decode(encoding) - - elif "charset=" in ct: - dbg("charset: %s", ct) - encoding = re.search(r"charset=([\w-]+)\s*(:?;|$)", ct).group(1) - return req.read().decode(encoding) - - else: - dbg("encoding unknown") - return req.read() - - -def new(url, basic=True, gdata=False, signature=True, size=False, - callback=lambda x: None): - """ Return a new pafy instance given a url or video id. - - NOTE: The signature argument has been deprecated and now has no effect, - it will be removed in a future version. - - Optional arguments: - basic - fetch basic metadata and streams - gdata - fetch gdata info (upload date, description, category) - size - fetch the size of each stream (slow)(decrypts urls if needed) - callback - a callback function to receive status strings - - If any of the first three above arguments are False, those data items will - be fetched only when first called for. - - The defaults are recommended for most cases. If you wish to create - many video objects at once, you may want to set basic to False, eg: - - video = pafy.new(basic=False) - - This will be quick because no http requests will be made on initialisation. - - Setting size to True will override the basic argument and force basic data - to be fetched too (basic data is required to obtain Stream objects). - - """ - if not signature: - logging.warning("signature argument has no effect and will be removed" - " in a future version.") - - return Pafy(url, basic, gdata, signature, size, callback) - - -def get_video_info(video_id, newurl=None): - """ Return info for video_id. Returns dict. 
""" - url = g.urls['vidinfo'] % video_id - url = newurl if newurl else url - info = fetch_decode(url) # bytes - info = parseqs(info) # unicode dict - dbg("Fetched video info%s", " (age ver)" if newurl else "") - - if info['status'][0] == "fail" and info['errorcode'][0] == '150' and \ - "confirm your age" in info['reason'][0]: - # Video requires age verification - dbg("Age verification video") - new.callback("Age verification video") - newurl = g.urls['age_vidinfo'] % (video_id, video_id) - info = get_video_info(video_id, newurl) - info.update({"age_ver": True}) - - elif info['status'][0] == "fail": - reason = info['reason'][0] or "Bad video argument" - raise IOError("Youtube says: %s [%s]" % (reason, video_id)) - - return info - - -def get_video_gdata(video_id): - """ Return json string containing video metadata from gdata api. """ - new.callback("Fetching video gdata") - query = {'part': 'id,snippet,statistics', - 'maxResults': 1, - 'id': video_id, - 'key': g.api_key} - url = g.urls['gdata'] + '?' + urlencode(query) - gdata = fetch_decode(url) # unicode - dbg("Fetched video gdata") - new.callback("Fetched video gdata") - return gdata - - -def extract_video_id(url): - """ Extract the video id from a url, return video id as str. """ - ok = (r"\w-",) * 3 - regx = re.compile(r'(?:^|[^%s]+)([%s]{11})(?:[^%s]+|$)' % ok) - url = str(url) - m = regx.search(url) - - if not m: - err = "Need 11 character video id or the URL of the video. Got %s" - raise ValueError(err % url) - - vidid = m.group(1) - return vidid - - -class g(object): - - """ Class for holding constants needed throughout the module. """ - - urls = { - 'gdata': "https://www.googleapis.com/youtube/v3/videos", - 'watchv': "http://www.youtube.com/watch?v=%s", - 'vidcat': "https://www.googleapis.com/youtube/v3/videoCategories", - 'vidinfo': ('http://www.youtube.com/get_video_info?' - 'video_id=%s&asv=3&el=detailpage&hl=en_US'), - 'playlist': ('http://www.youtube.com/list_ajax?' 
- 'style=json&action_get_list=1&list=%s'), - 'age_vidinfo': ('http://www.youtube.com/get_video_info?video_id=%s&' - 'eurl=https://youtube.googleapis.com/v/%s&sts=1588') - } - api_key = "AIzaSyCIM4EzNqi1in22f4Z3Ru3iYvLaY8tc3bo" - user_agent = "pafy " + __version__ - UEFSM = 'url_encoded_fmt_stream_map' - AF = 'adaptive_fmts' - jsplayer = r';ytplayer\.config\s*=\s*({.*?});' - lifespan = 60 * 60 * 5 # 5 hours - opener = build_opener() - opener.addheaders = [('User-Agent', user_agent)] - cache = {} - itags = { - '5': ('320x240', 'flv', "normal", ''), - '17': ('176x144', '3gp', "normal", ''), - '18': ('640x360', 'mp4', "normal", ''), - '22': ('1280x720', 'mp4', "normal", ''), - '34': ('640x360', 'flv', "normal", ''), - '35': ('854x480', 'flv', "normal", ''), - '36': ('320x240', '3gp', "normal", ''), - '37': ('1920x1080', 'mp4', "normal", ''), - '38': ('4096x3072', 'mp4', "normal", '4:3 hi-res'), - '43': ('640x360', 'webm', "normal", ''), - '44': ('854x480', 'webm', "normal", ''), - '45': ('1280x720', 'webm', "normal", ''), - '46': ('1920x1080', 'webm', "normal", ''), - '82': ('640x360-3D', 'mp4', "normal", ''), - '83': ('640x480-3D', 'mp4', 'normal', ''), - '84': ('1280x720-3D', 'mp4', "normal", ''), - '100': ('640x360-3D', 'webm', "normal", ''), - '102': ('1280x720-3D', 'webm', "normal", ''), - '133': ('426x240', 'm4v', 'video', ''), - '134': ('640x360', 'm4v', 'video', ''), - '135': ('854x480', 'm4v', 'video', ''), - '136': ('1280x720', 'm4v', 'video', ''), - '137': ('1920x1080', 'm4v', 'video', ''), - '138': ('4096x3072', 'm4v', 'video', ''), - '139': ('48k', 'm4a', 'audio', ''), - '140': ('128k', 'm4a', 'audio', ''), - '141': ('256k', 'm4a', 'audio', ''), - '160': ('256x144', 'm4v', 'video', ''), - '167': ('640x480', 'webm', 'video', ''), - '168': ('854x480', 'webm', 'video', ''), - '169': ('1280x720', 'webm', 'video', ''), - '170': ('1920x1080', 'webm', 'video', ''), - '171': ('128k', 'ogg', 'audio', ''), - '172': ('192k', 'ogg', 'audio', ''), - '218': ('854x480', 'webm', 'video', 'VP8'), - '219': ('854x480', 'webm', 'video', 'VP8'), - '242': ('360x240', 'webm', 'video', 'VP9'), - '243': ('480x360', 'webm', 'video', 'VP9'), - '244': ('640x480', 'webm', 'video', 'VP9 low'), - '245': ('640x480', 'webm', 'video', 'VP9 med'), - '246': ('640x480', 'webm', 'video', 'VP9 high'), - '247': ('720x480', 'webm', 'video', 'VP9'), - '248': ('1920x1080', 'webm', 'video', 'VP9'), - '249': ('48k', 'ogg', 'audio', 'Opus'), - '250': ('56k', 'ogg', 'audio', 'Opus'), - '251': ('128k', 'ogg', 'audio', 'Opus'), - '256': ('192k', 'm4a', 'audio', '6-channel'), - '258': ('320k', 'm4a', 'audio', '6-channel'), - '264': ('2560x1440', 'm4v', 'video', ''), - '266': ('3840x2160', 'm4v', 'video', 'AVC'), - '271': ('1920x1280', 'webm', 'video', 'VP9'), - '272': ('3414x1080', 'webm', 'video', 'VP9'), - '278': ('256x144', 'webm', 'video', 'VP9'), - '298': ('1280x720', 'm4v', 'video', '60fps'), - '299': ('1920x1080', 'm4v', 'video', '60fps'), - '302': ('1280x720', 'webm', 'video', 'VP9'), - '303': ('1920x1080', 'webm', 'video', 'VP9'), - } - - -def _extract_smap(map_name, dic, zero_idx=True): - """ Extract stream map, returns list of dicts. """ - if map_name in dic: - smap = dic.get(map_name) - smap = smap[0] if zero_idx else smap - smap = smap.split(",") - smap = [parseqs(x) for x in smap] - return [dict((k, v[0]) for k, v in x.items()) for x in smap] - - return [] - - -def _extract_dash(dashurl): - """ Download dash url and extract some data. 
""" - # pylint: disable = R0914 - dbg("Fetching dash page") - dashdata = fetch_decode(dashurl) - dbg("DASH list fetched") - ns = "{urn:mpeg:DASH:schema:MPD:2011}" - ytns = "{http://youtube.com/yt/2012/10/10}" - tree = ElementTree.fromstring(dashdata) - tlist = tree.findall(".//%sRepresentation" % ns) - dashmap = [] - - for x in tlist: - baseurl = x.find("%sBaseURL" % ns) - url = baseurl.text - size = baseurl.attrib["%scontentLength" % ytns] - bitrate = x.get("bandwidth") - itag = uni(x.get("id")) - width = uni(x.get("width")) - height = uni(x.get("height")) - type_ = re.search(r"(?:\?|&)mime=([\w\d\/]+)", url).group(1) - dashmap.append(dict(bitrate=bitrate, - dash=True, - itag=itag, - width=width, - height=height, - url=url, - size=size, - type=type_)) - return dashmap - - -def _extract_function_from_js(name, js): - """ Find a function definition called `name` and extract components. - - Return a dict representation of the function. - - """ - dbg("Extracting function '%s' from javascript", name) - fpattern = r'function\s+%s\(((?:\w+,?)+)\)\{([^}]+)\}' - m = re.search(fpattern % re.escape(name), js) - args, body = m.groups() - dbg("extracted function %s(%s){%s};", name, args, body) - func = {'name': name, 'parameters': args.split(","), 'body': body} - return func - - -def _extract_dictfunc_from_js(name, js): - """ Find anonymous function from within a dict. """ - dbg("Extracting function '%s' from javascript", name) - var, _, fname = name.partition(".") - fpattern = (r'var\s+%s\s*\=\s*\{.{,2000}?%s' - r'\:function\(((?:\w+,?)+)\)\{([^}]+)\}') - m = re.search(fpattern % (re.escape(var), re.escape(fname)), js) - args, body = m.groups() - dbg("extracted dict function %s(%s){%s};", name, args, body) - func = {'name': name, 'parameters': args.split(","), 'body': body} - return func - - -def _get_mainfunc_from_js(js): - """ Return main signature decryption function from javascript as dict. """ - dbg("Scanning js for main function.") - m = re.search(r'\w\.sig\|\|([$\w]+)\(\w+\.\w+\)', js) - funcname = m.group(1) - dbg("Found main function: %s", funcname) - function = _extract_function_from_js(funcname, js) - return function - - -def _get_other_funcs(primary_func, js): - """ Return all secondary functions used in primary_func. """ - dbg("scanning javascript for secondary functions.") - body = primary_func['body'] - body = body.split(";") - # standard function call; X=F(A,B,C...) - call = re.compile(r'(?:[$\w+])=([$\w]+)\(((?:\w+,?)+)\)$') - - # dot notation function call; X=O.F(A,B,C..) - dotcall = re.compile(r'(?:[$\w+]=)?([$\w]+)\.([$\w]+)\(((?:\w+,?)+)\)$') - - functions = {} - - for part in body: - - # is this a function? - if call.match(part): - match = call.match(part) - name = match.group(1) - # dbg("found secondary function '%s'", name) - - if name not in functions: - # extract from javascript if not previously done - functions[name] = _extract_function_from_js(name, js) - - # else: - # dbg("function '%s' is already in map.", name) - elif dotcall.match(part): - - match = dotcall.match(part) - name = "%s.%s" % (match.group(1), match.group(2)) - - # don't treat X=A.slice(B) as X=O.F(B) - if match.group(2) in ["slice", "splice"]: - continue - - if name not in functions: - functions[name] = _extract_dictfunc_from_js(name, js) - - return functions - - -def _getval(val, argsdict): - """ resolve variable values, preserve int literals. 
Return dict.""" - m = re.match(r'(\d+)', val) - - if m: - return int(m.group(1)) - - elif val in argsdict: - return argsdict[val] - - else: - raise IOError("Error val %s from dict %s" % (val, argsdict)) - - -def _get_func_from_call(caller, name, arguments, js_url): - """ - Return called function complete with called args given a caller function . - - This function requires that Pafy.funcmap contains the function `name`. - It retrieves the function and fills in the parameter values as called in - the caller, returning them in the returned newfunction `args` dict - - """ - newfunction = Pafy.funcmap[js_url][name] - newfunction['args'] = {} - - for n, arg in enumerate(arguments): - value = _getval(arg, caller['args']) - - # function may not use all arguments - if n < len(newfunction['parameters']): - param = newfunction['parameters'][n] - newfunction['args'][param] = value - - return newfunction - - -def _solve(f, js_url, returns=True): - """Solve basic javascript function. Return solution value (str). """ - # pylint: disable=R0914,R0912 - resv = "slice|splice|reverse" - patterns = { - 'split_or_join': r'(\w+)=\1\.(?:split|join)\(""\)$', - 'func_call': r'(\w+)=([$\w]+)\(((?:\w+,?)+)\)$', - 'x1': r'var\s(\w+)=(\w+)\[(\w+)\]$', - 'x2': r'(\w+)\[(\w+)\]=(\w+)\[(\w+)\%(\w+)\.length\]$', - 'x3': r'(\w+)\[(\w+)\]=(\w+)$', - 'return': r'return (\w+)(\.join\(""\))?$', - 'reverse': r'(\w+)=(\w+)\.reverse\(\)$', - 'reverse_noass': r'(\w+)\.reverse\(\)$', - 'return_reverse': r'return (\w+)\.reverse\(\)$', - 'slice': r'(\w+)=(\w+)\.slice\((\w+)\)$', - 'splice_noass': r'([$\w]+)\.splice\(([$\w]+)\,([$\w]+)\)$', - 'return_slice': r'return (\w+)\.slice\((\w+)\)$', - 'func_call_dict': r'(\w)=([$\w]+)\.(?!%s)([$\w]+)\(((?:\w+,?)+)\)$' - % resv, - 'func_call_dict_noret': r'([$\w]+)\.(?!%s)([$\w]+)\(((?:\w+,?)+)\)$' - % resv - } - - parts = f['body'].split(";") - - for part in parts: - # dbg("Working on part: " + part) - - name = "" - - for n, p in patterns.items(): - m, name = re.match(p, part), n - - if m: - break - else: - raise IOError("no match for %s" % part) - - if name == "split_or_join": - pass - - elif name == "func_call_dict": - lhs, dic, key, args = m.group(1, 2, 3, 4) - funcname = "%s.%s" % (dic, key) - newfunc = _get_func_from_call(f, funcname, args.split(","), js_url) - f['args'][lhs] = _solve(newfunc, js_url) - - elif name == "func_call_dict_noret": - dic, key, args = m.group(1, 2, 3) - funcname = "%s.%s" % (dic, key) - newfunc = _get_func_from_call(f, funcname, args.split(","), js_url) - changed_args = _solve(newfunc, js_url, returns=False) - - for arg in f['args']: - - if arg in changed_args: - f['args'][arg] = changed_args[arg] - - elif name == "func_call": - lhs, funcname, args = m.group(1, 2, 3) - newfunc = _get_func_from_call(f, funcname, args.split(","), js_url) - f['args'][lhs] = _solve(newfunc, js_url) # recursive call - - # new var is an index of another var; eg: var a = b[c] - elif name == "x1": - b, c = [_getval(x, f['args']) for x in m.group(2, 3)] - f['args'][m.group(1)] = b[c] - - # a[b]=c[d%e.length] - elif name == "x2": - vals = m.group(*range(1, 6)) - a, b, c, d, e = [_getval(x, f['args']) for x in vals] - f['args'][m.group(1)] = a[:b] + c[d % len(e)] + a[b + 1:] - - # a[b]=c - elif name == "x3": - a, b, c = [_getval(x, f['args']) for x in m.group(1, 2, 3)] - f['args'][m.group(1)] = a[:b] + c + a[b + 1:] # a[b] = c - - elif name == "return": - return f['args'][m.group(1)] - - elif name == "reverse": - f['args'][m.group(1)] = _getval(m.group(2), f['args'])[::-1] - - elif name == 
"reverse_noass": - f['args'][m.group(1)] = _getval(m.group(1), f['args'])[::-1] - - elif name == "splice_noass": - a, b, c = [_getval(x, f['args']) for x in m.group(1, 2, 3)] - f['args'][m.group(1)] = a[:b] + a[b + c:] - - elif name == "return_reverse": - return f['args'][m.group(1)][::-1] - - elif name == "return_slice": - a, b = [_getval(x, f['args']) for x in m.group(1, 2)] - return a[b:] - - elif name == "slice": - a, b, c = [_getval(x, f['args']) for x in m.group(1, 2, 3)] - f['args'][m.group(1)] = b[c:] - - if not returns: - # Return the args dict if no return statement in function - return f['args'] - - else: - raise IOError("Processed js funtion parts without finding return") - - -def _decodesig(sig, js_url): - """ Return decrypted sig given an encrypted sig and js_url key. """ - # lookup main function in Pafy.funcmap dict - mainfunction = Pafy.funcmap[js_url]['mainfunction'] - param = mainfunction['parameters'] - - if not len(param) == 1: - raise IOError("Main sig js function has more than one arg: %s" % param) - - # fill in function argument with signature - mainfunction['args'] = {param[0]: sig} - new.callback("Decrypting signature") - solved = _solve(mainfunction, js_url) - dbg("Decrypted sig = %s...", solved[:30]) - new.callback("Decrypted signature") - return solved - - -def remux(infile, outfile, quiet=False, muxer="ffmpeg"): - """ Remux audio. """ - from subprocess import call, STDOUT - muxer = muxer if isinstance(muxer, str) else "ffmpeg" - - for tool in set([muxer, "ffmpeg", "avconv"]): - cmd = [tool, "-y", "-i", infile, "-acodec", "copy", "-vn", outfile] - - try: - with open(os.devnull, "w") as devnull: - call(cmd, stdout=devnull, stderr=STDOUT) - - except OSError: - dbg("Failed to remux audio using %s", tool) - - else: - os.unlink(infile) - dbg("remuxed audio file using %s" % tool) - - if not quiet: - sys.stdout.write("\nAudio remuxed.\n") - - break - - else: - logging.warning("audio remux failed") - os.rename(infile, outfile) - - -def cache(name): - """ Returns a sub-cache dictionary under which global key, value pairs - can be stored. Regardless of whether a dictionary already exists for - the given name, the sub-cache is returned by reference. - """ - if name not in g.cache: - g.cache[name] = {} - return g.cache[name] - - -def fetch_cached(url, encoding=None, dbg_ref="", file_prefix=""): - """ Fetch url - from tmpdir if already retrieved. """ - tmpdir = os.path.join(tempfile.gettempdir(), "pafy") - - if not os.path.exists(tmpdir): - os.makedirs(tmpdir) - - url_md5 = hashlib.md5(url.encode("utf8")).hexdigest() - cached_filename = os.path.join(tmpdir, file_prefix + url_md5) - - if os.path.exists(cached_filename): - dbg("fetched %s from cache", dbg_ref) - - with open(cached_filename) as f: - retval = f.read() - - return retval - - else: - data = fetch_decode(url, "utf8") # unicode - dbg("Fetched %s", dbg_ref) - new.callback("Fetched %s" % dbg_ref) - - with open(cached_filename, "w") as f: - f.write(data) - - # prune files after write - prune_files(tmpdir, file_prefix) - return data - - -def prune_files(path, prefix="", age_max=3600 * 24 * 14, count_max=4): - """ Remove oldest files from path that start with prefix. - - remove files older than age_max, leave maximum of count_max files. 
- """ - tempfiles = [] - - if not os.path.isdir(path): - return - - for f in os.listdir(path): - filepath = os.path.join(path, f) - - if os.path.isfile(filepath) and f.startswith(prefix): - age = time.time() - os.path.getmtime(filepath) - - if age > age_max: - os.unlink(filepath) - - else: - tempfiles.append((filepath, age)) - - tempfiles = sorted(tempfiles, key=lambda x: x[1], reverse=True) - - for f in tempfiles[:-count_max]: - os.unlink(f[0]) - - -def get_js_sm(video_id): - """ Fetch watchinfo page and extract stream map and js funcs if not known. - - This function is needed by videos with encrypted signatures. - If the js url referred to in the watchv page is not a key in Pafy.funcmap, - the javascript is fetched and functions extracted. - - Returns streammap (list of dicts), js url (str) and funcs (dict) - - """ - watch_url = g.urls['watchv'] % video_id - new.callback("Fetching watch page") - watchinfo = fetch_decode(watch_url) # unicode - dbg("Fetched watch page") - new.callback("Fetched watch page") - m = re.search(g.jsplayer, watchinfo) - myjson = json.loads(m.group(1)) - stream_info = myjson['args'] - dash_url = stream_info['dashmpd'] - sm = _extract_smap(g.UEFSM, stream_info, False) - asm = _extract_smap(g.AF, stream_info, False) - js_url = myjson['assets']['js'] - js_url = "https:" + js_url if js_url.startswith("//") else js_url - funcs = Pafy.funcmap.get(js_url) - - if not funcs: - dbg("Fetching javascript") - new.callback("Fetching javascript") - javascript = fetch_cached(js_url, encoding="utf8", - dbg_ref="javascript", file_prefix="js-") - mainfunc = _get_mainfunc_from_js(javascript) - funcs = _get_other_funcs(mainfunc, javascript) - funcs['mainfunction'] = mainfunc - - elif funcs: - dbg("Using functions in memory extracted from %s", js_url) - dbg("Mem contains %s js func sets", len(Pafy.funcmap)) - - return (sm, asm), js_url, funcs, dash_url - - -def _make_url(raw, sig, quick=True): - """ Return usable url. Set quick=False to disable ratebypass override. """ - if quick and "ratebypass=" not in raw: - raw += "&ratebypass=yes" - - if "signature=" not in raw: - - if sig is None: - raise IOError("Error retrieving url") - - raw += "&signature=" + sig - - return raw - - -class Stream(object): - - """ YouTube video stream class. """ - - def __init__(self, sm, parent): - """ Set initial values. 
""" - self._itag = sm['itag'] - # is_dash = "width" in sm and "height" in sm - is_dash = "dash" in sm - - if self._itag not in g.itags: - logging.warning("Unknown itag: %s", self._itag) - return None - - self._mediatype = g.itags[self.itag][2] - self._threed = 'stereo3d' in sm and sm['stereo3d'] == '1' - - if is_dash: - - if sm['width'] != "None": # dash video - self._resolution = "%sx%s" % (sm['width'], sm['height']) - self._quality = self._resolution - self._dimensions = (int(sm['width']), int(sm['height'])) - - else: # dash audio - self._resolution = "0x0" - self._dimensions = (0, 0) - self._rawbitrate = int(sm['bitrate']) - # self._bitrate = uni(int(sm['bitrate']) // 1024) + "k" - self._bitrate = g.itags[self.itag][0] - self._quality = self._bitrate - - self._fsize = int(sm['size']) - # self._bitrate = sm['bitrate'] - # self._rawbitrate = uni(int(self._bitrate) // 1024) + "k" - - else: # not dash - self._resolution = g.itags[self.itag][0] - self._fsize = None - self._bitrate = self._rawbitrate = None - self._dimensions = tuple(self.resolution.split("-")[0].split("x")) - self._dimensions = tuple([int(x) if x.isdigit() else x for x in - self._dimensions]) - self._quality = self.resolution - - self._vidformat = sm['type'].split(';')[0] # undocumented - self._extension = g.itags[self.itag][1] - self._title = parent.title - self.encrypted = 's' in sm - self._parent = parent - self._filename = self.generate_filename() - self._notes = g.itags[self.itag][3] - self._url = None - self._rawurl = sm['url'] - self._sig = sm['s'] if self.encrypted else sm.get("sig") - self._active = False - - if self.mediatype == "audio" and not is_dash: - self._dimensions = (0, 0) - self._bitrate = self.resolution - self._quality = self.bitrate - self._resolution = "0x0" - self._rawbitrate = int(sm["bitrate"]) - - def generate_filename(self, meta=False): - """ Generate filename. """ - ok = re.compile(r'[^/]') - - if os.name == "nt": - ok = re.compile(r'[^\\/:*?"<>|]') - - filename = "".join(x if ok.match(x) else "_" for x in self._title) - - if meta: - filename += "-%s-%s" % (self._parent.videoid, self._itag) - - filename += "." + self._extension - return filename - - @property - def rawbitrate(self): - """ Return raw bitrate value. """ - return self._rawbitrate - - @property - def threed(self): - """ Return bool, True if stream is 3D. """ - return self._threed - - @property - def itag(self): - """ Return itag value of stream. """ - return self._itag - - @property - def resolution(self): - """ Return resolution of stream as str. 0x0 if audio. """ - return self._resolution - - @property - def dimensions(self): - """ Return dimensions of stream as tuple. (0, 0) if audio. """ - return self._dimensions - - @property - def quality(self): - """ Return quality of stream (bitrate or resolution). - - eg, 128k or 640x480 (str) - """ - return self._quality - - @property - def title(self): - """ Return YouTube video title as a string. """ - return self._title - - @property - def extension(self): - """ Return appropriate file extension for stream (str). - - Possible values are: 3gp, m4a, m4v, mp4, webm, ogg - """ - return self._extension - - @property - def bitrate(self): - """ Return bitrate of an audio stream. """ - return self._bitrate - - @property - def mediatype(self): - """ Return mediatype string (normal, audio or video). - - (normal means a stream containing both video and audio.) - """ - return self._mediatype - - @property - def notes(self): - """ Return additional notes regarding the stream format. 
""" - return self._notes - - @property - def filename(self): - """ Return filename of stream; derived from title and extension. """ - return self._filename - - @property - def url(self): - """ Return the url, decrypt if required. """ - if not self._url: - - if self._parent.age_ver: - - if self._sig: - s = self._sig - self._sig = s[2:63] + s[82] + s[64:82] + s[63] - - elif self.encrypted: - self._sig = _decodesig(self._sig, self._parent.js_url) - - self._url = _make_url(self._rawurl, self._sig) - - return self._url - - @property - def url_https(self): - """ Return https url. """ - return self.url.replace("http://", "https://") - - def __repr__(self): - """ Return string representation. """ - out = "%s:%s@%s" % (self.mediatype, self.extension, self.quality) - return out - - def get_filesize(self): - """ Return filesize of the stream in bytes. Set member variable. """ - if not self._fsize: - - try: - dbg("Getting stream size") - cl = "content-length" - self._fsize = int(g.opener.open(self.url).headers[cl]) - dbg("Got stream size") - - except (AttributeError, HTTPError, URLError): - self._fsize = 0 - - return self._fsize - - def cancel(self): - """ Cancel an active download. """ - if self._active: - self._active = False - return True - - def download(self, filepath="", quiet=False, callback=lambda *x: None, - meta=False, remux_audio=False): - """ Download. Use quiet=True to supress output. Return filename. - - Use meta=True to append video id and itag to generated filename - Use remax_audio=True to remux audio file downloads - - """ - # pylint: disable=R0912,R0914 - # Too many branches, too many local vars - savedir = filename = "" - - if filepath and os.path.isdir(filepath): - savedir, filename = filepath, self.generate_filename() - - elif filepath: - savedir, filename = os.path.split(filepath) - - else: - filename = self.generate_filename(meta=meta) - - filepath = os.path.join(savedir, filename) - temp_filepath = filepath + ".temp" - - status_string = (' {:,} Bytes [{:.2%}] received. Rate: [{:4.0f} ' - 'KB/s]. ETA: [{:.0f} secs]') - - if early_py_version: - status_string = (' {0:} Bytes [{1:.2%}] received. Rate:' - ' [{2:4.0f} KB/s]. 
ETA: [{3:.0f} secs]') - - response = g.opener.open(self.url) - total = int(response.info()['Content-Length'].strip()) - chunksize, bytesdone, t0 = 16384, 0, time.time() - - fmode, offset = "wb", 0 - - if os.path.exists(temp_filepath): - if os.stat(temp_filepath).st_size < total: - - offset = os.stat(temp_filepath).st_size - fmode = "ab" - - outfh = open(temp_filepath, fmode) - - if offset: - # partial file exists, resume download - resuming_opener = build_opener() - resuming_opener.addheaders = [('User-Agent', g.user_agent), - ("Range", "bytes=%s-" % offset)] - response = resuming_opener.open(self.url) - bytesdone = offset - - self._active = True - - while self._active: - chunk = response.read(chunksize) - outfh.write(chunk) - elapsed = time.time() - t0 - bytesdone += len(chunk) - if elapsed: - rate = ((bytesdone - offset) / 1024) / elapsed - eta = (total - bytesdone) / (rate * 1024) - else: # Avoid ZeroDivisionError - rate = 0 - eta = 0 - progress_stats = (bytesdone, bytesdone * 1.0 / total, rate, eta) - - if not chunk: - outfh.close() - break - - if not quiet: - status = status_string.format(*progress_stats) - sys.stdout.write("\r" + status + ' ' * 4 + "\r") - sys.stdout.flush() - - if callback: - callback(total, *progress_stats) - - if self._active: - - if remux_audio and self.mediatype == "audio": - remux(temp_filepath, filepath, quiet=quiet, muxer=remux_audio) - - else: - os.rename(temp_filepath, filepath) - - return filepath - - else: # download incomplete, return temp filepath - outfh.close() - return temp_filepath - - -class Pafy(object): - - """ Class to represent a YouTube video. """ - - funcmap = {} # keep functions as a class variable - - def __init__(self, video_url, basic=True, gdata=False, - signature=True, size=False, callback=lambda x: None): - """ Set initial values. """ - self.version = __version__ - self.videoid = extract_video_id(video_url) - self.watchv_url = g.urls['watchv'] % self.videoid - - new.callback = callback - self._have_basic = False - self._have_gdata = False - - self._description = None - self._likes = None - self._dislikes = None - self._category = None - self._published = None - self._username = None - - self.sm = [] - self.asm = [] - self.dash = [] - self.js_url = None # if js_url is set then has new stream map - self._dashurl = None - self.age_ver = False - self._streams = [] - self._oggstreams = [] - self._m4astreams = [] - self._allstreams = [] - self._videostreams = [] - self._audiostreams = [] - - self._title = None - self._thumb = None - self._rating = None - self._length = None - self._author = None - self._formats = None - self.ciphertag = None # used by Stream class in url property def - self._duration = None - self._keywords = None - self._bigthumb = None - self._viewcount = None - self._bigthumbhd = None - self._mix_pl = None - self.expiry = None - self.playlist_meta = None - - if basic: - self.fetch_basic() - - if gdata: - self._fetch_gdata() - - if size: - for s in self.allstreams: - # pylint: disable=W0104 - s.get_filesize() - - def fetch_basic(self): - """ Fetch basic data and streams. 
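# A minimal sketch of the resume technique in Stream.download() above: when a
# partial ".temp" file exists, the remainder is requested with an HTTP Range
# header and appended in "ab" mode. User agent and url are placeholders.
try:
    from urllib.request import build_opener          # python 3
except ImportError:
    from urllib2 import build_opener                 # python 2

def open_resuming(url, offset, user_agent="Mozilla/5.0"):
    opener = build_opener()
    opener.addheaders = [("User-Agent", user_agent),
                         ("Range", "bytes=%s-" % offset)]
    return opener.open(url)                          # read() continues at offset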
""" - if self._have_basic: - return - - self._fetch_basic() - sm_ciphertag = "s" in self.sm[0] - - if self.ciphertag != sm_ciphertag: - dbg("ciphertag mismatch") - self.ciphertag = not self.ciphertag - - if self.ciphertag: - dbg("Encrypted signature detected.") - - if not self.age_ver: - smaps, js_url, funcs, dashurl = get_js_sm(self.videoid) - Pafy.funcmap[js_url] = funcs - self.sm, self.asm = smaps - self.js_url = js_url - dashsig = re.search(r"/s/([\w\.]+)", dashurl).group(1) - dbg("decrypting dash sig") - goodsig = _decodesig(dashsig, js_url) - self._dashurl = re.sub(r"/s/[\w\.]+", - "/signature/%s" % goodsig, dashurl) - - else: - s = re.search(r"/s/([\w\.]+)", self._dashurl).group(1) - s = s[2:63] + s[82] + s[64:82] + s[63] - self._dashurl = re.sub(r"/s/[\w\.]+", - "/signature/%s" % s, self._dashurl) - - if self._dashurl != 'unknown': - self.dash = _extract_dash(self._dashurl) - self._have_basic = 1 - self._process_streams() - self.expiry = time.time() + g.lifespan - - def _fetch_basic(self, info_url=None): - """ Fetch info url page and set member vars. """ - allinfo = get_video_info(self.videoid, newurl=info_url) - - if allinfo.get("age_ver"): - self.age_ver = True - - new.callback("Fetched video info") - - def _get_lst(key, default="unknown", dic=allinfo): - """ Dict get function, returns first index. """ - retval = dic.get(key, default) - return retval[0] if retval != default else default - - self._title = _get_lst('title') - self._dashurl = _get_lst('dashmpd') - self._author = _get_lst('author') - self._rating = float(_get_lst('avg_rating', 0.0)) - self._length = int(_get_lst('length_seconds', 0)) - self._viewcount = int(_get_lst('view_count'), 0) - self._thumb = unquote_plus(_get_lst('thumbnail_url', "")) - self._formats = [x.split("/") for x in _get_lst('fmt_list').split(",")] - self._keywords = _get_lst('keywords', "").split(',') - self._bigthumb = _get_lst('iurlsd', "") - self._bigthumbhd = _get_lst('iurlsdmaxres', "") - self.ciphertag = _get_lst("use_cipher_signature") == "True" - self.sm = _extract_smap(g.UEFSM, allinfo, True) - self.asm = _extract_smap(g.AF, allinfo, True) - dbg("extracted stream maps") - - def _fetch_gdata(self): - """ Extract gdata values, fetch gdata if necessary. """ - if self._have_gdata: - return - - gdata = get_video_gdata(self.videoid) - item = json.loads(gdata)['items'][0] - snippet = item['snippet'] - self._published = uni(snippet['publishedAt']) - self._description = uni(snippet["description"]) - self._category = get_categoryname(snippet['categoryId']) - # TODO: Make sure actual usename is not available through the api - self._username = uni(snippet['channelTitle']) - statistics = item["statistics"] - self._likes = int(statistics["likeCount"]) - self._dislikes = int(statistics["dislikeCount"]) - self._have_gdata = 1 - - def _process_streams(self): - """ Create Stream object lists from internal stream maps. 
""" - if not self._have_basic: - self.fetch_basic() - - streams = [Stream(z, self) for z in self.sm] - streams = [x for x in streams if x.itag in g.itags] - adpt_streams = [Stream(z, self) for z in self.asm] - adpt_streams = [x for x in adpt_streams if x.itag in g.itags] - dash_streams = [Stream(z, self) for z in self.dash] - dash_streams = [x for x in dash_streams if x.itag in g.itags] - audiostreams = [x for x in adpt_streams if x.bitrate] - videostreams = [x for x in adpt_streams if not x.bitrate] - dash_itags = [x.itag for x in dash_streams] - audiostreams = [x for x in audiostreams if x.itag not in dash_itags] - videostreams = [x for x in videostreams if x.itag not in dash_itags] - audiostreams += [x for x in dash_streams if x.mediatype == "audio"] - videostreams += [x for x in dash_streams if x.mediatype != "audio"] - audiostreams = sorted(audiostreams, key=lambda x: x.rawbitrate, - reverse=True) - videostreams = sorted(videostreams, key=lambda x: x.dimensions, - reverse=True) - m4astreams = [x for x in audiostreams if x.extension == "m4a"] - oggstreams = [x for x in audiostreams if x.extension == "ogg"] - self._streams = streams - self._audiostreams = audiostreams - self._videostreams = videostreams - self._m4astreams, self._oggstreams = m4astreams, oggstreams - self._allstreams = streams + videostreams + audiostreams - - def __repr__(self): - """ Print video metadata. Return utf8 string. """ - if self._have_basic: - keys = "Title Author ID Duration Rating Views Thumbnail Keywords" - keys = keys.split(" ") - keywords = ", ".join(self.keywords) - info = {"Title": self.title, - "Author": self.author, - "Views": self.viewcount, - "Rating": self.rating, - "Duration": self.duration, - "ID": self.videoid, - "Thumbnail": self.thumb, - "Keywords": keywords} - - nfo = "\n".join(["%s: %s" % (k, info.get(k, "")) for k in keys]) - - else: - nfo = "Pafy object: %s [%s]" % (self.videoid, - self.title[:45] + "..") - - return nfo.encode("utf8", "replace") if pyver == 2 else nfo - - @property - def streams(self): - """ The streams for a video. Returns list.""" - self.fetch_basic() - return self._streams - - @property - def allstreams(self): - """ All stream types for a video. Returns list. """ - self.fetch_basic() - return self._allstreams - - @property - def audiostreams(self): - """ Return a list of audio Stream objects. """ - self.fetch_basic() - return self._audiostreams - - @property - def videostreams(self): - """ The video streams for a video. Returns list. """ - self.fetch_basic() - return self._videostreams - - @property - def oggstreams(self): - """ Return a list of ogg encoded Stream objects. """ - self.fetch_basic() - return self._oggstreams - - @property - def m4astreams(self): - """ Return a list of m4a encoded Stream objects. """ - self.fetch_basic() - return self._m4astreams - - @property - def title(self): - """ Return YouTube video title as a string. """ - if not self._title: - self.fetch_basic() - - return self._title - - @property - def author(self): - """ The uploader of the video. Returns str. """ - if not self._author: - self.fetch_basic() - - return self._author - - @property - def rating(self): - """ Rating for a video. Returns float. """ - if not self._rating: - self.fetch_basic() - - return self._rating - - @property - def length(self): - """ Length of a video in seconds. Returns int. """ - if not self._length: - self.fetch_basic() - - return self._length - - @property - def viewcount(self): - """ Number of views for a video. Returns int. 
""" - if not self._viewcount: - self.fetch_basic() - - return self._viewcount - - @property - def bigthumb(self): - """ Large thumbnail image url. Returns str. """ - self.fetch_basic() - return self._bigthumb - - @property - def bigthumbhd(self): - """ Extra large thumbnail image url. Returns str. """ - self.fetch_basic() - return self._bigthumbhd - - @property - def thumb(self): - """ Thumbnail image url. Returns str. """ - if not self._thumb: - self.fetch_basic() - - return self._thumb - - @property - def duration(self): - """ Duration of a video (HH:MM:SS). Returns str. """ - if not self._length: - self.fetch_basic() - - self._duration = time.strftime('%H:%M:%S', time.gmtime(self._length)) - self._duration = uni(self._duration) - - return self._duration - - @property - def keywords(self): - """ Return keywords as list of str. """ - self.fetch_basic() - return self._keywords - - @property - def category(self): - """ YouTube category of the video. Returns string. """ - self._fetch_gdata() - return self._category - - @property - def description(self): - """ Description of the video. Returns string. """ - if not self._description: - self._fetch_gdata() - - return self._description - - @property - def username(self): - """ Return the username of the uploader. """ - self._fetch_gdata() - return self._username - - @property - def published(self): - """ The upload date and time of the video. Returns string. """ - self._fetch_gdata() - return self._published.replace(".000Z", "").replace("T", " ") - - @property - def likes(self): - """ The number of likes for the video. Returns int. """ - self._fetch_gdata() - return self._likes - - @property - def dislikes(self): - """ The number of dislikes for the video. Returns int. """ - self._fetch_gdata() - return self._dislikes - - @property - def mix(self): - """ The playlist for the related YouTube mix. Returns a dict containing Pafy objects. """ - if self._mix_pl is None: - try: - self._mix_pl = get_playlist("RD" + self.videoid) - except IOError: - return None - return self._mix_pl - - def _getbest(self, preftype="any", ftypestrict=True, vidonly=False): - """ - Return the highest resolution video available. - - Select from video-only streams if vidonly is True - """ - streams = self.videostreams if vidonly else self.streams - - if not streams: - return None - - def _sortkey(x, key3d=0, keyres=0, keyftype=0): - """ sort function for max(). """ - key3d = "3D" not in x.resolution - keyres = int(x.resolution.split("x")[0]) - keyftype = preftype == x.extension - strict = (key3d, keyftype, keyres) - nonstrict = (key3d, keyres, keyftype) - return strict if ftypestrict else nonstrict - - r = max(streams, key=_sortkey) - - if ftypestrict and preftype != "any" and r.extension != preftype: - return None - - else: - return r - - def getbestvideo(self, preftype="any", ftypestrict=True): - """ - Return the best resolution video-only stream. - - set ftypestrict to False to return a non-preferred format if that - has a higher resolution - """ - return self._getbest(preftype, ftypestrict, vidonly=True) - - def getbest(self, preftype="any", ftypestrict=True): - """ - Return the highest resolution video+audio stream. 
- - set ftypestrict to False to return a non-preferred format if that - has a higher resolution - """ - return self._getbest(preftype, ftypestrict, vidonly=False) - - def getbestaudio(self, preftype="any", ftypestrict=True): - """ Return the highest bitrate audio Stream object.""" - if not self.audiostreams: - return None - - def _sortkey(x, keybitrate=0, keyftype=0): - """ Sort function for max(). """ - keybitrate = int(x.rawbitrate) - keyftype = preftype == x.extension - strict, nonstrict = (keyftype, keybitrate), (keybitrate, keyftype) - return strict if ftypestrict else nonstrict - - r = max(self.audiostreams, key=_sortkey) - - if ftypestrict and preftype != "any" and r.extension != preftype: - return None - - else: - return r - - def populate_from_playlist(self, pl_data): - """ Populate Pafy object with items fetched from playlist data. """ - self._title = pl_data.get("title") - self._author = pl_data.get("author") - self._length = int(pl_data.get("length_seconds", 0)) - self._rating = pl_data.get("rating", 0.0) - self._viewcount = "".join(re.findall(r"\d", pl_data.get("views", "0"))) - self._viewcount = int(self._viewcount) - self._thumb = pl_data.get("thumbnail") - self._description = pl_data.get("description") - self.playlist_meta = pl_data - - -def get_categoryname(cat_id): - """ Returns a list of video category names for one category ID. """ - timestamp = time.time() - cat_cache = cache('categories') - cached = cat_cache.get(cat_id, {}) - if cached.get('updated', 0) > timestamp - g.lifespan: - return cached.get('title', 'unknown') - # call videoCategories API endpoint to retrieve title - url = g.urls['vidcat'] - query = {'id': cat_id, - 'part': 'snippet', - 'key': g.api_key} - url += "?" + urlencode(query) - catinfo = json.loads(fetch_decode(url)) - try: - for item in catinfo.get('items', []): - title = item.get('snippet', {}).get('title', 'unknown') - cat_cache[cat_id] = {'title':title, 'updated':timestamp} - return title - cat_cache[cat_id] = {'updated':timestamp} - return 'unknown' - except Exception: - raise IOError("Error fetching category name for ID %s" % cat_id) - - -def set_categories(categories): - """ Take a dictionary mapping video category IDs to name and retrieval - time. All items are stored into cache node 'videoCategories', but - for the ones with a retrieval time too long ago, the v3 API is queried - before. - """ - timestamp = time.time() - idlist = [cid for cid, item in categories.items() - if item.get('updated', 0) < timestamp - g.lifespan] - if len(idlist) > 0: - url = g.urls['vidcat'] - query = {'id': ','.join(idlist), - 'part': 'snippet', - 'key': g.api_key} - url += "?" + urlencode(query) - catinfo = json.loads(fetch_decode(url)) - try: - for item in catinfo.get('items', []): - cid = item['id'] - title = item.get('snippet', {}).get('title', 'unknown') - categories[cid] = {'title':title, 'updated':timestamp} - except Exception: - raise IOError("Error fetching category name for IDs %s" % idlist) - cache('categories').update(categories) - - -def load_cache(newcache): - """Loads a dict into pafy's internal cache.""" - set_categories(newcache.get('categories', {})) - - -def dump_cache(): - """Returns pafy's cache for storing by program.""" - return g.cache - - -def get_playlist(playlist_url, basic=False, gdata=False, signature=True, - size=False, callback=lambda x: None): - """ Return a dict containing Pafy objects from a YouTube Playlist. 
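# The TTL check behind get_categoryname() above: a cached category title is
# reused only while it is younger than g.lifespan, otherwise the
# videoCategories endpoint is queried again. The lifespan value is a stand-in.
import time

lifespan = 60 * 60 * 5
cached = {"title": "Music", "updated": time.time() - 60}
fresh = cached.get("updated", 0) > time.time() - lifespan
print(cached["title"] if fresh else "unknown")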
- - The returned Pafy objects are initialised using the arguments to - get_playlist() in the manner documented for pafy.new() - - """ - # pylint: disable=R0914 - # too many local vars - - # Normal playlists start with PL, Mixes start with RD + first video ID - regx = re.compile(r'((?:RD|PL)[-_0-9a-zA-Z]+)') - m = regx.search(playlist_url) - - if not m: - err = "Unrecognized playlist url: %s" - raise ValueError(err % playlist_url) - - playlist_id = m.group(1) - url = g.urls["playlist"] % playlist_id - - try: - allinfo = fetch_decode(url) # unicode - allinfo = json.loads(allinfo) - - except: - raise IOError("Error fetching playlist %s" % m.groups(0)) - - # playlist specific metadata - playlist = dict( - playlist_id=playlist_id, - likes=allinfo.get('likes'), - title=allinfo.get('title'), - author=allinfo.get('author'), - dislikes=allinfo.get('dislikes'), - description=allinfo.get('description'), - items=[] - ) - - # playlist items specific metadata - for v in allinfo['video']: - - vid_data = dict( - added=v.get('added'), - is_cc=v.get('is_cc'), - is_hd=v.get('is_hd'), - likes=v.get('likes'), - title=v.get('title'), - views=v.get('views'), - rating=v.get('rating'), - author=v.get('author'), - user_id=v.get('user_id'), - privacy=v.get('privacy'), - start=v.get('start', 0.0), - dislikes=v.get('dislikes'), - duration=v.get('duration'), - comments=v.get('comments'), - keywords=v.get('keywords'), - thumbnail=v.get('thumbnail'), - cc_license=v.get('cc_license'), - category_id=v.get('category_id'), - description=v.get('description'), - encrypted_id=v.get('encrypted_id'), - time_created=v.get('time_created'), - time_updated=v.get('time_updated'), - length_seconds=v.get('length_seconds'), - end=v.get('end', v.get('length_seconds')) - ) - - try: - pafy_obj = new(vid_data['encrypted_id'], - basic=basic, - gdata=gdata, - signature=signature, - size=size, - callback=callback) - - except IOError as e: - callback("%s: %s" % (v['title'], e.message)) - continue - - pafy_obj.populate_from_playlist(vid_data) - playlist['items'].append(dict(pafy=pafy_obj, - playlist_meta=vid_data)) - callback("Added video: %s" % v['title']) - - return playlist - - -def set_api_key(key): - """Sets the api key to be used with youtube.""" - g.api_key = key diff --git a/lib/pyjsparser/__init__.py b/lib/pyjsparser/__init__.py index 2a9de69e..9b3114f2 100644 --- a/lib/pyjsparser/__init__.py +++ b/lib/pyjsparser/__init__.py @@ -1,4 +1,5 @@ -__all__ = ['PyJsParser', 'parse', 'JsSyntaxError'] +__all__ = ['PyJsParser', 'parse', 'JsSyntaxError', 'pyjsparserdata'] __author__ = 'Piotr Dabkowski' __version__ = '2.2.0' -from .parser import PyJsParser, parse, JsSyntaxError \ No newline at end of file +from .parser import PyJsParser, parse, JsSyntaxError +from . 
import pyjsparserdata \ No newline at end of file diff --git a/lib/pyjsparser/parser.py b/lib/pyjsparser/parser.py index 7360b23c..fa9843c5 100644 --- a/lib/pyjsparser/parser.py +++ b/lib/pyjsparser/parser.py @@ -23,12 +23,16 @@ from .std_nodes import * from pprint import pprint import sys -__all__ = ['PyJsParser', 'parse', 'ENABLE_JS2PY_ERRORS', 'ENABLE_PYIMPORT', 'JsSyntaxError'] -REGEXP_SPECIAL_SINGLE = ('\\', '^', '$', '*', '+', '?', '.', '[', ']', '(', ')', '{', '{', '|', '-') +__all__ = [ + 'PyJsParser', 'parse', 'ENABLE_JS2PY_ERRORS', 'ENABLE_PYIMPORT', + 'JsSyntaxError' +] +REGEXP_SPECIAL_SINGLE = ('\\', '^', '$', '*', '+', '?', '.', '[', ']', '(', + ')', '{', '{', '|', '-') ENABLE_PYIMPORT = False ENABLE_JS2PY_ERRORS = False -PY3 = sys.version_info >= (3,0) +PY3 = sys.version_info >= (3, 0) if PY3: basestring = str @@ -84,9 +88,9 @@ class PyJsParser: # 7.4 Comments def skipSingleLineComment(self, offset): - start = self.index - offset; + start = self.index - offset while self.index < self.length: - ch = self.source[self.index]; + ch = self.source[self.index] self.index += 1 if isLineTerminator(ch): if (ord(ch) == 13 and ord(self.source[self.index]) == 10): @@ -94,16 +98,9 @@ class PyJsParser: self.lineNumber += 1 self.hasLineTerminator = True self.lineStart = self.index - return { - 'type': 'Line', - 'value': self.source[start + offset:self.index-2], - 'leading': True, - 'trailing': False, - 'loc': None, - } + return def skipMultiLineComment(self): - start = self.index while self.index < self.length: ch = ord(self.source[self.index]) if isLineTerminator(ch): @@ -117,13 +114,7 @@ class PyJsParser: # Block comment ends with '*/'. if ord(self.source[self.index + 1]) == 0x2F: self.index += 2 - return { - 'type': 'Block', - 'value': self.source[start:self.index-2], - 'leading': True, - 'trailing': False, - 'loc': None, - } + return self.index += 1 else: self.index += 1 @@ -131,9 +122,7 @@ class PyJsParser: def skipComment(self): self.hasLineTerminator = False - startIndex = self.index start = (self.index == 0) - comments = [] while self.index < self.length: ch = ord(self.source[self.index]) if isWhiteSpace(ch): @@ -150,23 +139,24 @@ class PyJsParser: ch = ord(self.source[self.index + 1]) if (ch == 0x2F): self.index += 2 - comments.append(self.skipSingleLineComment(2)) + self.skipSingleLineComment(2) start = True elif (ch == 0x2A): # U+002A is '*' self.index += 2 - comments.append(self.skipMultiLineComment()) + self.skipMultiLineComment() else: break elif (start and ch == 0x2D): # U+002D is '-' # U+003E is '>' - if (ord(self.source[self.index + 1]) == 0x2D) and (ord(self.source[self.index + 2]) == 0x3E): + if (ord(self.source[self.index + 1]) == 0x2D) and (ord( + self.source[self.index + 2]) == 0x3E): # '-->' is a single-line comment self.index += 3 self.skipSingleLineComment(3) else: break elif (ch == 0x3C): # U+003C is '<' - if self.source[self.index + 1: self.index + 4] == '!--': + if self.source[self.index + 1:self.index + 4] == '!--': # + - + @@ -123,26 +125,6 @@ values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]" default="white" visible="eq(-17,true)+eq(-18,true)"/> --> - - - - - - - - - - - - - - - - - - - - @@ -169,6 +151,7 @@ + diff 
--git a/servers/decrypters/zcrypt.py b/servers/decrypters/zcrypt.py index 82be492f..c0e944fc 100644 --- a/servers/decrypters/zcrypt.py +++ b/servers/decrypters/zcrypt.py @@ -2,7 +2,7 @@ # Ringraziamo errmax e dr-z3r0 import re -from core import httptools, scrapertoolsV2 +from core import httptools, scrapertools from platformcode import logger from servers.decrypters import expurl @@ -62,10 +62,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= if '/olink/' in url: continue else: idata = httptools.downloadpage(url).data - data = scrapertoolsV2.find_single_match(idata, "]*src=\\'([^'>]*)\\'[^<>]*>") + data = scrapertools.find_single_match(idata, "]*src=\\'([^'>]*)\\'[^<>]*>") #fix by greko inizio if not data: - data = scrapertoolsV2.find_single_match(idata, 'action="(?:[^/]+.*?/[^/]+/([a-zA-Z0-9_]+))">') + data = scrapertools.find_single_match(idata, 'action="(?:[^/]+.*?/[^/]+/([a-zA-Z0-9_]+))">') from lib import unshortenit data, status = unshortenit.unshorten(url) # logger.info("Data - Status zcrypt linkup : [%s] [%s] " %(data, status)) diff --git a/servers/hdload.py b/servers/hdload.py index 46593695..dd09d15c 100644 --- a/servers/hdload.py +++ b/servers/hdload.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -from core import httptools, scrapertoolsV2 +from core import httptools, scrapertools from platformcode import config, logger import base64 diff --git a/servers/mixdrop.py b/servers/mixdrop.py index e679dfbc..53961beb 100644 --- a/servers/mixdrop.py +++ b/servers/mixdrop.py @@ -1,48 +1,33 @@ # -*- coding: utf-8 -*- -import re +# -------------------------------------------------------- +# Conector Mixdrop By Alfa development Group +# -------------------------------------------------------- + from core import httptools -from core import scrapertoolsV2 -from platformcode import config, logger +from core import scrapertools from lib import jsunpack +from platformcode import logger, config def test_video_exists(page_url): logger.info("(page_url='%s')" % page_url) - - data = httptools.downloadpage(page_url, cookies=False).data - if 'WE ARE SORRY' in data: + global data + data = httptools.downloadpage(page_url).data + if "
<h1>WE ARE SORRY</h1>
    " in data or '404 Not Found' in data: return False, config.get_localized_string(70449) % "MixDrop" - return True, "" def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info() - itemlist = [] + logger.info("url=" + page_url) + video_urls = [] + ext = '.mp4' - # streaming url - data = httptools.downloadpage(page_url).data - data = re.sub(r'\n|\t|\r', ' ', data) - data = re.sub(r'>\s\s*<', '><', data) - jsCode = scrapertoolsV2.find_single_match(data, r'') - jsUnpacked = jsunpack.unpack(jsCode) - url = "https://" + scrapertoolsV2.find_single_match(jsUnpacked, r'vsr[^=]*="(?:/)?(/[^"]+)') + packed = scrapertools.find_single_match(data, r'(eval.*?)') + unpacked = jsunpack.unpack(packed) + media_url = scrapertools.find_single_match(unpacked, r'MDCore\.furl\s*=\s*"([^"]+)"') + if not media_url.startswith('http'): + media_url = 'http:%s' % media_url + video_urls.append(["%s [Mixdrop]" % ext, media_url]) - itemlist.append([".mp4 [MixDrop]", url]) - - # download url - # import urllib - # try: - # import json - # except: - # import simplejson as json - # page_url = page_url.replace('/e/', '/f/') + '?download' - # data = httptools.downloadpage(page_url).data - # csrf = scrapertoolsV2.find_single_match(data, '') - # postData = {'csrf': csrf, 'a': 'genticket'} - # resp = httptools.downloadpage(page_url, post=urllib.urlencode(postData)).data - # resp = json.loads(resp) - # if resp['type'] == 'ok': - # itemlist.append([".mp4 [MixDrop]", 'https:' + resp['url']]) - - return itemlist + return video_urls diff --git a/servers/onlystream.py b/servers/onlystream.py index 30de664e..ba1e7b4b 100644 --- a/servers/onlystream.py +++ b/servers/onlystream.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- from core import httptools -from core import scrapertoolsV2 +from core import scrapertools from lib import jsunpack from platformcode import config, logger import ast @@ -22,10 +22,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= video_urls = [] data = httptools.downloadpage(page_url).data # logger.info(data) - block = scrapertoolsV2.find_single_match(data, r'sources: \[([^\]]+)\]') - sources = scrapertoolsV2.find_multiple_matches(block, r'file:\s*"([^"]+)"(?:,label:\s*"([^"]+)")?') + block = scrapertools.find_single_match(data, r'sources: \[([^\]]+)\]') + sources = scrapertools.find_multiple_matches(block, r'file:\s*"([^"]+)"(?:,label:\s*"([^"]+)")?') if not sources: - sources = scrapertoolsV2.find_multiple_matches(data, r'src:\s*"([^"]+)",\s*type:\s*"[^"]+",[^,]+,\s*label:\s*"([^"]+)"') + sources = scrapertools.find_multiple_matches(data, r'src:\s*"([^"]+)",\s*type:\s*"[^"]+",[^,]+,\s*label:\s*"([^"]+)"') for url, quality in sources: quality = 'auto' if not quality else quality video_urls.append(['.' + url.split('.')[-1] + ' [' + quality + '] [Onlystream]', url]) diff --git a/servers/supervideo.py b/servers/supervideo.py index 355a2762..4ef07daa 100644 --- a/servers/supervideo.py +++ b/servers/supervideo.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- from core import httptools -from core import scrapertoolsV2 +from core import scrapertools from lib import jsunpack from platformcode import config, logger import ast @@ -22,10 +22,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= video_urls = [] data = httptools.downloadpage(page_url).data logger.info('SUPER DATA= '+data) - code_data = scrapertoolsV2.find_single_match(data, "