#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os, re, sys, json, time

if sys.version_info[0] >= 3:
    PY3 = True
    from urllib.parse import urlsplit, urlparse, parse_qs, urljoin, urlencode
    from urllib.request import urlopen
else:
    PY3 = False
    from urllib import urlencode, urlopen
    from urlparse import urlsplit, urlparse, parse_qs, urljoin

from base64 import b64decode

from core import httptools, scrapertools
from platformcode import config, logger


def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
    rec = re.compile(regex, flags=flags)
    match = rec.search(text)
    if not match:
        return False
    return match.group(1)


class UnshortenIt(object):

    _adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net|briskgram\.net|baymaleti\.net|thouth\.net|uclaut\.net|gloyah\.net|larati\.net|scuseami\.net'
    _linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
    _adfocus_regex = r'adfoc\.us'
    _lnxlu_regex = r'lnx\.lu'
    _shst_regex = r'sh\.st|festyy\.com|ceesty\.com'
    _hrefli_regex = r'href\.li'
    _anonymz_regex = r'anonymz\.com'
    _shrink_service_regex = r'shrink-service\.it'
    _rapidcrypt_regex = r'rapidcrypt\.net'
    _vcrypt_regex = r'vcrypt\.net|vcrypt\.pw'
    _linkup_regex = r'linkup\.pro|buckler\.link'
    _linkhub_regex = r'linkhub\.icu'
    _swzz_regex = r'swzz\.xyz'
    _stayonline_regex = r'stayonline\.pro'
    # for services that only include the real link inside an iframe
    _simple_iframe_regex = r'cryptmango|xshield\.net|vcrypt\.club'
    # for services that only do redirects
    _simple_redirect = r'streamcrypt\.net/[^/]+'

    listRegex = [_adfly_regex, _linkbucks_regex, _adfocus_regex, _lnxlu_regex, _shst_regex,
                 _hrefli_regex, _anonymz_regex, _shrink_service_regex, _rapidcrypt_regex,
                 _simple_iframe_regex, _vcrypt_regex, _linkup_regex, _linkhub_regex,
                 _swzz_regex, _stayonline_regex, _simple_redirect]

    _maxretries = 5
    _this_dir, _this_filename = os.path.split(__file__)
    _timeout = 10

    def unshorten(self, uri, type=None):
        code = 0
        while True:
            oldUri = uri
            domain = urlsplit(uri).netloc
            if not domain:
                return uri, "No domain found in URI!"
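            # Each matching handler below rewrites `uri`; the surrounding loop
            # then re-dispatches until a full pass leaves the URI unchanged, so
            # chained shorteners (e.g. a swzz link that resolves to a vcrypt
            # link) are peeled away one layer per pass.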
            had_google_outbound, uri = self._clear_google_outbound_proxy(uri)

            if re.search(self._adfly_regex, domain, re.IGNORECASE) or type == 'adfly':
                uri, code = self._unshorten_adfly(uri)
            if re.search(self._adfocus_regex, domain, re.IGNORECASE) or type == 'adfocus':
                uri, code = self._unshorten_adfocus(uri)
            if re.search(self._linkbucks_regex, domain, re.IGNORECASE) or type == 'linkbucks':
                uri, code = self._unshorten_linkbucks(uri)
            if re.search(self._lnxlu_regex, domain, re.IGNORECASE) or type == 'lnxlu':
                uri, code = self._unshorten_lnxlu(uri)
            if re.search(self._shrink_service_regex, domain, re.IGNORECASE):
                uri, code = self._unshorten_shrink_service(uri)
            if re.search(self._shst_regex, domain, re.IGNORECASE):
                uri, code = self._unshorten_shst(uri)
            if re.search(self._hrefli_regex, domain, re.IGNORECASE):
                uri, code = self._unshorten_hrefli(uri)
            if re.search(self._anonymz_regex, domain, re.IGNORECASE):
                uri, code = self._unshorten_anonymz(uri)
            if re.search(self._rapidcrypt_regex, domain, re.IGNORECASE):
                uri, code = self._unshorten_rapidcrypt(uri)
            if re.search(self._simple_iframe_regex, uri, re.IGNORECASE):
                uri, code = self._unshorten_simple_iframe(uri)
            if re.search(self._vcrypt_regex, uri, re.IGNORECASE):
                uri, code = self._unshorten_vcrypt(uri)
            if re.search(self._linkup_regex, uri, re.IGNORECASE):
                uri, code = self._unshorten_linkup(uri)
            if re.search(self._linkhub_regex, uri, re.IGNORECASE):
                uri, code = self._unshorten_linkhub(uri)
            if re.search(self._swzz_regex, uri, re.IGNORECASE):
                uri, code = self._unshorten_swzz(uri)
            if re.search(self._stayonline_regex, uri, re.IGNORECASE):
                uri, code = self._unshorten_stayonline(uri)
            if re.search(self._simple_redirect, uri, re.IGNORECASE):
                p = httptools.downloadpage(uri)
                uri = p.url
                code = p.code

            if oldUri == uri:
                break

        logger.info(uri)
        return uri, code

    def unwrap_30x(self, uri, timeout=10):

        # local helper that actually follows the redirect chain
        def unwrap_30x(uri, timeout=10):
            domain = urlsplit(uri).netloc
            self._timeout = timeout

            try:
                # headers stop t.co from working, so omit headers if this is a t.co link
                if domain == 't.co':
                    r = httptools.downloadpage(uri, timeout=self._timeout)
                    return r.url, r.code

                # p.ost.im uses a meta http refresh to redirect
                if domain == 'p.ost.im':
                    r = httptools.downloadpage(uri, timeout=self._timeout)
                    uri = re.findall(r'.*url\=(.*?)\"\.*', r.data)[0]
                    return uri, r.code

                # follow Location headers manually, up to _maxretries hops
                retries = 0
                while True:
                    r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False, follow_redirects=False)
                    if not r.success:
                        return uri, -1
                    if 'snip.' not in r.url and 'location' in r.headers and retries < self._maxretries:
                        r = httptools.downloadpage(r.headers['location'], cookies=False, follow_redirects=False)
                        uri = r.url
                        retries += 1
                    else:
                        return r.url, r.code
            except Exception as e:
                return uri, str(e)

        uri, code = unwrap_30x(uri, timeout)
        if 'vcrypt' in uri and 'fastshield' in uri:
            # request twice: the first call only picks up the cookies
            httptools.downloadpage(uri, timeout=self._timeout, post='go=go')
            r = httptools.downloadpage(uri, timeout=self._timeout, post='go=go')
            return r.url, r.code
        return uri, code

    def _clear_google_outbound_proxy(self, url):
        '''
        Google proxies its outbound links through a redirect so it can track
        clicks. This strips that wrapper if it is present, which is useful
        when parsing Google search results or scraping Google Docs, where
        Google inserts hit counters on all outbound links.
        '''
        # This is kind of hacky, because we need to check both the netloc AND
        # part of the path. We could use urllib.parse.urlsplit, but it's
        # easier and just as effective to use string checks.
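        # Illustrative example (URL invented for documentation purposes):
        #   https://www.google.com/url?q=https://example.com/page&sa=D
        # unwraps to:
        #   https://example.com/page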
if url.startswith("http://www.google.com/url?") or \ url.startswith("https://www.google.com/url?"): qs = urlparse(url).query query = parse_qs(qs) if "q" in query: # Google doc outbound links (maybe blogspot, too) return True, query["q"].pop() elif "url" in query: # Outbound links from google searches return True, query["url"].pop() else: raise ValueError( "Google outbound proxy URL without a target url ('%s')?" % url) return False, url def _unshorten_adfly(self, uri): try: r = httptools.downloadpage( uri, timeout=self._timeout, cookies=False) html = r.data ysmm = re.findall(r"var ysmm =.*\;?", html) if len(ysmm) > 0: ysmm = re.sub(r'var ysmm \= \'|\'\;', '', ysmm[0]) left = '' right = '' for c in [ysmm[i:i + 2] for i in range(0, len(ysmm), 2)]: left += c[0] right = c[1] + right # Additional digit arithmetic encoded_uri = list(left + right) numbers = ((i, n) for i, n in enumerate(encoded_uri) if str.isdigit(n)) for first, second in zip(numbers, numbers): xor = int(first[1]) ^ int(second[1]) if xor < 10: encoded_uri[first[0]] = str(xor) decoded_uri = b64decode("".join(encoded_uri).encode())[16:-16].decode() if re.search(r'go\.php\?u\=', decoded_uri): decoded_uri = b64decode(re.sub(r'(.*?)u=', '', decoded_uri)).decode() return decoded_uri, r.code else: return uri, 'No ysmm variable found' except Exception as e: return uri, str(e) def _unshorten_linkbucks(self, uri): ''' (Attempt) to decode linkbucks content. HEAVILY based on the OSS jDownloader codebase. This has necessidated a license change. ''' if config.is_xbmc(): import xbmc r = httptools.downloadpage(uri, timeout=self._timeout) firstGet = time.time() baseloc = r.url if "/notfound/" in r.url or \ "(>Link Not Found<|>The link may have been deleted by the owner|To access the content, you must complete a quick survey\.)" in r.data: return uri, 'Error: Link not found or requires a survey!' link = None content = r.data regexes = [ r"