#!/usr/bin/env python
# -*- coding: utf-8 -*-

try:
    from urllib.parse import urlsplit, urlparse, parse_qs, urljoin
except ImportError:
    from urlparse import urlsplit, urlparse, parse_qs, urljoin

import json
import os
import re
import time
import urllib
from base64 import b64decode

from platformcode import logger
import xbmc
from core import httptools


def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
    rec = re.compile(regex, flags=flags)
    match = rec.search(text)
    if not match:
        return False
    return match.group(1)


class UnshortenIt(object):

    _adfly_regex = r'adf\.ly|q\.gs|j\.gs|u\.bb|ay\.gy|threadsphere\.bid|restorecosm\.bid|clearload\.bid'
    _linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
    _adfocus_regex = r'adfoc\.us'
    _lnxlu_regex = r'lnx\.lu'
    _shst_regex = r'sh\.st|gestyy\.com'
    _hrefli_regex = r'href\.li'
    _anonymz_regex = r'anonymz\.com'
    _shrink_service_regex = r'shrink-service\.it'
    _rapidcrypt_regex = r'rapidcrypt\.net'

    _maxretries = 5

    _this_dir, _this_filename = os.path.split(__file__)

    _timeout = 10

    def unshorten(self, uri, type=None):
        domain = urlsplit(uri).netloc
        if not domain:
            return uri, "No domain found in URI!"

        had_google_outbound, uri = self._clear_google_outbound_proxy(uri)

        if re.search(self._adfly_regex, domain, re.IGNORECASE) or type == 'adfly':
            return self._unshorten_adfly(uri)
        if re.search(self._adfocus_regex, domain, re.IGNORECASE) or type == 'adfocus':
            return self._unshorten_adfocus(uri)
        if re.search(self._linkbucks_regex, domain, re.IGNORECASE) or type == 'linkbucks':
            return self._unshorten_linkbucks(uri)
        if re.search(self._lnxlu_regex, domain, re.IGNORECASE) or type == 'lnxlu':
            return self._unshorten_lnxlu(uri)
        if re.search(self._shrink_service_regex, domain, re.IGNORECASE):
            return self._unshorten_shrink_service(uri)
        if re.search(self._shst_regex, domain, re.IGNORECASE):
            return self._unshorten_shst(uri)
        if re.search(self._hrefli_regex, domain, re.IGNORECASE):
            return self._unshorten_hrefli(uri)
        if re.search(self._anonymz_regex, domain, re.IGNORECASE):
            return self._unshorten_anonymz(uri)
        if re.search(self._rapidcrypt_regex, domain, re.IGNORECASE):
            return self._unshorten_rapidcrypt(uri)

        return uri, 200

    def unwrap_30x(self, uri, timeout=10):
        domain = urlsplit(uri).netloc
        self._timeout = timeout

        loop_counter = 0
        try:
            # Custom headers stop t.co from working, so omit them for t.co links.
            if domain == 't.co':
                r = httptools.downloadpage(uri, timeout=self._timeout)
                return r.url, r.code
            # p.ost.im uses a meta http-refresh to redirect.
            if domain == 'p.ost.im':
                r = httptools.downloadpage(uri, timeout=self._timeout)
                uri = re.findall(r'.*url\=(.*?)\"\.*', r.data)[0]
                return uri, r.code
            else:
                retries = 0
                while True:
                    if loop_counter > 5:
                        raise ValueError("Infinitely looping redirect from URL: '%s'" % (uri,))
                    r = httptools.downloadpage(
                        uri, timeout=self._timeout, follow_redirects=False, only_headers=True)
                    if not r.success:
                        return uri, -1
                    if 'location' in r.headers and retries < self._maxretries:
                        r = httptools.downloadpage(
                            r.headers['location'], follow_redirects=False, only_headers=True)
                        uri = r.url
                        loop_counter += 1
                        retries += 1
                    else:
                        return r.url, r.code
        except Exception as e:
            return uri, str(e)
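
    # Illustrative note for the method below (the URL here is hypothetical, not
    # taken from the original source): a Google-wrapped outbound link such as
    #   https://www.google.com/url?q=https://example.com/page&sa=D
    # carries the real target in its 'q' (or, for search results, 'url') query
    # parameter, so _clear_google_outbound_proxy() returns
    # (True, 'https://example.com/page') for it and (False, url) for any URL
    # that is not wrapped by the Google redirect.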
    def _clear_google_outbound_proxy(self, url):
        '''
        Google proxies its outbound links through a redirect so it can track
        clicks on them. This call strips that redirect when present. It is
        useful when parsing Google search results, or when scraping Google
        Docs, where hit-counter redirects are inserted on all outbound links.
        '''
        # This is kind of hacky, because we need to check both the netloc AND
        # part of the path. We could use urllib.parse.urlsplit, but it's
        # easier and just as effective to use string checks.
        if url.startswith("http://www.google.com/url?") or \
                url.startswith("https://www.google.com/url?"):
            qs = urlparse(url).query
            query = parse_qs(qs)
            if "q" in query:
                # Google Docs outbound links (maybe Blogspot, too)
                return True, query["q"].pop()
            elif "url" in query:
                # Outbound links from Google searches
                return True, query["url"].pop()
            else:
                raise ValueError(
                    "Google outbound proxy URL without a target url ('%s')?" % url)
        return False, url

    def _unshorten_adfly(self, uri):
        logger.info()
        try:
            r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False)
            html = r.data

            ysmm = re.findall(r"var ysmm =.*\;?", html)

            if len(ysmm) > 0:
                ysmm = re.sub(r'var ysmm \= \'|\'\;', '', ysmm[0])

                left = ''
                right = ''

                for c in [ysmm[i:i + 2] for i in range(0, len(ysmm), 2)]:
                    left += c[0]
                    right = c[1] + right

                # Additional digit arithmetic
                encoded_uri = list(left + right)
                numbers = ((i, n) for i, n in enumerate(encoded_uri) if str.isdigit(n))
                for first, second in zip(numbers, numbers):
                    xor = int(first[1]) ^ int(second[1])
                    if xor < 10:
                        encoded_uri[first[0]] = str(xor)

                decoded_uri = b64decode("".join(encoded_uri).encode())[16:-16].decode()

                if re.search(r'go\.php\?u\=', decoded_uri):
                    decoded_uri = b64decode(re.sub(r'(.*?)u=', '', decoded_uri)).decode()

                return decoded_uri, r.code
            else:
                return uri, 'No ysmm variable found'

        except Exception as e:
            return uri, str(e)

    def _unshorten_linkbucks(self, uri):
        '''
        (Attempt to) decode Linkbucks content. HEAVILY based on the OSS
        jDownloader codebase. This has necessitated a license change.
        '''
        r = httptools.downloadpage(uri, timeout=self._timeout)
        firstGet = time.time()
        baseloc = r.url

        if "/notfound/" in r.url or \
                find_in_text(r"(>Link Not Found<|>The link may have been deleted by the owner|To access the content, you must complete a quick survey\.)", r.data):
            return uri, 'Error: Link not found or requires a survey!'

        link = None
        content = r.data

        regexes = [
            r"