diff --git a/core/httptools.py b/core/httptools.py index afa8c9ce..a8a1a517 100755 --- a/core/httptools.py +++ b/core/httptools.py @@ -417,7 +417,8 @@ def downloadpage(url, **opt): response['url'] = response['url'].replace('https://web.archive.org/save/', '') if type(response['data']) != str: - response['data'] = response['data'].decode('UTF-8') + try: response['data'] = response['data'].decode('utf-8') + except: response['data'] = response['data'].decode('ISO-8859-1') if not response['data']: response['data'] = '' diff --git a/specials/trailertools.py b/specials/trailertools.py index b291d4f2..295d3e7f 100644 --- a/specials/trailertools.py +++ b/specials/trailertools.py @@ -176,8 +176,7 @@ def youtube_search(item): patron += 'url":"([^"]+)' matches = scrapertools.find_multiple_matches(data, patron) for scrapedthumbnail, scrapedtitle, scrapedduration, scrapedurl in matches: - scrapedtitle = scrapedtitle.decode('utf8').encode('utf8') - scrapedtitle = scrapedtitle + " (" + scrapedduration + ")" + scrapedtitle = (scrapedtitle if PY3 else scrapedtitle.decode('utf8').encode('utf8')) + " (" + scrapedduration + ")" if item.contextual: scrapedtitle = "%s" % scrapedtitle url = urlparse.urljoin('https://www.youtube.com/', scrapedurl) @@ -208,7 +207,7 @@ def abandomoviez_search(item): if item.page != "": data = httptools.downloadpage(item.page).data else: - titulo = item.contentTitle.decode('utf-8').encode('iso-8859-1') + titulo = item.contentTitle if PY3 else item.contentTitle.decode('utf-8').encode('iso-8859-1') post = urllib.urlencode({'query': titulo, 'searchby': '1', 'posicion': '1', 'orden': '1', 'anioin': item.year, 'anioout': item.year, 'orderby': '1'}) url = "http://www.abandomoviez.net/db/busca_titulo.php?busco2=%s" %item.contentTitle @@ -217,7 +216,8 @@ def abandomoviez_search(item): if "No hemos encontrado ninguna" in data: url = "http://www.abandomoviez.net/indie/busca_titulo.php?busco2=%s" %item.contentTitle item.prefix = "indie/" - data = 
httptools.downloadpage(url, post=post).data.decode("iso-8859-1").encode('utf-8') + data = httptools.downloadpage(url, post=post).data + if not PY3: data = data.decode("iso-8859-1").encode('utf-8') itemlist = [] patron = '(?: