diff --git a/channels.json b/channels.json
index 2e4cce11..479c9c33 100644
--- a/channels.json
+++ b/channels.json
@@ -43,7 +43,8 @@
         "streamingcommunity": "https://streamingcommunity.net",
         "streamtime": "https://t.me/s/StreamTime",
         "toonitalia": "https://toonitalia.pro",
-        "vvvvid": "https://www.vvvvid.it"
+        "vvvvid": "https://www.vvvvid.it",
+        "paramount": "https://www.paramountnetwork.it"
     },
     "findhost": {
         "altadefinizione01": "https://altadefinizione01-nuovo.info",
diff --git a/channels/paramount.py b/channels/paramount.py
index 17d545e6..aed03c53 100644
--- a/channels/paramount.py
+++ b/channels/paramount.py
@@ -6,8 +6,7 @@ import inspect
 from core import support, jsontools
 from platformcode import autorenumber, logger
 
-# host = support.config.get_channel_url()
-host = 'https://www.paramountnetwork.it'
+host = support.config.get_channel_url()
 headers = [['Referer', host]]
 
 
diff --git a/channels/raiplay.py b/channels/raiplay.py
index e1b5bf8a..f66ef901 100644
--- a/channels/raiplay.py
+++ b/channels/raiplay.py
@@ -53,7 +53,7 @@ def learning(item):
     json = current_session.get(item.url).json()['contents']
     for key in json:
         itemlist.append(item.clone(title = support.typo(key['name'],'bold'), fulltitle = key['name'],
-                                   show = key['name'], url = key['contents'], action = 'peliculas'))
+                                   show = key['name'], data = key['contents'], action = 'peliculas'))
     return itemlist
 
 
@@ -82,9 +82,14 @@ def replay_menu(item):
     # create day and month list
     days = []
     months = []
-    days.append(xbmc.getLocalizedString(17))
-    for day in range(11, 17): days.append(xbmc.getLocalizedString(day))
-    for month in range(21, 33): months.append(xbmc.getLocalizedString(month))
+    try:
+        days.append(xbmc.getLocalizedString(17))
+        for day in range(11, 17): days.append(xbmc.getLocalizedString(day))
+        for month in range(21, 33): months.append(xbmc.getLocalizedString(month))
+    except:  # for the tests: xbmc.getLocalizedString is not supported there
+        days.append('dummy')
+        for day in range(11, 17): days.append('dummy')
+        for month in range(21, 33): months.append('dummy')
 
     # make menu
     itemlist = []
@@ -184,8 +189,8 @@ def peliculas(item):
     pagination = 40 if not item.search else ''
 
     # load json
-    if type(item.url) in [dict, list]:
-        json = item.url
+    if item.data:
+        json = item.data
         for key in json:
             if item.search.lower() in key['name'].lower():
                 keys.append(key)
diff --git a/lib/unshortenit.py b/lib/unshortenit.py
index 0bcb8c87..2cda8301 100644
--- a/lib/unshortenit.py
+++ b/lib/unshortenit.py
@@ -31,7 +31,7 @@ class UnshortenIt(object):
     _linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
     _adfocus_regex = r'adfoc\.us'
     _lnxlu_regex = r'lnx\.lu'
-    _shst_regex = r'sh\.st|festyy\.com|ceesty\.com'
+    _shst_regex = r'sh\.st|shorte\.st|clkmein\.com|viid\.me|xiw34\.com|corneey\.com|gestyy\.com|cllkme\.com|festyy\.com|destyy\.com|ceesty\.com'
     _hrefli_regex = r'href\.li'
     _anonymz_regex = r'anonymz\.com'
     _shrink_service_regex = r'shrink-service\.it'
@@ -381,41 +381,43 @@ class UnshortenIt(object):
 
     def _unshorten_shst(self, uri):
         try:
-            r = httptools.downloadpage(uri, timeout=self._timeout)
-            html = r.data
-            session_id = re.findall(r'sessionId\:(.*?)\"\,', html)
-            if len(session_id) > 0:
-                session_id = re.sub(r'\s\"', '', session_id[0])
-
-                http_header = dict()
-                http_header["Content-Type"] = "application/x-www-form-urlencoded"
-                http_header["Host"] = "sh.st"
-                http_header["Referer"] = uri
-                http_header["Origin"] = "http://sh.st"
-                http_header["X-Requested-With"] = "XMLHttpRequest"
-
-                if config.is_xbmc():
-                    import xbmc
-                    xbmc.sleep(5 * 1000)
-                else:
-                    time.sleep(5 * 1000)
-
-                payload = {'adSessionId': session_id, 'callback': 'c'}
-                r = httptools.downloadpage(
-                    'http://sh.st/shortest-url/end-adsession?' +
-                    urlencode(payload),
-                    headers=http_header,
-                    timeout=self._timeout)
-                response = r.data[6:-2].decode('utf-8')
-
-                if r.code == 200:
-                    resp_uri = json.loads(response)['destinationUrl']
-                    if resp_uri is not None:
-                        uri = resp_uri
-                    else:
-                        return uri, 'Error extracting url'
-                else:
-                    return uri, 'Error extracting url'
+            # act like a crawler
+            r = httptools.downloadpage(uri, timeout=self._timeout, headers=[['User-Agent', '']])
+            uri = r.url
+            # html = r.data
+            # session_id = re.findall(r'sessionId\:(.*?)\"\,', html)
+            # if len(session_id) > 0:
+            #     session_id = re.sub(r'\s\"', '', session_id[0])
+            #
+            #     http_header = dict()
+            #     http_header["Content-Type"] = "application/x-www-form-urlencoded"
+            #     http_header["Host"] = "sh.st"
+            #     http_header["Referer"] = uri
+            #     http_header["Origin"] = "http://sh.st"
+            #     http_header["X-Requested-With"] = "XMLHttpRequest"
+            #
+            #     if config.is_xbmc():
+            #         import xbmc
+            #         xbmc.sleep(5 * 1000)
+            #     else:
+            #         time.sleep(5 * 1000)
+            #
+            #     payload = {'adSessionId': session_id, 'callback': 'c'}
+            #     r = httptools.downloadpage(
+            #         'http://sh.st/shortest-url/end-adsession?' +
+            #         urlencode(payload),
+            #         headers=http_header,
+            #         timeout=self._timeout)
+            #     response = r.data[6:-2].decode('utf-8')
+            #
+            #     if r.code == 200:
+            #         resp_uri = json.loads(response)['destinationUrl']
+            #         if resp_uri is not None:
+            #             uri = resp_uri
+            #         else:
+            #             return uri, 'Error extracting url'
+            #     else:
+            #         return uri, 'Error extracting url'
 
             return uri, r.code
 
diff --git a/tests/test_generic.py b/tests/test_generic.py
index bc39d682..078160b5 100644
--- a/tests/test_generic.py
+++ b/tests/test_generic.py
@@ -62,6 +62,7 @@ validUrlRegex = re.compile(
     r'(?:/?|[/?]\S+)$', re.IGNORECASE)
 
 chBlackList = ['url', 'mediasetplay']
+srvBalcklist = ['mega', 'hdmario', 'torrent', 'youtube']
 chNumRis = {
     'altadefinizione01': {
         'Film': 20
@@ -141,8 +142,7 @@ chNumRis = {
 servers = []
 channels = []
 
-# channel_list = channelselector.filterchannels("all") if 'KOD_TST_CH' not in os.environ else [Item(channel=os.environ['KOD_TST_CH'], action="mainlist")]
-channel_list = [Item(channel='tantifilm', action="mainlist")]
+channel_list = channelselector.filterchannels("all") if 'KOD_TST_CH' not in os.environ else [Item(channel=os.environ['KOD_TST_CH'], action="mainlist")]
 
 logger.info([c.channel for c in channel_list])
 ret = []
@@ -198,8 +198,10 @@ for chItem in channel_list:
                             if itPlay:
                                 tmp.append(itPlay[0])
                         serversFound[it.title] = tmp
-                        servers.extend(
-                            {'name': srv.server.lower(), 'server': srv} for srv in serversFound[it.title] if srv.server)
+                        for srv in serversFound[it.title]:
+                            if srv.server:
+                                srv.foundOn = ch + ' --> ' + it.title + ' --> ' + resIt.title
+                                servers.append({'name': srv.server.lower(), 'server': srv})
                         break
                 except:
                     import traceback
@@ -304,6 +306,9 @@ class GenericServerTest(unittest.TestCase):
         module = __import__('servers.%s' % self.name, fromlist=["servers.%s" % self.name])
         page_url = self.server.url
         print('testing ' + page_url)
+        print('Found on ' + self.server.foundOn)
+        print()
+
         self.assert_(hasattr(module, 'test_video_exists'), self.name + ' has no test_video_exists')
 
         if module.test_video_exists(page_url)[0]:
@@ -327,7 +332,10 @@ class GenericServerTest(unittest.TestCase):
             print(headers)
             if 'magnet:?' in directUrl:  # check of magnet links not supported
                 continue
-            page = downloadpage(directUrl, headers=headers, only_headers=True, use_requests=True, verify=False)
+            if directUrl.split('.')[-1] == 'm3u8':  # m3u8 is a text file and HEAD may be forbidden
+                page = downloadpage(directUrl, headers=headers, use_requests=True, verify=False)
+            else:
+                page = downloadpage(directUrl, headers=headers, only_headers=True, use_requests=True, verify=False)
             self.assertTrue(page.success, self.name + ' scraper returned an invalid link')
             self.assertLess(page.code, 400, self.name + ' scraper returned a ' + str(page.code) + ' link')
             contentType = page.headers['Content-Type']
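
Reviewer notes on the less obvious changes:

The rewritten _unshorten_shst no longer scrapes a session id and replays it
against the end-adsession endpoint; it simply requests the short link with an
empty User-Agent, so the shorte.st family serves plain HTTP redirects, and the
final URL after following them is the destination. A minimal standalone sketch
of the same idea, using the requests library rather than the project's
httptools wrapper (the function name and the commented example link are
illustrative, not part of the patch):

import requests

def resolve_shst(uri, timeout=10):
    # with an empty User-Agent the service skips its ad interstitial
    # and redirects straight to the destination URL
    r = requests.get(uri, headers={'User-Agent': ''}, timeout=timeout,
                     allow_redirects=True)
    return r.url, r.status_code

# usage: final_url, code = resolve_shst('http://festyy.com/abcdef')  # hypothetical link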
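
The m3u8 branch in GenericServerTest exists because a playlist is a small text
file and some hosts reject HEAD requests for it, so only non-playlist links are
probed with a headers-only request. A sketch of the same decision with requests
(probe_link is an illustrative name; unlike the patch's directUrl.split('.')[-1]
test, urlparse also matches playlists that carry a query string such as
.m3u8?token=x):

import requests
from urllib.parse import urlparse

def probe_link(url, headers=None, timeout=10):
    # playlists get a real GET, everything else a cheap HEAD
    if urlparse(url).path.endswith('.m3u8'):
        return requests.get(url, headers=headers, timeout=timeout)
    return requests.head(url, headers=headers, timeout=timeout,
                         allow_redirects=True)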
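
Finally, the try/except added to replay_menu keeps channels/raiplay.py
importable and runnable where the Kodi runtime is absent, i.e. under the test
suite. The shape of that guard in isolation (assuming, as the patch does, that
Kodi core string ids 11-17 are the localized weekday names and that any failure
means the code is not running under Kodi):

def localized_day_names():
    try:
        import xbmc
        # Sunday (id 17) first, then Monday..Saturday (ids 11-16)
        return [xbmc.getLocalizedString(i) for i in [17] + list(range(11, 17))]
    except Exception:
        # no Kodi runtime: placeholders keep the menu buildable in tests
        return ['dummy'] * 7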