Fix animesaturn, dreamsub, polpotv.
Some changes and improved tests
@@ -84,7 +84,7 @@ def peliculas(item):
action = 'findvideos'
def itemlistHook(itemlist):
if page:
itemlist.append(item(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'),page= page, thumbnail=support.thumb()))
itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'),page= page, thumbnail=support.thumb()))
return itemlist
else:
pagination = ''

@@ -76,6 +76,7 @@ def newest(categoria):

@support.scrape
def peliculas(item):
# debug = True
anime = True
if 'movie' in item.url:
item.contentType = 'movie'
@@ -88,7 +89,7 @@ def peliculas(item):
patronBlock = r'<div id="%s"[^>]+>(?P<block>.*?)<div class="vistaDettagliata"' % item.args[1]
patron = r'<li>\s*<a href="(?P<url>[^"]+)" title="(?P<title>[^"]+)" class="thumb">[^>]+>[^>]+>[^>]+>\s*[EePp]+\s*(?P<episode>\d+)[^>]+>[^>]+>[^>]+>(?P<lang>[^<]*)<[^>]+>[^>]+>\s<img src="(?P<thumb>[^"]+)"'
else:
patron = r'<div class="showStreaming"> <b>(?P<title>[^<]+)[^>]+>[^>]+>\s*Stato streaming: (?:[^<]+)<[^>]+>[^>]+>\s*Lingua:[ ](?P<lang>ITA\/JAP|ITA|JAP|SUB ITA)?[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<div class="[^"]+" style="background: url\((?P<thumb>[^\)]+)\)'
patron = r'<div class="showStreaming"> <b>(?P<title>[^<]+)[^>]+>[^>]+>\s*<span>Lingua:\s*(?P<lang>[^>]+)?>[<>br\s]+a href="(?P<url>[^"]+)"[^>]+>.*?--image-url:url\(/*(?P<thumb>[^\)]+).*?Anno di inizio</b>:\s*(?P<year>[0-9]{4})'
patronNext = '<li class="currentPage">[^>]+><li[^<]+<a href="([^"]+)">'

return locals()

@@ -205,19 +205,19 @@ def get_itemlist_element(element,item):
next_action='episodios'
quality=''
url="%s%s"

if item.contentType=='movie':
support.tmdb.set_infoLabels_itemlist(itemlist)
itemlist.append(
item.clone(action=next_action,
title=support.typo(scrapedtitle,'bold') + quality,
title=support.typo(scrapedtitle, 'bold') + quality,
fulltitle=scrapedtitle,
show=scrapedtitle,
plot=scrapedplot,
fanart=scrapedfanart,
thumbnail=scrapedthumbnail,
contentTitle=scrapedtitle,
url=url %(host,element['@id'] ),
url=url % (host, element['@id']),
infoLabels=infoLabels))

if item.contentType=='movie':
for item in itemlist:
item= support.tmdb.find_and_set_infoLabels(item)
return itemlist

@@ -296,7 +296,10 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
if parsedTitle.get('screen_size'):
quality += ' ' + str(parsedTitle.get('screen_size', ''))
if not scraped['year']:
infolabels['year'] = parsedTitle.get('year', '')
if type(parsedTitle.get('year', '')) == list:
infolabels['year'] =parsedTitle.get('year', '')[0]
else:
infolabels['year'] = parsedTitle.get('year', '')
if parsedTitle.get('episode') and parsedTitle.get('season'):
longtitle = title + s

@@ -462,3 +462,7 @@ def verify_directories_created():
import traceback
logger.error("When checking or creating the resolution folder")
logger.error(traceback.format_exc())


def get_online_server_thumb(server):
return "https://github.com/kodiondemand/media/raw/master/resources/servers/" + server.lower() + '.png'
@@ -456,7 +456,7 @@ def play_from_library(item):
options = []
selection_implementation = 0
for item in itemlist:
item.thumbnail = "https://github.com/kodiondemand/media/raw/master/resources/servers/" + item.server.lower() + '.png'
item.thumbnail = config.get_online_server_thumb(item.server)
quality = '[B][' + item.quality + '][/B]' if item.quality else ''
if item.server:
it = xbmcgui.ListItem('\n[B]%s[/B] %s - %s' % (item.server, quality, item.contentTitle))

@@ -175,7 +175,7 @@ def render_items(itemlist, parent_item):
if item.fanart == "":
item.fanart = parent_item.fanart
if item.action == 'play' and thumb_type == 1 and not item.forcethumb:
item.thumbnail = "https://github.com/kodiondemand/media/raw/master/resources/servers/" + item.server.lower() + '.png'
item.thumbnail = config.get_online_server_thumb(item.server)

# if cloudflare and cloudscraper is used, cookies are needed to display images taken from site
# before checking domain (time consuming), checking if tmdb failed (so, images scraped from website are used)

140 tests.py
@@ -5,6 +5,7 @@ import unittest
import parameterized

from platformcode import config

librerias = os.path.join(config.get_runtime_path(), 'lib')
sys.path.insert(0, librerias)
from core.support import typo
@@ -12,57 +13,96 @@ from core.item import Item
import channelselector
from core import servertools
import re

validUrlRegex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)

chBlackList = ['url']
chNumRis = {
'altadefinizione01': {
'film': 20
'Film': 20
},
'altadefinizione01_link': {
'film': 16,
'serie': 16,
},
'Film': 16,
'Serie TV': 16,
},
'altadefinizioneclick': {
'film': 36,
'serie': 12,
},
'Film': 36,
'Serie TV': 12,
},
'casacinema': {
'film': 10,
'serie': 10,
},
'Film': 10,
'Serie TV': 10,
},
'cineblog01': {
'film': 12,
'serie': 13
},
'Film': 12,
'Serie TV': 13
},
'cinemalibero': {
'film': 20,
'serie': 20,
},
'Film': 20,
'Serie TV': 20,
},
'cinetecadibologna': {
'film': 10
},
'Film': 10
},
'eurostreaming': {
'serie': 18
},
'filmpertutti': {
'film': 24,
'serie': 24,
},
'guardaserieclick': {
'da controllare': 0
},
'Serie TV': 18
},
'Filmpertutti': {
'Film': 24,
'Serie TV': 24,
},
'guardaSerie TVclick': {
'da controllare': 0
},
'hd4me': {
'film': 10
},
'Film': 10
},
'ilgeniodellostreaming': {
'Film': 30,
'Serie TV': 30
},
'italiaserie': {
'Serie TV': 20
},
'casacinemaInfo': {
'Film': 150
},
'netfreex': {
'Film': 30,
'Serie TV': 30
},
'piratestreaming': {
'Film': 24,
'Serie TV': 24
},
'polpotv': {
'Film': 12,
'Serie TV': 12
},
'streamingaltadefinizione': {
'Film': 30,
'Serie TV': 30
},
'seriehd': {
'Serie TV': 12
},
'serietvonline': {
'Film': 35,
'Serie TV': 35
},
'tantifilm': {
'Film': 20,
'Serie TV': 20
},
}


def getChannels():
channel_list = channelselector.filterchannels("all")
ret = []
@@ -72,7 +112,9 @@ def getChannels():
ret.append({'ch': ch})
return ret


from specials import news

dictNewsChannels, any_active = news.get_channels_list()

srvLinkDict = {
@@ -80,6 +122,7 @@ srvLinkDict = {
"akvideo": ["https://akvideo.stream/video.php?file_code=23god95lrtqv"]
}


def getServers():
server_list = servertools.get_servers_list()
ret = []
@@ -101,6 +144,7 @@ class GenericChannelTest(unittest.TestCase):
self.assertTrue(mainlist, 'channel ' + self.ch + ' has no menu')

for it in mainlist:
it.title = it.title.encode('ascii', 'ignore')
if it.action == 'channel_config':
hasChannelConfig = True
continue
@@ -108,18 +152,32 @@ class GenericChannelTest(unittest.TestCase):
continue
itemlist = getattr(self.module, it.action)(it)
self.assertTrue(itemlist, 'channel ' + self.ch + ' -> ' + it.title + ' is empty')
if self.ch in chNumRis:  # known in advance how many results it should return
for content in chNumRis[self.ch]:
if content in it.title:
risNum = len(itemlist) - 1 # - nextpage
self.assertEqual(risNum, chNumRis[self.ch][content],
'channel ' + self.ch + ' -> ' + it.title + ' returned ' + str(
risNum) + ' results but should have returned ' + str(
chNumRis[self.ch][content]))
break

for resIt in itemlist:
self.assertLess(len(resIt.fulltitle), 100, 'channel ' + self.ch + ' -> ' + it.title + ' might contain wrong titles\n' + resIt.fulltitle)
self.assertLess(len(resIt.fulltitle), 100,
'channel ' + self.ch + ' -> ' + it.title + ' might contain wrong titles\n' + resIt.fulltitle)
if resIt.url:
self.assertIsNotNone(re.match(validUrlRegex, resIt.url), 'channel ' + self.ch + ' -> ' + it.title + ' -> ' + resIt.title + ' might contain wrong url\n' + resIt.url)
self.assertIsNotNone(re.match(validUrlRegex, resIt.url),
'channel ' + self.ch + ' -> ' + it.title + ' -> ' + resIt.title + ' might contain wrong url\n' + resIt.url)
if 'year' in resIt.infoLabels and resIt.infoLabels['year']:
msgYear = 'channel ' + self.ch + ' -> ' + it.title + ' might contain wrong infolabels year\n' + str(resIt.infoLabels['year'])
msgYear = 'channel ' + self.ch + ' -> ' + it.title + ' might contain wrong infolabels year\n' + str(
resIt.infoLabels['year'])
self.assert_(type(resIt.infoLabels['year']) is int or resIt.infoLabels['year'].isdigit(), msgYear)
self.assert_(int(resIt.infoLabels['year']) > 1900 and int(resIt.infoLabels['year']) < 2100, msgYear)

if resIt.title == typo(config.get_localized_string(30992), 'color kod bold'): # next page
nextPageItemlist = getattr(self.module, resIt.action)(resIt)
self.assertTrue(nextPageItemlist, 'channel ' + self.ch + ' -> ' + it.title + ' has nextpage not working')
self.assertTrue(nextPageItemlist,
'channel ' + self.ch + ' -> ' + it.title + ' has nextpage not working')
self.assertTrue(hasChannelConfig, 'channel ' + self.ch + ' has no channel config')

def test_newest(self):
@@ -130,6 +188,7 @@ class GenericChannelTest(unittest.TestCase):
self.assertTrue(itemlist, 'channel ' + self.ch + ' returned no news for category ' + cat)
break


#
# @parameterized.parameterized_class(getServers())
# class GenericServerTest(unittest.TestCase):
@@ -155,4 +214,5 @@ class GenericChannelTest(unittest.TestCase):
# self.assertEqual(requests.head(directUrl, headers=headers, timeout=15).status_code, 200, self.srv + ' scraper did not return valid url for link ' + link)

if __name__ == '__main__':
unittest.main()
config.set_setting('tmdb_active', False)
unittest.main()