Merge remote-tracking branch 'origin/master'

marco
2020-04-12 17:33:30 +02:00
3 changed files with 30 additions and 32 deletions

Changed file 1 of 3

@@ -37,36 +37,31 @@ list_quality = ['default']
def mainlist(item):
if conn_id:
anime = ['anime/',
('In Evidenza',['anime/', 'peliculas', 'channel/10005/last/']),
('Popolari',['anime/', 'peliculas', 'channel/10002/last/']),
('Nuove Uscite',['anime/', 'peliculas', 'channel/10007/last/']),
('Generi',['anime/', 'peliculas', 'channel/10004/last/?category=']),
('A-Z',['anime/', 'peliculas', 'channel/10003/last/?filter='])
]
film = ['film/',
('In Evidenza',['film/', 'peliculas', 'channel/10005/last/']),
('Popolari',['film/', 'peliculas', 'channel/10002/last/']),
('Nuove Uscite',['film/', 'peliculas', 'channel/10007/last/']),
('Generi',['film/', 'peliculas', 'channel/10004/last/?category=']),
('A-Z',['film/', 'peliculas', 'channel/10003/last/?filter=']),
]
tvshow = ['series/',
('In Evidenza',['series/', 'peliculas', 'channel/10005/last/']),
('Popolari',['series/', 'peliculas', 'channel/10002/last/']),
('Nuove Uscite',['series/', 'peliculas', 'channel/10007/last/']),
('Generi',['series/', 'peliculas', 'channel/10004/last/?category=']),
('A-Z',['series/', 'peliculas', 'channel/10003/last/?filter='])
]
show = [('Show bold {tv}',['show/', 'peliculas', 'channel/10005/last/', 'tvshow']),
('In Evidenza submenu {tv}',['show/', 'peliculas', 'channel/10005/last/', 'tvshow']),
show = [('Show bold {tv}',['show/', 'peliculas', '', 'tvshow']),
('Popolari submenu {tv}',['show/', 'peliculas', 'channel/10002/last/', 'tvshow']),
('Nuove Uscite submenu {tv}',['show/', 'peliculas', 'channel/10007/last/', 'tvshow']),
('Generi submenu {tv}',['show/', 'peliculas', 'channel/10004/last/?category=', 'tvshow']),
('A-Z submenu {tv}',['show/', 'peliculas', 'channel/10003/last/?filter=', 'tvshow']),
('Cerca Show... bold submenu {tv}', ['show/', 'search', '', 'tvshow'])
]
kids = [('Kids bold',['kids/', 'peliculas', 'channel/10005/last/', 'tvshow']),
('In Evidenza submenu {kids}',['kids/', 'peliculas', 'channel/10005/last/', 'tvshow']),
kids = [('Kids bold',['kids/', 'peliculas', '', 'tvshow']),
('Popolari submenu {kids}',['kids/', 'peliculas', 'channel/10002/last/', 'tvshow']),
('Nuove Uscite submenu {kids}',['kids/', 'peliculas', 'channel/10007/last/', 'tvshow']),
('Generi submenu {kids}',['kids/', 'peliculas', 'channel/10004/last/?category=', 'tvshow']),
@@ -77,6 +72,7 @@ def mainlist(item):
Top = [("Visibile solo dall'Italia bold",[])]
return locals()
def search(item, text):
support.log(text)
itemlist = []
@@ -93,6 +89,7 @@ def search(item, text):
return []
return itemlist
def newest(categoria):
item = Item()
item.args = 'channel/10007/last/'
@@ -108,29 +105,15 @@ def newest(categoria):
return peliculas(item)
def dl_pages(name,item):
itemlist = []
url = item.url + 'channel/10003/last/?filter=' + str(name)
json_file = current_session.get(url, headers=headers, params=payload).json()
if 'data' in json_file:
json_file = current_session.get(url, headers=headers, params=payload).json()
make_itemlist(itemlist, item, json_file)
return itemlist
def peliculas(item):
itemlist = []
if not item.args:
json_file = current_session.get(item.url + 'channels', headers=headers, params=payload).json()
names = [i['filter'] for i in json_file['data'] if 'filter' in i][0]
with futures.ThreadPoolExecutor() as executor:
json_file = [executor.submit(dl_pages, name, item,) for name in names]
for res in futures.as_completed(json_file):
if res.result():
itemlist += res.result()
itemlist = sorted(itemlist, key=lambda it: it.fulltitle)
json_file =loadjs(item.url + 'channel/10005/last/')
support.log(json_file)
make_itemlist(itemlist, item, json_file)
elif ('=' not in item.args) and ('=' not in item.url):
json_file = current_session.get(item.url + item.args, headers=headers, params=payload).json()
json_file=loadjs(item.url + item.args)
make_itemlist(itemlist, item, json_file)
elif '=' in item.args:
@@ -148,12 +131,16 @@ def peliculas(item):
contentType = item.contentType))
else :
json_file = current_session.get(item.url, headers=headers, params=payload).json()
json_file=loadjs(item.url)
make_itemlist(itemlist, item, json_file)
if item.contentType != 'movie': autorenumber.renumber(itemlist)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if 'category' in item.args:
support.thumb(itemlist,genre=True)
elif not 'filter' in item.args:
if item.contentType != 'movie': autorenumber.renumber(itemlist)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def episodios(item):
itemlist = []
json_file = current_session.get(item.url, headers=headers, params=payload).json()
@@ -247,6 +234,14 @@ def make_itemlist(itemlist, item, data):
))
return itemlist
def loadjs(url):
if '?category' not in url:
url += '?full=true'
support.log('Json URL;',url)
json = current_session.get(url, headers=headers, params=payload).json()
return json
def encode(text):
if sys.version_info[0] >= 3:
return text
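
The hunks in this file route listing downloads through the new loadjs() helper instead of repeating current_session.get(url, headers=headers, params=payload).json() at each call site; loadjs() also appends ?full=true to every URL that is not a ?category= listing. A minimal standalone sketch of that behaviour, assuming a plain requests.Session in place of the module's shared current_session, headers and payload globals:

    import requests

    session = requests.Session()

    # Rough equivalent of the new loadjs(): category listings keep their query
    # string, everything else asks the API for the full record set.
    def load_json(url, headers=None, params=None):
        if '?category' not in url:
            url += '?full=true'
        return session.get(url, headers=headers, params=params).json()

    # e.g. load_json(base_url + 'channel/10005/last/') stands in for the direct
    # current_session.get(...).json() calls in peliculas() (base_url is illustrative).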

Changed file 2 of 3

@@ -36,6 +36,7 @@ def hdpass_get_servers(item):
mir = scrapertools.find_single_match(page, patron_mir)
for mir_url, srv in scrapertools.find_multiple_matches(mir, patron_option):
mir_url = scrapertools.decodeHtmlentities(mir_url)
ret.append(Item(channel=item.channel,
action="play",
fulltitle=item.fulltitle,
@@ -45,7 +46,7 @@ def hdpass_get_servers(item):
contentType=item.contentType,
title=srv,
server=srv,
url=mir_url.replace('&amp;','&')))
url= mir_url))
return ret
# Load the page
itemlist = []
@@ -71,6 +72,7 @@ def hdpass_get_servers(item):
with futures.ThreadPoolExecutor() as executor:
thL = []
for res_url, res_video in scrapertools.find_multiple_matches(res, patron_option):
res_url = scrapertools.decodeHtmlentities(res_url)
thL.append(executor.submit(get_hosts, res_url, res_video))
for res in futures.as_completed(thL):
if res.result():
@@ -165,6 +167,7 @@ def scrapeLang(scraped, lang, longtitle):
def cleantitle(title):
if type(title) != str: title.decode('UTF-8')
title = scrapertools.decodeHtmlentities(title)
cleantitle = title.replace('"', "'").replace('×', 'x').replace('', '-').strip()
return cleantitle
@@ -297,8 +300,8 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
infoLabels=infolabels,
thumbnail=item.thumbnail if function == 'episodios' and not scraped["thumb"] else scraped["thumb"] if scraped["thumb"] else '',
args=item.args,
contentSerieName= scraped['title'] if scraped['title'] else item.fulltitle if item.contentType or CT != 'movie' and function != 'episodios' else item.fulltitle if function == 'episodios' else '',
contentTitle= scraped['title'] if item.contentType or CT == 'movie' else '',
contentSerieName= title if title else item.fulltitle if item.contentType or CT != 'movie' and function != 'episodios' else item.fulltitle if function == 'episodios' else '',
contentTitle= title if item.contentType or CT == 'movie' else '',
contentLanguage = lang1,
contentEpisodeNumber=episode if episode else '',
news= item.news if item.news else '',
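
Among the hunks above, cleantitle() is the routine every scraped title passes through: it decodes HTML entities, normalises quotes and the '×' character, and strips whitespace. A rough standalone approximation, using the standard library's html.unescape in place of scrapertools.decodeHtmlentities (an assumption; the addon helper may cover more cases):

    import html

    def clean_title(title):
        if not isinstance(title, str):
            title = title.decode('UTF-8')   # assigned here; the hunk calls decode() without using the result
        title = html.unescape(title)        # stand-in for scrapertools.decodeHtmlentities
        return title.replace('"', "'").replace('×', 'x').strip()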

Changed file 3 of 3

@@ -5,7 +5,7 @@
"patterns": [
{
"pattern": "(?:fembed|divload|cercafilm|sonline|verystream|).(?:com|net|pro|info)/((?:f|v)/[A-z0-9_-]+)",
"url": "https://www.feurl.com\\1"
"url": "https://www.feurl.com/\\1"
}
]
},
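
The change in this server-pattern entry adds the missing slash to the replacement URL. A quick illustration of why it matters, with the pattern copied from the entry above and a made-up embed URL:

    import re

    PATTERN = r"(?:fembed|divload|cercafilm|sonline|verystream|).(?:com|net|pro|info)/((?:f|v)/[A-z0-9_-]+)"

    def resolve(url):
        m = re.search(PATTERN, url)
        if not m:
            return None
        # The old replacement ('https://www.feurl.com\1') produced
        # 'https://www.feurl.comv/abc123'; the added slash keeps the path intact.
        return 'https://www.feurl.com/' + m.group(1)

    print(resolve('https://www.fembed.com/v/abc123'))  # -> https://www.feurl.com/v/abc123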