- Fix AnimeWorld

- New Paramount Network channel
Alhaziel01
2021-01-16 12:37:37 +01:00
parent 312938d845
commit 3b65319e2c
5 changed files with 125 additions and 88 deletions

View File

@@ -201,22 +201,15 @@ def findvideos(item):
         if match:
             epID, epurl = match
             if 'vvvvid' in name.lower():
-                urls.append(support.match(host + '/api/episode/serverPlayer?id=' + epID, headers=headers, patron=r'<a.*?href="([^"]+)"').match)
+                urls.append(support.match(host + '/api/episode/ugly/serverPlayerAnimeWorld?id=' + epID, headers=headers, patron=r'<a.*?href="([^"]+)"').match)
             elif 'animeworld' in name.lower():
                 url = support.match(data, patron=r'href="([^"]+)"\s*id="alternativeDownloadLink"', headers=headers).match
                 title = support.match(url, patron=r'http[s]?://(?:www.)?([^.]+)', string=True).match
                 itemlist.append(item.clone(action="play", title=title, url=url, server='directo'))
-            # elif 'server 2' in name.lower():
-            # dataJson = support.match(host + '/api/episode/info?id=' + epID + '&alt=0', headers=headers).data
-            # json = jsontools.load(dataJson)
-            # title = support.match(json['grabber'], patron=r'server2.([^.]+)', string=True).match
-            # itemlist.append(item.clone(action="play", title=title, url=json['grabber'], server='directo'))
             else:
                 dataJson = support.match(host + '/api/episode/info?id=' + epID + '&alt=0', headers=headers).data
                 json = jsontools.load(dataJson)
-                title = support.match(json['grabber'], patron=r'server2.([^.]+)', string=True).match
+                title = support.match(json['grabber'], patron=r'server\d+.([^.]+)', string=True).match
                 if title: itemlist.append(item.clone(action="play", title=title, url=json['grabber'].split('=')[-1], server='directo'))
                 else: itemlist.append(item.clone(action="play", title=name, url=json['grabber'], server=name))
-    # for item in itemlist:
-    # support.logger.debug(item.url)
     return support.server(item, urls, itemlist)
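
The AnimeWorld fix above does two things: the VVVVID branch now queries the site's new /api/episode/ugly/serverPlayerAnimeWorld endpoint, and the hard-coded server2. pattern becomes server\d+., so the server name is recovered from whatever numbered mirror the /api/episode/info grabber URL points at. A minimal standalone sketch of that grabber parsing, using only re and json with an invented payload (the real channel goes through KoD's support.match helper, and actual grabber URLs may look different):

    import json
    import re

    # Hypothetical /api/episode/info response; the real payload can carry more fields.
    sample = '{"grabber": "https://server9.example.org/stream?file=https://cdn.example.org/ep01.mp4"}'

    info = json.loads(sample)
    grabber = info['grabber']

    # The old pattern only matched "server2."; r"server\d+." covers any numbered mirror.
    match = re.search(r'server\d+\.([^.]+)', grabber)
    if match:
        title = match.group(1)               # e.g. "example", used as the item title
        direct_url = grabber.split('=')[-1]  # same trick as the channel: keep the part after the last '='
        print(title, direct_url)
    else:
        # No recognisable mirror: hand the raw grabber URL to the named server, as the channel does.
        print('unrecognised grabber:', grabber)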

View File

@@ -1,22 +1,12 @@
 {
     "id": "paramount",
     "name": "Paramount Network",
-    "active": false,
+    "active": true,
     "language": ["ita"],
     "thumbnail": "paramount.png",
     "banner": "paramount.png",
     "categories": ["movie", "tvshow", "documentary", "live"],
     "not_active": ["include_in_newest"],
     "default_off": ["include_in_global_search"],
-    "settings": [
-        {
-            "id": "pagination",
-            "type": "list",
-            "label": "Elementi per pagina",
-            "default": 1,
-            "enabled": true,
-            "visible": true,
-            "lvalues": ["20", "40", "60", "80", "100"]
-        }
-    ]
+    "settings": []
 }
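
Activating the channel and emptying settings go together: the rewritten paramount.py below no longer reads the per-channel pagination option, so the list-type setting (whose stored value is an index into lvalues) can be dropped. A small sketch of how the removed setting used to translate into a page size, with a stand-in get_setting since the real one lives in KoD's support.config:

    # Stand-in for support.config.get_setting: a "list" setting stores the selected
    # index into lvalues, not the value itself (assumption drawn from the removed code).
    FAKE_SETTINGS = {'pagination': 1}            # 1 was the "default" in the removed JSON

    def get_setting(setting_id, channel):
        return FAKE_SETTINGS.get(setting_id, 0)

    pagination_values = [20, 40, 60, 80, 100]    # mirrors the removed "lvalues"
    pagination = pagination_values[get_setting('pagination', 'paramount')]
    print(pagination)                            # 40 items per page with the old default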

View File

@@ -2,9 +2,9 @@
# ------------------------------------------------------------
# Canale per Paramount Network
# ------------------------------------------------------------
import inspect
from core import support, jsontools
from platformcode import autorenumber
from platformcode import autorenumber, logger
# host = support.config.get_channel_url()
host = 'https://www.paramountnetwork.it'
@@ -13,9 +13,9 @@ headers = [['Referer', host]]
@support.menu
def mainlist(item):
top = [('Dirette {bold}', ['/dl/RaiPlay/2016/PublishingBlock-9a2ff311-fcf0-4539-8f8f-c4fee2a71d58.html?json', 'live'])]
film = []
tvshow = []
top = [('Dirette {bold}', ['', 'live'])]
film = ['/film']
tvshow = ['/programmi']
return locals()
@support.scrape
@@ -27,20 +27,21 @@ def menu(item):
def search(item, text):
support.info(text)
logger.info(text)
item.search = text.replace(' ','+')
item.text = text
try:
return peliculas(item)
# Continua la ricerca in caso di errore .
except:
import sys
for line in sys.exc_info():
support.logger.error("%s" % line)
logger.error("%s" % line)
return []
def live(item):
logger.debug()
itemlist=[]
urls=[]
matches = support.match(host, patron=r'(/diretta-tv/[^"]+)"[^>]+>([^ ]+)').matches
@@ -56,60 +57,46 @@ def live(item):
info = jsontools.load(support.match(host +'/api/on-air?channelId=' + ch_dict[title]).data)
support.info(info)
plot= '[B]' + info['seriesTitle'] +'[/B]\n' + info['description'] if 'seriesTitle' in info else ''
itemlist.append(item.clone(title=support.typo(title,'bold'), contentTitle=title, url=host+url, plot=plot, action='findvideos'))
return itemlist
itemlist.append(item.clone(title=support.typo(title,'bold'), contentTitle=title, fulltitle=title, show=title, url=host+url, plot=plot, action='play', forcethumb=True))
return support.thumb(itemlist, live=True)
def peliculas(item):
logger.debug()
def load_more(url):
second_url = host if url.startswith('/') else '' + url.replace('\u002F','/').replace('%5C','/')
new_data = support.match(host + second_url).data.replace('\x01','l').replace('\x02','a')
return jsontools.load(new_data)['items']
itemlist = []
if item.contentType == 'movie':
Type = 'Movie'
action = 'findvideos'
else:
Type = 'Series'
action = 'episodios'
if not item.page: item.page = 1
pagination_values = [20, 40, 60, 80, 100]
pagination = pagination_values[support.config.get_setting('pagination','paramount')]
item.url = host + '/api/search?activeTab=' + Type + '&searchFilter=site&pageNumber=0&rowsPerPage=10000'
data = jsontools.load(support.match(item).data)['response']['items']
titles = []
for it in data:
title = it['meta']['header']['title']
if title not in titles:
titles.append(title)
d = it['meta']['date'].split('/') if it['meta']['date'] else ['0000','00','00']
date = int(d[2] + d[1] + d[0])
if item.search.lower() in title.lower() \
and 'stagione' not in it['url'] \
and 'season' not in it['url'] \
and title not in ['Serie TV']:
data = []
page_data = support.match(item.url).data
more = support.match(page_data, patron=r'loadingTitle":[^,]+,"url":"([^"]+)"').match
data = jsontools.load(support.scrapertools.decodeHtmlentities(support.match(page_data, patron=[r'"nextPageUrl":[^,]+,"items":(.*?),"customContainerClass"', r'Streaming"},"items":(.*?),"isGrid"']).match))
if data:
if more:
new_data = load_more(more)
data += new_data
for it in data:
title = it['meta']['header']['title']
if item.text.lower() in title.lower():
itemlist.append(
item.clone(title=support.typo(title,'bold'),
action=action,
fulltitle=title,
show=title,
contentTitle=title if it['type'] == 'movie' else '',
contentSerieName=title if it['type'] != 'movie' else '',
plot= it['meta']['description'] if 'description' in it['meta'] else '',
url=host + it['url'],
date=date,
thumbnail='https:' + it['media']['image']['url'] if 'url' in it['media']['image'] else item.thumbnail))
itemlist.sort(key=lambda item: item.fulltitle)
if not item.search:
itlist = []
for i, it in enumerate(itemlist):
if pagination and (item.page - 1) * pagination > i and not item.search: continue # pagination
if pagination and i >= item.page * pagination and not item.search: break # pagination
itlist.append(it)
if pagination and len(itemlist) >= item.page * pagination and not item.search:
itlist.append(item.clone(channel=item.channel, action = 'peliculas', title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), page=item.page + 1, thumbnail=support.thumb()))
itemlist = itlist
autorenumber.start(itemlist)
fulltitle = title,
show = title,
contentTitle = title if item.contentType == 'movie' else '',
contentSerieName = title if item.contentType != 'movie' else '',
url = host + it['url'] if it['url'].startswith('/') else it['url'],
thumbnail = it['media']['image']['url'],
fanart = it['media']['image']['url'],
plot = it['meta']['description'],
action = 'findvideos' if item.contentType == 'movie' else 'episodios'))
return itemlist
def episodios(item):
logger.debug()
def load_more(url):
second_url = host if url.startswith('/') else '' + url.replace('\u002F','/').replace('%5C','/')
new_data = support.match(host + second_url).data
@@ -121,7 +108,7 @@ def episodios(item):
page_data = support.match(item.url).data
seasons = support.match(page_data, patron=r'href="([^"]+)"[^>]+>Stagione\s*\d+').matches
more = support.match(page_data, patron=r'loadingTitle":[^,]+,"url":"([^"]+)"').match
data = jsontools.load(support.scrapertools.decodeHtmlentities(support.match(page_data, patron=r'"isEpisodes":[^,]+,"items":(.*?),"as"').match))
data = jsontools.load(support.scrapertools.decodeHtmlentities(support.match(page_data, patron=r'"isEpisodes":[^,]+,"items":(.*?),"isKidsUI"').match))
if data:
if more:
@@ -129,7 +116,7 @@ def episodios(item):
if seasons:
for url in seasons:
new_data = support.match(host + url).data
data += jsontools.load(support.scrapertools.decodeHtmlentities(support.match(new_data, patron=r'isEpisodes":[^,]+,"items":(.*?),"as"').match.replace('\x01','l').replace('\x02','a')))
data += jsontools.load(support.scrapertools.decodeHtmlentities(support.match(new_data, patron=r'isEpisodes":[^,]+,"items":(.*?),"isKidsUI"').match.replace('\x01','l').replace('\x02','a')))
match = support.match(new_data, patron=r'loadingTitle":[^,]+,"url":"([^"]+)"').match
if match and match != load_more:
data += load_more(match)
@@ -162,17 +149,10 @@ def episodios(item):
def findvideos(item):
itemlist = []
qualities = []
logger.debug()
return support.server(item, item.url, Download=False)
mgid = support.match(item, patron=r'uri":"([^"]+)"').match
url = 'https://media.mtvnservices.com/pmt/e1/access/index.html?uri=' + mgid + '&configtype=edge&ref=' + item.url
ID, rootUrl = support.match(url, patron=[r'"id":"([^"]+)",',r'brightcove_mediagenRootURL":"([^"]+)"']).matches
url = jsontools.load(support.match(rootUrl.replace('&device={device}','').format(uri = ID)).data)['package']['video']['item'][0]['rendition'][0]['src']
video_urls = support.match(url, patron=r'RESOLUTION=(\d+x\d+).*?(http[^ ]+)').matches
for quality, url in video_urls:
if quality not in qualities:
qualities.append(quality)
itemlist.append(item.clone(title=support.config.get_localized_string(30137), server='directo', action='play', url=url, quality=quality, focusOnVideoPlayer=True))
itemlist.sort(key=lambda item: item.quality)
return support.server(item, itemlist=itemlist, Download=False)
def play(item):
logger.debug()
return support.servertools.find_video_items(item, data=item.url)
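
In the rewritten channel, live() pairs each /diretta-tv/ link with the site's /api/on-air endpoint and builds the plot from the programme currently on air, while findvideos() now simply hands the page URL to support.server and lets the new paramount server module (below) resolve the stream. A rough standalone sketch of the on-air lookup with urllib, assuming the endpoint returns JSON with seriesTitle and description fields as the channel code expects; the channel id normally comes from the ch_dict built in live(), so the value here is a placeholder:

    import json
    from urllib.request import Request, urlopen

    HOST = 'https://www.paramountnetwork.it'

    def on_air_plot(channel_id):
        # Same endpoint the channel polls; headers kept minimal for the sketch.
        req = Request(HOST + '/api/on-air?channelId=' + channel_id,
                      headers={'Referer': HOST})
        info = json.loads(urlopen(req).read().decode('utf-8'))
        # Mirror the channel's formatting: bold series title, then the description.
        if 'seriesTitle' in info:
            return '[B]' + info['seriesTitle'] + '[/B]\n' + info.get('description', '')
        return ''

    # print(on_air_plot('0000'))  # placeholder id; uncomment to try against the live site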

View File

@@ -0,0 +1,41 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "paramountnetwork.it/([^/]+/[A-Za-z0-9]+)",
                "url": "https://www.paramountnetwork.it/\\1"
            }
        ]
    },
    "free": true,
    "id": "paramount_server",
    "name": "Paramount",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@70708",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}
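
The find_videos block above is what lets the channel's play() delegate to support.servertools.find_video_items(): any link matching the pattern is rewritten through the url template and routed to this server. A quick re-based sketch of how such a pattern/url pair behaves (the real dispatch is done by KoD's servertools; this only illustrates the substitution, and the sample link is invented):

    import re

    # Pattern from the server JSON above, with the dot escaped for strictness.
    pattern = r'paramountnetwork\.it/([^/]+/[A-Za-z0-9]+)'

    # Hypothetical page text containing a link of the expected shape.
    data = 'watch it at https://www.paramountnetwork.it/episodi/abc123XYZ tonight'

    m = re.search(pattern, data)
    if m:
        # "\\1" in the JSON corresponds to the captured "section/id" part here.
        resolved = 'https://www.paramountnetwork.it/' + m.group(1)
        print(resolved)   # https://www.paramountnetwork.it/episodi/abc123XYZ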

View File

@@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
from core import httptools, support, jsontools
from platformcode import config, logger


def test_video_exists(page_url):
    global data
    logger.debug('page url=', page_url)
    response = httptools.downloadpage(page_url)
    if response.code == 404:
        return False, config.get_localized_string(70449) % 'Paramount'
    else:
        data = response.data
        return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.debug("url=" + page_url)
    qualities = []
    video_urls = []
    mgid = support.match(data, patron=r'uri":"([^"]+)"').match
    url = 'https://media.mtvnservices.com/pmt/e1/access/index.html?uri=' + mgid + '&configtype=edge&ref=' + page_url
    ID, rootUrl = support.match(url, patron=[r'"id":"([^"]+)",',r'brightcove_mediagenRootURL":"([^"]+)"']).matches
    url = jsontools.load(support.match(rootUrl.replace('&device={device}','').format(uri = ID)).data)['package']['video']['item'][0]['rendition'][0]['src']
    urls = support.match(url, patron=r'RESOLUTION=(\d+x\d+).*?(http[^ ]+)').matches
    for quality, url in urls:
        if quality not in qualities:
            qualities.append(quality)
            video_urls.append(["m3u8 {}p [Paramount]".format(quality.split('x')[-1]), url])
    video_urls.sort(key=lambda url: int(support.match(url[0], patron=r'(\d+)p').match))
    return video_urls
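
get_video_url() finishes by scraping the master m3u8: each RESOLUTION attribute is paired with the variant URL that follows it, labelled "m3u8 <height>p [Paramount]", and the list is sorted by the height in the label. A self-contained sketch of that last step over an invented master playlist (the real playlist comes from the mediagen chain above; skipping variants that repeat a resolution follows the qualities check in the code):

    import re

    # Invented master playlist; the real one is fetched from the mediagen "rendition" src.
    master = """#EXTM3U
    #EXT-X-STREAM-INF:BANDWIDTH=1400000,RESOLUTION=1280x720
    https://cdn.example.org/variant_720.m3u8
    #EXT-X-STREAM-INF:BANDWIDTH=800000,RESOLUTION=640x360
    https://cdn.example.org/variant_360.m3u8
    """

    video_urls = []
    qualities = []
    # re.S lets ".*?" reach the URL on the line after the RESOLUTION attribute,
    # which is what the module's pattern relies on.
    for quality, url in re.findall(r'RESOLUTION=(\d+x\d+).*?(http[^\s]+)', master, re.S):
        if quality not in qualities:
            qualities.append(quality)
            video_urls.append(['m3u8 {}p [Paramount]'.format(quality.split('x')[-1]), url])

    # Lowest height first, mirroring the sort on the "<height>p" label.
    video_urls.sort(key=lambda entry: int(re.search(r'(\d+)p', entry[0]).group(1)))
    print(video_urls)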