KoD 0.8.1

- reorganized the settings
- added quality tag descriptions on cb01 (other channels coming soon)
- added support for polpotv TV series
- fixed the mystream server
- fixed renumbering for new episodes
marco committed on 2020-03-14 17:03:57 +01:00
parent c642ddc358, commit 3cdedad7e8
413 changed files with 10944 additions and 1540 deletions

View File

@@ -158,7 +158,8 @@ def peliculas(item):
# patronBlock=[r'<div class="?sequex-page-left"?>(?P<block>.*?)<aside class="?sequex-page-right"?>',
# '<div class="?card-image"?>.*?(?=<div class="?card-image"?>|<div class="?rating"?>)']
if 'newest' not in item.args: patronNext = '<a class="?page-link"? href="?([^>]+)"?><i class="fa fa-angle-right">'
# if 'newest' not in item.args:
patronNext = '<a class="?page-link"? href="?([^>]+)"?><i class="fa fa-angle-right">'
return locals()
@@ -203,13 +204,6 @@ def findvideos(item):
data = httptools.downloadpage(item.url).data
data = re.sub('\n|\t', '', data)
# Extract the quality format
patronvideos = '>([^<]+)</strong></div>'
matches = re.compile(patronvideos, re.DOTALL).finditer(data)
QualityStr = ""
for match in matches:
QualityStr = scrapertools.decodeHtmlentities(match.group(1))
# Extract the contents - Streaming
load_links(itemlist, '<strong>Streamin?g:</strong>(.*?)cbtable', "orange", "Streaming", "SD")
@@ -220,12 +214,9 @@ def findvideos(item):
load_links(itemlist, '<strong>Streamin?g 3D[^<]+</strong>(.*?)cbtable', "pink", "Streaming 3D")
itemlist = support.server(item, itemlist=itemlist)
if itemlist and QualityStr:
itemlist.insert(0,
Item(channel=item.channel,
action="",
title=support.typo(QualityStr,'[] color kod bold'),
folder=False))
# Extract the quality format
patronvideos = '>([^<]+)</strong></div>'
support.addQualityTag(item, itemlist, data, patronvideos)
return itemlist
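
For context, a minimal sketch of what support.addQualityTag presumably consolidates, reconstructed from the inline block this hunk removes (the real helper lives in core/support.py and its signature may differ):

import re
from core import scrapertools, support
from core.item import Item

def addQualityTag(item, itemlist, data, patron):
    # keep the last quality label matched in the page, as the removed code did
    quality = ''
    for match in re.compile(patron, re.DOTALL).finditer(data):
        quality = scrapertools.decodeHtmlentities(match.group(1))
    # prepend a non-browsable header item carrying the quality tag
    if itemlist and quality:
        itemlist.insert(0, Item(channel=item.channel,
                                action='',
                                title=support.typo(quality, '[] color kod bold'),
                                folder=False))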

View File

@@ -9,8 +9,12 @@ from core import scrapertools, httptools, support
from core.item import Item
from platformcode import config
def findhost():
page = httptools.downloadpage("https://www.filmpertutti.group/").data
url = scrapertools.find_single_match(page, 'Il nuovo indirizzo di FILMPERTUTTI è <a href="([^"]+)')
return url
host = config.get_channel_url()
host = config.get_channel_url(findhost)
headers = [['Referer', host]]
list_servers = ['mixdrop', 'akvideo', 'wstream', 'onlystream', 'speedvideo']
list_quality = ['HD', 'SD']
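
Design note: filmpertutti rotates domains frequently, so the channel no longer relies on a hard-coded address alone; findhost() scrapes the redirect notice published on the old homepage ("Il nuovo indirizzo di FILMPERTUTTI è ...") and passing it to config.get_channel_url presumably lets the framework re-resolve and cache the host when the stored URL stops working (assumption; the fallback logic lives in platformcode/config.py).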

View File

@@ -12,10 +12,6 @@ host = config.get_channel_url()
list_servers = ['mixdrop', 'speedvideo', 'gounlimited', 'onlystream', 'youtube']
list_quality = ['default']
checklinks = config.get_setting('checklinks', 'piratestreaming')
checklinks_number = config.get_setting('checklinks_number', 'piratestreaming')
headers = [['Referer', host]]
@support.menu

View File

@@ -6,7 +6,7 @@
"adult": false,
"thumbnail": "polpotv.png",
"banner": "polpotv.png",
"categories": ["movie"],
"categories": ["movie","tvshow"],
"not_active":[],
"default_off":["include_in_newest"],
"settings": []

View File

@@ -2,7 +2,7 @@
# ------------------------------------------------------------
# KoD - XBMC Plugin
# polpotv channel
# ------------------------------------------------------------
# ------------------------------------------------------------
from core import scrapertools, httptools, support, jsontools
from core.item import Item
@@ -18,12 +18,21 @@ list_quality = ['1080p','720p','480p','360p']
@support.menu
def mainlist(item):
menu = [
('Ultimi Film aggiunti', ['/api/movies', 'peliculas', '']),
('Generi', ['/api/genres', 'search_movie_by_genre', '']),
('Anni {film}', ['', 'search_movie_by_year', '']),
('Cerca Film... bold', ['', 'search', ''])
]
# menu = [
# ('Ultimi Film aggiunti', ['/api/movies', 'peliculas', '']),
# ('Ultime Serie TV aggiunte', ['/api/shows', 'peliculas', '']),
# ('Generi', ['/api/genres', 'search_movie_by_genre', '']),
# ('Anni {film}', ['', 'search_movie_by_year', '']),
# ('Cerca... bold', ['', 'search', ''])
# ]
film = ['/api/movies',
('Generi', ['/api/genres', 'search_movie_by_genre', '']),
('Anni', ['', 'search_movie_by_year', '']),]
tvshow=['/api/shows']
search=''
return locals()
def newest(categoria):
@@ -32,16 +41,23 @@ def newest(categoria):
if categoria == 'peliculas':
item.contentType = 'movie'
item.url = host + '/api/movies'
elif categoria == 'series':
item.contentType = 'tvshow'
item.url = host+'/api/shows'
return peliculas(item)
def peliculas(item):
support.log()
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
json_object = jsontools.load(data)
for movie in json_object['hydra:member']:
itemlist.extend(get_itemlist_movie(movie,item))
for element in json_object['hydra:member']:
if 'shows' not in item.url:
item.contentType='movie'
else:
item.contentType='tvshow'
itemlist.extend(get_itemlist_element(element,item))
try:
if support.inspect.stack()[1][3] not in ['newest']:
@@ -51,6 +67,36 @@ def peliculas(item):
return itemlist
def episodios(item):
support.log()
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
json_object = jsontools.load(data)
for season in json_object['seasons']:
seas_url=host+season['@id']+'/releases'
itemlist_season=get_season(item.channel, seas_url, season['seasonNumber'])
if(len(itemlist_season)>0):
itemlist.extend(itemlist_season)
support.videolibrary(itemlist, item, 'color kod bold')
return itemlist
def get_season(channel, seas_url, seasonNumber):
support.log()
itemlist = []
data = httptools.downloadpage(seas_url, headers=headers).data
json_object = jsontools.load(data)
for episode in json_object['hydra:member']:
itemlist.append(
Item(channel=channel,
action='findvideos',
contentType='episode',
title=str(seasonNumber)+"x"+str("%02d"%episode['episodeNumber']),
url=seas_url,
extra=str(len(json_object['hydra:member'])-episode['episodeNumber'])))
return itemlist[::-1]
def search(item, texto):
support.log(item.url, "search", texto)
itemlist=[]
@@ -59,7 +105,14 @@ def search(item, texto):
data = httptools.downloadpage(item.url, headers=headers).data
json_object = jsontools.load(data)
for movie in json_object['hydra:member']:
itemlist.extend(get_itemlist_movie(movie,item))
item.contentType='movie'
itemlist.extend(get_itemlist_element(movie,item))
item.url = host + "/api/shows?originalTitle="+texto+"&translations.name=" +texto
data = httptools.downloadpage(item.url, headers=headers).data
json_object = jsontools.load(data)
for tvshow in json_object['hydra:member']:
item.contentType='tvshow'
itemlist.extend(get_itemlist_element(tvshow,item))
return itemlist
# Continue the search in case of error
except:
@@ -104,10 +157,10 @@ def findvideos(item):
try:
data = httptools.downloadpage(item.url, headers=headers).data
json_object = jsontools.load(data)
for video in json_object['hydra:member'][0]['playlist']['videos']:
# data = httptools.downloadpage(video['src'], headers={'Origin': host},follow_redirects=None).data
# patron = 'href="([^"]+)"'
# video_link = scrapertools.find_single_match(data, patron)
array_index=0
if item.contentType!='movie':
array_index=int(item.extra)
for video in json_object['hydra:member'][array_index]['playlist']['videos']:
itemlist.append(
Item(
channel=item.channel,
@@ -121,44 +174,53 @@ def findvideos(item):
pass
return support.server(item, itemlist=itemlist)
def get_itemlist_movie(movie,item):
def get_itemlist_element(element,item):
support.log()
itemlist=[]
try:
if movie['originalLanguage']['id']=='it':
scrapedtitle=movie['originalTitle']
if element['originalLanguage']['id']=='it':
scrapedtitle=element['originalTitle']
else:
scrapedtitle=movie['translations'][1]['name']
scrapedtitle=element['translations'][1]['name']
if scrapedtitle=='':
scrapedtitle=movie['originalTitle']
scrapedtitle=element['originalTitle']
except:
scrapedtitle=movie['originalTitle']
scrapedtitle=element['originalTitle']
try:
scrapedplot=movie['translations'][1]['overview']
scrapedplot=element['translations'][1]['overview']
except:
scrapedplot = ""
try:
scrapedthumbnail="http://"+movie['posterPath']
scrapedthumbnail="http://"+element['posterPath']
except:
scrapedthumbnail=""
try:
scrapedfanart="http://"+movie['backdropPath']
scrapedfanart="http://"+element['backdropPath']
except:
scrapedfanart=""
infoLabels = {}
infoLabels['tmdbid']=movie['tmdbId']
if item.contentType=='movie':
next_action='findvideos'
quality=support.typo(element['lastQuality'].upper(), '_ [] color kod bold')
url="%s%s/releases"
infoLabels['tmdbid']=element['tmdbId']
else:
next_action='episodios'
quality=''
url="%s%s"
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=support.typo(scrapedtitle,'bold') + support.typo(movie['lastQuality'].upper(), '_ [] color kod bold'),
action=next_action,
title=support.typo(scrapedtitle,'bold') + quality,
fulltitle=scrapedtitle,
show=scrapedtitle,
plot=scrapedplot,
fanart=scrapedfanart,
thumbnail=scrapedthumbnail,
contentType='movie',
contentType=item.contentType,
contentTitle=scrapedtitle,
url="%s%s/releases" %(host,movie['@id'] ),
url=url %(host,element['@id'] ),
infoLabels=infoLabels,
extra=item.extra))
return itemlist
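
A quick worked example of the extra index wired through get_itemlist_element, get_season and findvideos above (this assumes the /releases endpoint returns a season's episodes newest-first, which is what the arithmetic implies):

# hypothetical 10-episode season
season_length = 10
episode_number = 3
extra = season_length - episode_number   # 7, as computed in get_season()
# findvideos() then reads json_object['hydra:member'][int(item.extra)],
# i.e. index 7 in a list ordered 10, 9, ..., 1 -> episode 3's release;
# movies keep array_index = 0 and use the first (only) release.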

View File

@@ -5,7 +5,11 @@
import requests
from core import support
from lib.concurrent import futures
import sys
if sys.version_info[0] >= 3:
from concurrent import futures
else:
from concurrent_py2 import futures
current_session = requests.Session()
host = support.config.get_channel_url()
onair = host + '/palinsesto/onAir.json'
@@ -112,10 +116,10 @@ def search(item, text):
json = current_session.get(host + '/dl/RaiTV/RaiPlayMobile/Prod/Config/programmiAZ-elenco.json').json()
for key in json:
for key in json[key]:
if key.has_key('PathID') and (text.lower() in key['name'].lower()):
if 'PathID' in key and (text.lower() in key['name'].lower()):
itemlist.append(support.Item(channel = item.channel, title = support.typo(key['name'],'bold'), fulltitle = key['name'], show = key['name'], url = key['PathID'].replace('/?json', '.json'), action = 'Type',
thumbnail = getUrl(key['images']['portrait'] if key['images'].has_key('portrait') else key['images']['portrait43'] if key['images'].has_key('portrait43') else key['images']['landscape']),
fanart = getUrl(key['images']['landscape'] if key['images'].has_key('landscape') else key['images']['landscape43'])))
thumbnail = getUrl(key['images']['portrait'] if 'portrait' in key['images'] else key['images']['portrait43'] if 'portrait43' in key['images'] else key['images']['landscape']),
fanart = getUrl(key['images']['landscape'] if 'landscape' in key['images'] else key['images']['landscape43'])))
except:
import sys
for line in sys.exc_info():
@@ -233,7 +237,7 @@ def findvideos(item):
if item.url.endswith('json'):
json = current_session.get(item.url).json()
if json.has_key('first_item_path'):
if 'first_item_path' in json:
url = current_session.get(getUrl(json['first_item_path'])).json()['video']['content_url']
else:
url = json['video']['content_url']
@@ -288,9 +292,9 @@ def load_episodes(key, item):
itemlist=[]
json = current_session.get(getUrl(key['path_id'])).json()['items']
for key in json:
ep = support.match(key['subtitle'].encode('utf8'), patron=r'St\s*(\d+)\s*Ep\s*(\d+)').match
ep = support.match(key['subtitle'], patron=r'St\s*(\d+)\s*Ep\s*(\d+)').match
if ep:
title = ep[0] + 'x' + ep[1].zfill(2) + support.re.sub(r'St\s*\d+\s*Ep\s*\d+','',key['subtitle'].encode('utf8'))
title = ep[0] + 'x' + ep[1].zfill(2) + support.re.sub(r'St\s*\d+\s*Ep\s*\d+','',key['subtitle'])
else:
title = key['subtitle'].strip()
if not title:
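
The raiplay changes above are Python 3 compatibility fixes: concurrent.futures comes from the standard library on Python 3 and from the bundled concurrent_py2 backport on Python 2 (Kodi 18 and earlier still run Python 2), dict.has_key() no longer exists on Python 3, and .encode('utf8') would turn the subtitle into bytes, breaking the str-based regex matching. A minimal illustration of the has_key replacement:

key = {'PathID': '/programmi/example.json'}   # sample dict, values are made up
key.has_key('PathID')   # Python 2 only; raises AttributeError on Python 3
'PathID' in key         # works on both Python 2 and Python 3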

View File

@@ -101,7 +101,11 @@ def episodios(item):
seasons = support.match(url, patronBlock=patron_season, patron=patron_option)
data = ''
from concurrent import futures
import sys
if sys.version_info[0] >= 3:
from concurrent import futures
else:
from concurrent_py2 import futures
with futures.ThreadPoolExecutor() as executor:
thL = []
for i, season in enumerate(seasons.matches):

View File

@@ -6,7 +6,11 @@ import requests, re
from core import support, tmdb
from core.item import Item
from specials import autorenumber
from lib.concurrent import futures
import sys
if sys.version_info[0] >= 3:
from concurrent import futures
else:
from concurrent_py2 import futures
host = support.config.get_channel_url()