modifica canale per uso completo di support e servertools

This commit is contained in:
greko
2019-05-19 16:00:40 +02:00
parent 66515cd4a5
commit bd026be036
4 changed files with 103 additions and 215 deletions

View File

@@ -4,27 +4,25 @@
"active": true,
"adult": false,
"language": ["ita"],
"fanart": "",
"thumbnail": "",
"fanart": "https://altadefinizione01.estate/templates/Dark/img/logo2.png",
"thumbnail": "https://altadefinizione01.estate/templates/Dark/img/logo2.png",
"banner": "http://altadefinizione01.link/templates/Dark/img/logonyy.png",
"fix" : "reimpostato url e modificato file per KOD",
"change_date": "2019-30-04",
"categories": [
"movie"
],
"settings": [
{
"id": "modo_grafico",
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_film",
"type": "bool",
"label": "Includi in Novità",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
@@ -36,31 +34,6 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
@@ -70,24 +43,8 @@
"enabled": true,
"visible": true,
"lvalues": [
"Non filtrare",
"IT"
]
},
{
"id": "perfil",
"type": "list",
"label": "profilo dei colori",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 5",
"Perfil 4",
"Perfil 3",
"Perfil 2",
"Perfil 1"
"No filtrar",
"Italiano"
]
}
]

View File

@@ -3,17 +3,19 @@
# -*- Creato per Alfa-addon -*-
# -*- e adattato for KOD -*-
# -*- By Greko -*-
# -*- last change: 04/05/2019
# -*- last change: 19/05/2019
"""
modificati:
core/servertools.py
channels/support.py
problemi noti:
non ordina le categorie
da sistemare ma in un altro file il titolo nella pagina server
from channels import autoplay, support, filtertools
from channelselector import get_thumb
from core import httptools
from core import channeltools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
"""
from channels import autoplay, support
from platformcode import config, logger
__channel__ = "altadefinizione01_link"
@@ -22,19 +24,20 @@ __channel__ = "altadefinizione01_link"
#host = "http://altadefinizione01.art/" # aggiornato al 22 marzo 2019
#host = "https://altadefinizione01.network/" #aggiornato al 22 marzo 2019
#host = "http://altadefinizione01.date/" #aggiornato al 3 maggio 2019
host = "https://altadefinizione01.voto/" #aggiornato al 3 maggio 2019
#host = "https://altadefinizione01.voto/" #aggiornato al 3 maggio 2019
host = "https://altadefinizione01.estate/" # aggiornato al 19 maggio 2019
# ======== def per utility INIZIO ============================
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
##__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
##__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]#,['Accept-Language','it-IT,it;q=0.8,en-US;q=0.5,en;q=0.3']]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamcherry','rapidvideo', 'streamango', 'supervideo']
##IDIOMAS = {'Italiano': 'IT'}
##list_language = IDIOMAS.values()
list_servers = ['supervideo', 'streamcherry','rapidvideo', 'streamango', 'openload']
list_quality = ['default']
# =========== home menu ===================
@@ -45,153 +48,68 @@ def mainlist(item):
:param item:
:return: itemlist []
"""
logger.info("%s mainlist log: %s" % (__channel__, item))
support.log()
itemlist = []
autoplay.init(item.channel, list_servers, list_quality)
# Menu Principale
support.menu(itemlist, 'Film Ultimi Arrivi bold', 'peliculas', host)#, args='film')
support.menu(itemlist, 'Genere', 'categorie', host, args=['','genres'])
support.menu(itemlist, 'Per anno submenu', 'categorie', host, args=['Film per Anno','years'])
support.menu(itemlist, 'Per qualità submenu', 'categorie', host, args=['Film per qualità','quality'])
support.menu(itemlist, 'Novità bold', 'peliculas', host)
support.menu(itemlist, 'Film per Genere', 'genres', host, args='genres')
support.menu(itemlist, 'Film per Anno submenu', 'genres', host, args='years')
support.menu(itemlist, 'Film per Qualità submenu', 'genres', host, args='quality')
support.menu(itemlist, 'Al Cinema bold', 'peliculas', host+'film-del-cinema')
support.menu(itemlist, 'Popolari bold', 'categorie', host+'piu-visti.html', args=['popular',''])
support.menu(itemlist, 'Mi sento fortunato bold', 'categorie', host, args=['fortunato','lucky'])
support.menu(itemlist, 'Popolari bold', 'peliculas', host+'piu-visti.html')
support.menu(itemlist, 'Mi sento fortunato bold', 'genres', host, args='lucky')
support.menu(itemlist, 'Sub-ITA bold', 'peliculas', host+'film-sub-ita/')
support.menu(itemlist, 'Cerca film submenu', 'search', host)
# per autoplay
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
return itemlist
# ======== def in ordine di menu ===========================
# ======== def in ordine di action dal menu ===========================
def peliculas(item):
logger.info("%s mainlist peliculas log: %s" % (__channel__, item))
support.log
itemlist = []
# scarico la pagina
data = httptools.downloadpage(item.url, headers=headers).data
# da qui fare le opportuni modifiche
patron = 'class="innerImage">.*?href="([^"]+)".*?src="([^"]+)".*?'\
'class="ml-item-title">([^"]+)</.*?class="ml-item-label">'\
'(.*?)<.*?class="ml-item-label">.*?class="ml-item-label">(.*?)</'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedimg, scrapedtitle, scrapedyear, scrapedlang in matches:
if 'italiano' in scrapedlang.lower():
scrapedlang = 'ITA'
else:
scrapedlang = 'Sub-Ita'
itemlist.append(Item(
channel=item.channel,
action="findvideos",
contentTitle=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
infoLabels={'year': scrapedyear},
contenType="movie",
thumbnail=scrapedimg,
title="%s [%s]" % (scrapedtitle, scrapedlang),
language=scrapedlang,
context="buscar_trailer"
))
patron = r'class="innerImage">.*?href="([^"]+)".*?src="([^"]+)"'\
'.*?class="ml-item-title">([^"]+)</.*?class="ml-item-label">(.*?)'\
'<.*?class="ml-item-label">.*?class="ml-item-label ml-item-label-.*?">'\
'(.*?)</div>.*?class="ml-item-label">(.*?)</'
listGroups = ['url', 'thumb', 'title', 'year', 'quality', 'lang']
# poichè il sito ha l'anno del film con TMDB la ricerca titolo-anno è esatta quindi inutile fare lo scrap delle locandine
# e della trama dal sito che a volte toppano
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginazione
support.nextPage(itemlist,item,data,'<span>\d</span> <a href="([^"]+)">')
patronNext = '<span>\d</span> <a href="([^"]+)">'
itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
headers= headers, patronNext=patronNext,
action='findvideos')
return itemlist
# =========== def pagina categorie ======================================
def categorie(item):
logger.info("%s mainlist categorie log: %s" % (__channel__, item))
def genres(item):
support.log
itemlist = []
# scarico la pagina
data = httptools.downloadpage(item.url, headers=headers).data
#data = httptools.downloadpage(item.url, headers=headers).data
if item.args == 'genres':
bloque = r'<ul class="listSubCat" id="Film">(.*?)</ul>'
elif item.args == 'years':
bloque = r'<ul class="listSubCat" id="Anno">(.*?)</ul>'
elif item.args == 'quality':
bloque = r'<ul class="listSubCat" id="Qualita">(.*?)</ul>'
elif item.args == 'lucky': # sono i titoli random nella pagina, alcuni rimandano solo a server a pagamento
bloque = r'FILM RANDOM.*?class="listSubCat">(.*?)</ul>'
# da qui fare le opportuni modifiche
if item.args[1] == 'genres':
bloque = scrapertools.find_single_match(data, '<ul class="listSubCat" id="Film">(.*?)</ul>')
elif item.args[1] == 'years':
bloque = scrapertools.find_single_match(data, '<ul class="listSubCat" id="Anno">(.*?)</ul>')
elif item.args[1] == 'quality':
bloque = scrapertools.find_single_match(data, '<ul class="listSubCat" id="Qualita">(.*?)</ul>')
elif item.args[1] == 'lucky': # sono i titoli random nella pagina, alcuni rimandano solo a server a pagamento
bloque = scrapertools.find_single_match(data, 'FILM RANDOM.*?class="listSubCat">(.*?)</ul>')
patron = '<li><a href="/(.*?)">(.*?)<'
matches = scrapertools.find_multiple_matches(bloque, patron)
patron = r'<li><a href="([^"]+)">(.*?)<'
if item.args[1] == 'lucky':
bloque = scrapertools.find_single_match(data, 'FILM RANDOM.*?class="listSubCat">(.*?)</ul>')
patron = '<li><a href="(.*?)">(.*?)<'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapurl, scraptitle in sorted(matches):
if item.args[1] != 'lucky':
url = host+scrapurl
action="peliculas"
else:
url = scrapurl
action = "findvideos_film"
itemlist.append(Item(
channel=item.channel,
action=action,
title = scraptitle,
url=url,
thumbnail=get_thumb(scraptitle, auto = True),
Folder = True,
))
return itemlist
# =========== def pagina del film con i server per verderlo =============
# da sistemare che ne da solo 1 come risultato
def findvideos(item):
logger.info("%s mainlist findvideos_film log: %s" % (__channel__, item))
itemlist = []
# scarico la pagina
#data = scrapertools.cache_page(item.url) #non funziona più?
data = httptools.downloadpage(item.url, headers=headers).data
# da qui fare le opportuni modifiche
patron = '<li.*?<a href="#" data-target="(.*?)">'
matches = scrapertools.find_multiple_matches(data, patron)
#logger.info("altadefinizione01_linkMATCHES: %s " % matches)
for scrapedurl in matches:
try:
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
logger.info("Videoitemlist2: %s" % videoitem)
videoitem.title = "%s [%s]" % (item.contentTitle, videoitem.title)#"[%s] %s" % (videoitem.server, item.title) #"[%s]" % (videoitem.title)
videoitem.show = item.show
videoitem.contentTitle = item.contentTitle
videoitem.contentType = item.contentType
videoitem.channel = item.channel
videoitem.year = item.infoLabels['year']
videoitem.infoLabels['plot'] = item.infoLabels['plot']
except AttributeError:
logger.error("data doesn't contain expected URL")
# Controlla se i link sono validi
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
# Aggiunge alla videoteca
if item.extra != 'findvideos' and item.extra != "library" and config.get_videolibrary_support() and len(itemlist) != 0 :
support.videolibrary(itemlist, item)
listGroups = ['url','title']
itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
headers= headers, patron_block = bloque,
action='peliculas')
return itemlist
@@ -204,7 +122,7 @@ def search(item, text):
item.url = host+"/index.php?do=search&story=%s&subaction=search" % (text)
#item.extra = "search"
try:
return peliculas(item)
return film(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
@@ -215,20 +133,18 @@ def search(item, text):
# =========== def per le novità nel menu principale =============
def newest(categoria):
logger.info("%s mainlist search log: %s" % (__channel__, categoria))
support.log(categoria)
itemlist = []
item = Item()
#item.extra = 'film'
try:
if categoria == "film":
if categoria == "peliculas":
item.url = host
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Continua la ricerca in caso di errore
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():

View File

@@ -107,7 +107,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
# patron = 'blablabla'
# headers = [['Referer', host]]
# blacklist = 'Request a TV serie!'
# return support.scrape(item, itemlist, patron, ['thumb', 'quality', 'url', 'title', 'year', 'plot'],
# return support.scrape(item, itemlist, patron, ['thumb', 'quality', 'url', 'title', 'year', 'plot','episode','lang'],
# headers=headers, blacklist=blacklist)
itemlist = []
@@ -128,15 +128,15 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
blocks = scrapertoolsV2.find_multiple_matches(block, regex)
block = ""
for b in blocks:
block += "\n" + b
block += "\n" + str(b) # by greko
log('BLOCK ', n, '=', block)
else:
block = data
if patron and listGroups:
matches = scrapertoolsV2.find_multiple_matches(block, patron)
log('MATCHES =', matches)
known_keys = ['url', 'title', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating'] #by greko aggiunto episode
known_keys = ['url', 'title', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating', 'lang'] # by greko aggiunto episode + lang
for match in matches:
if len(listGroups) > len(match): # to fix a bug
match = list(match)
@@ -152,12 +152,19 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
title = scrapertoolsV2.decodeHtmlentities(scraped["title"]).strip()
plot = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["plot"]))
if scraped["quality"] and scraped["episode"]: # by greko aggiunto episode
longtitle = '[B]' + title + '[/B] - [B]' + scraped["episode"] + '[/B][COLOR blue][' + scraped["quality"] + '][/COLOR]' # by greko aggiunto episode
elif scraped["episode"]: # by greko aggiunto episode
longtitle = '[B]' + title + '[/B] - [B]' + scraped["episode"] + '[/B]' # by greko aggiunto episode
else:
longtitle = '[B]' + title + '[/B]'
# modificato by Greko inizio
longtitle = '[B]' + title + '[/B] '
if scraped["quality"]: # by greko aggiunto episode
longtitle += '[COLOR blue][' + scraped["quality"] + '][/COLOR]'#'[B]' + title + '[/B] - [B]' + scraped["episode"] + '[/B][COLOR blue][' + scraped["quality"] + '][/COLOR]' # by greko aggiunto episode
if scraped["episode"]: # by greko aggiunto episode
longtitle += '[B]' + scraped["episode"] + '[/B]'#'[B]' + title + '[/B] - [B]' + scraped["episode"] + '[/B]' # by greko aggiunto episode
if scraped["lang"]:
if 'sub' in scraped["lang"].lower():
lang = 'Sub-ITA'
else:
lang = 'ITA'
longtitle += '[COLOR blue][ ' + lang + ' ][/COLOR]'
# modificato by Greko fine
if item.infoLabels["title"] or item.fulltitle: # if title is set, probably this is a list of episodes or video sources
infolabels = item.infoLabels

View File

@@ -59,6 +59,12 @@ def find_video_items(item=None, data=None):
itemlist.append(
item.clone(title=title, action="play", url=url, thumbnail=thumbnail, server=server, folder=False))
# fix by Greko inizio
# Controlla se i link sono validi per tutti i canali
# non c'è + bisogno dei controlli nei file[.json, py]
itemlist = check_list_links(itemlist)
# fix by Greko fine
return itemlist
@@ -121,7 +127,7 @@ def get_servers_itemlist(itemlist, fnc=None, sort=False):
# Ordenar segun favoriteslist si es necesario
if sort:
itemlist = sort_servers(itemlist)
return itemlist
@@ -184,6 +190,8 @@ def findvideosbyserver(data, serverid):
devuelve.append(value)
logger.info(msg)
return devuelve