diff --git a/channels/altadefinizione01_link.json b/channels/altadefinizione01_link.json
index 47cb3ac3..a60ab421 100644
--- a/channels/altadefinizione01_link.json
+++ b/channels/altadefinizione01_link.json
@@ -4,15 +4,23 @@
"active": true,
"adult": false,
"language": ["ita"],
- "fanart": "",
- "thumbnail": "",
- "banner": "http://altadefinizione01.link/templates/Dark/img/logonyy.png",
+ "fanart": "https://altadefinizione01.estate/templates/Dark/img/nlogo.png",
+ "thumbnail": "https://altadefinizione01.estate/templates/Dark/img/nlogo.png",
+ "banner": "https://altadefinizione01.estate/templates/Dark/img/nlogo.png",
"fix" : "reimpostato url e modificato file per KOD",
"change_date": "2019-30-04",
"categories": [
"movie"
],
"settings": [
+ {
+ "id": "channel_host",
+ "type": "text",
+ "label": "Host del canale",
+ "default": "https://altadefinizione01.estate/",
+ "enabled": true,
+ "visible": true
+ },
{
"id": "modo_grafico",
"type": "bool",
diff --git a/channels/altadefinizione01_link.py b/channels/altadefinizione01_link.py
index 7e690035..55f4f1c5 100644
--- a/channels/altadefinizione01_link.py
+++ b/channels/altadefinizione01_link.py
@@ -3,14 +3,13 @@
# -*- Creato per Alfa-addon -*-
# -*- e adattato for KOD -*-
# -*- By Greko -*-
-# -*- last change: 04/05/2019
+# -*- last change: 26/05/2019
-
-from channelselector import get_thumb
-from core import httptools, scrapertools, servertools, tmdb, support
+import channelselector
+from specials import autoplay
+from core import servertools, support, jsontools
from core.item import Item
from platformcode import config, logger
-from specials import autoplay, filtertools
__channel__ = "altadefinizione01_link"
@@ -18,21 +17,19 @@ __channel__ = "altadefinizione01_link"
#host = "http://altadefinizione01.art/" # aggiornato al 22 marzo 2019
#host = "https://altadefinizione01.network/" #aggiornato al 22 marzo 2019
#host = "http://altadefinizione01.date/" #aggiornato al 3 maggio 2019
-host = "https://altadefinizione01.voto/" #aggiornato al 3 maggio 2019
+#host = "https://altadefinizione01.voto/" #aggiornato al 3 maggio 2019
+#host = "https://altadefinizione01.estate/" # aggiornato al 23 maggio 2019
# ======== def per utility INIZIO ============================
-
+
+list_servers = ['supervideo', 'streamcherry','rapidvideo', 'streamango', 'openload']
+list_quality = ['default']
+
+host = config.get_setting("channel_host", __channel__)
checklinks = config.get_setting('checklinks', __channel__)
checklinks_number = config.get_setting('checklinks_number', __channel__)
-headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
- ['Referer', host]]#,['Accept-Language','it-IT,it;q=0.8,en-US;q=0.5,en;q=0.3']]
-
-IDIOMAS = {'Italiano': 'IT'}
-list_language = IDIOMAS.values()
-list_servers = ['openload', 'streamcherry','rapidvideo', 'streamango', 'supervideo']
-list_quality = ['default']
-
+headers = [['Referer', host]]
# =========== home menu ===================
def mainlist(item):
@@ -41,164 +38,88 @@ def mainlist(item):
:param item:
:return: itemlist []
"""
- logger.info("%s mainlist log: %s" % (__channel__, item))
+ support.log()
itemlist = []
- autoplay.init(item.channel, list_servers, list_quality)
# Menu Principale
- support.menu(itemlist, 'Film Ultimi Arrivi bold', 'peliculas', host)#, args='film')
- support.menu(itemlist, 'Genere', 'categorie', host, args=['','genres'])
- support.menu(itemlist, 'Per anno submenu', 'categorie', host, args=['Film per Anno','years'])
- support.menu(itemlist, 'Per qualità submenu', 'categorie', host, args=['Film per qualità','quality'])
+ support.menu(itemlist, 'Novità bold', 'peliculas', host)
+ support.menu(itemlist, 'Film per Genere', 'genres', host, args='genres')
+ support.menu(itemlist, 'Film per Anno submenu', 'genres', host, args='years')
+ support.menu(itemlist, 'Film per Qualità submenu', 'genres', host, args='quality')
support.menu(itemlist, 'Al Cinema bold', 'peliculas', host+'film-del-cinema')
- support.menu(itemlist, 'Popolari bold', 'categorie', host+'piu-visti.html', args=['popular',''])
- support.menu(itemlist, 'Mi sento fortunato bold', 'categorie', host, args=['fortunato','lucky'])
+ support.menu(itemlist, 'Popolari bold', 'peliculas', host+'piu-visti.html')
+ support.menu(itemlist, 'Mi sento fortunato bold', 'genres', host, args='lucky')
support.menu(itemlist, 'Sub-ITA bold', 'peliculas', host+'film-sub-ita/')
support.menu(itemlist, 'Cerca film submenu', 'search', host)
+ # per autoplay
+ autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
+
+ itemlist.append(
+ Item(channel='setting',
+ action="channel_config",
+ title=support.typo("Configurazione Canale color lime"),
+ config=item.channel,
+ folder=False,
+ thumbnail=channelselector.get_thumb('setting_0.png'))
+ )
return itemlist
-# ======== def in ordine di menu ===========================
+# ======== def in ordine di action dal menu ===========================
def peliculas(item):
- logger.info("%s mainlist peliculas log: %s" % (__channel__, item))
+    support.log()
itemlist = []
- # scarico la pagina
- data = httptools.downloadpage(item.url, headers=headers).data
- # da qui fare le opportuni modifiche
- patron = 'class="innerImage">.*?href="([^"]+)".*?src="([^"]+)".*?'\
- 'class="ml-item-title">([^"]+)'\
- '(.*?)<.*?class="ml-item-label">.*?class="ml-item-label">(.*?)'
- matches = scrapertools.find_multiple_matches(data, patron)
- for scrapedurl, scrapedimg, scrapedtitle, scrapedyear, scrapedlang in matches:
- if 'italiano' in scrapedlang.lower():
- scrapedlang = 'ITA'
- else:
- scrapedlang = 'Sub-Ita'
- itemlist.append(Item(
- channel=item.channel,
- action="findvideos",
- contentTitle=scrapedtitle,
- fulltitle=scrapedtitle,
- url=scrapedurl,
- infoLabels={'year': scrapedyear},
- contenType="movie",
- thumbnail=scrapedimg,
- title="%s [%s]" % (scrapedtitle, scrapedlang),
- language=scrapedlang,
- context="buscar_trailer"
- ))
+    patron = r'class="innerImage">.*?href="([^"]+)".*?src="([^"]+)"'\
+             r'.*?class="ml-item-title">([^<]+) (\d{4}) <'\
+             r'.*?class="ml-item-label">.*?class="ml-item-label ml-item-label-.+?"> '\
+             r'(.+?) .*?class="ml-item-label"> (.+?) '
+ listGroups = ['url', 'thumb', 'title', 'year', 'quality', 'lang']
- # poichè il sito ha l'anno del film con TMDB la ricerca titolo-anno è esatta quindi inutile fare lo scrap delle locandine
- # e della trama dal sito che a volte toppano
- tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-
- # Paginazione
- support.nextPage(itemlist,item,data,'\d ')
-
+    patronNext = r'\d '
+
+ itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
+ headers= headers, patronNext=patronNext,
+ action='findvideos')
+
return itemlist
# =========== def pagina categorie ======================================
-def categorie(item):
- logger.info("%s mainlist categorie log: %s" % (__channel__, item))
+def genres(item):
+    support.log()
itemlist = []
- # scarico la pagina
- data = httptools.downloadpage(item.url, headers=headers).data
+ #data = httptools.downloadpage(item.url, headers=headers).data
+ action = 'peliculas'
+ if item.args == 'genres':
+ bloque = r''
+ elif item.args == 'years':
+ bloque = r''
+ elif item.args == 'quality':
+ bloque = r''
+ elif item.args == 'lucky': # sono i titoli random nella pagina, cambiano 1 volta al dì
+ bloque = r'FILM RANDOM.*?class="listSubCat">(.*?)'
+ action = 'findvideos'
+
+ patron = r'(.*?)<'
- # da qui fare le opportuni modifiche
- if item.args[1] == 'genres':
- bloque = scrapertools.find_single_match(data, '')
- elif item.args[1] == 'years':
- bloque = scrapertools.find_single_match(data, '')
- elif item.args[1] == 'quality':
- bloque = scrapertools.find_single_match(data, '')
- elif item.args[1] == 'lucky': # sono i titoli random nella pagina, alcuni rimandano solo a server a pagamento
- bloque = scrapertools.find_single_match(data, 'FILM RANDOM.*?class="listSubCat">(.*?)')
- patron = '(.*?)<'
- matches = scrapertools.find_multiple_matches(bloque, patron)
-
- if item.args[1] == 'lucky':
- bloque = scrapertools.find_single_match(data, 'FILM RANDOM.*?class="listSubCat">(.*?)')
- patron = '(.*?)<'
- matches = scrapertools.find_multiple_matches(bloque, patron)
-
- for scrapurl, scraptitle in sorted(matches):
- if item.args[1] != 'lucky':
- url = host+scrapurl
- action="peliculas"
- else:
- url = scrapurl
- action = "findvideos_film"
- itemlist.append(Item(
- channel=item.channel,
- action=action,
- title = scraptitle,
- url=url,
- thumbnail=get_thumb(scraptitle, auto = True),
- Folder = True,
- ))
-
- return itemlist
-
-
-# =========== def pagina del film con i server per verderlo =============
-# da sistemare che ne da solo 1 come risultato
-
-def findvideos(item):
- logger.info("%s mainlist findvideos_film log: %s" % (__channel__, item))
- itemlist = []
- # scarico la pagina
- #data = scrapertools.cache_page(item.url) #non funziona più?
- data = httptools.downloadpage(item.url, headers=headers).data
- # da qui fare le opportuni modifiche
- patron = ''
- matches = scrapertools.find_multiple_matches(data, patron)
- #logger.info("altadefinizione01_linkMATCHES: %s " % matches)
- for scrapedurl in matches:
-
- try:
- itemlist = servertools.find_video_items(data=data)
-
- for videoitem in itemlist:
- logger.info("Videoitemlist2: %s" % videoitem)
- videoitem.title = "%s [%s]" % (item.contentTitle, videoitem.title)#"[%s] %s" % (videoitem.server, item.title) #"[%s]" % (videoitem.title)
- videoitem.show = item.show
- videoitem.contentTitle = item.contentTitle
- videoitem.contentType = item.contentType
- videoitem.channel = item.channel
- videoitem.year = item.infoLabels['year']
- videoitem.infoLabels['plot'] = item.infoLabels['plot']
- except AttributeError:
- logger.error("data doesn't contain expected URL")
-
- # Controlla se i link sono validi
- if checklinks:
- itemlist = servertools.check_list_links(itemlist, checklinks_number)
-
- # Requerido para FilterTools
- itemlist = filtertools.get_links(itemlist, item, list_language)
-
- # Requerido para AutoPlay
- autoplay.start(itemlist, item)
-
- # Aggiunge alla videoteca
- if item.extra != 'findvideos' and item.extra != "library" and config.get_videolibrary_support() and len(itemlist) != 0 :
- support.videolibrary(itemlist, item)
+ listGroups = ['url','title']
+ itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
+ headers= headers, patron_block = bloque,
+ action=action)
return itemlist
# =========== def per cercare film/serietv =============
#host+/index.php?do=search&story=avatar&subaction=search
def search(item, text):
- logger.info("%s mainlist search log: %s %s" % (__channel__, item, text))
+ support.log()
itemlist = []
text = text.replace(" ", "+")
item.url = host+"/index.php?do=search&story=%s&subaction=search" % (text)
- #item.extra = "search"
try:
return peliculas(item)
# Se captura la excepciÛn, para no interrumpir al buscador global si un canal falla
@@ -211,20 +132,18 @@ def search(item, text):
# =========== def per le novità nel menu principale =============
def newest(categoria):
- logger.info("%s mainlist search log: %s" % (__channel__, categoria))
+ support.log(categoria)
itemlist = []
item = Item()
- #item.extra = 'film'
try:
- if categoria == "film":
+ if categoria == "peliculas":
item.url = host
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
-
- # Continua la ricerca in caso di errore
+ # Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
@@ -232,3 +151,18 @@ def newest(categoria):
return []
return itemlist
+
+def findvideos(item):
+ support.log()
+
+ itemlist = support.server(item, headers=headers)
+
+ # Requerido para FilterTools
+ # itemlist = filtertools.get_links(itemlist, item, list_language)
+
+ # Requerido para AutoPlay
+ autoplay.start(itemlist, item)
+
+ support.videolibrary(itemlist, item, 'color kod')
+
+ return itemlist
diff --git a/core/support.py b/core/support.py
index 3aa022a2..c91e8cdc 100644
--- a/core/support.py
+++ b/core/support.py
@@ -95,7 +95,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
patronNext="", action="findvideos", addVideolibrary = True, type_content_dict={}, type_action_dict={}):
# patron: the patron to use for scraping page, all capturing group must match with listGroups
# listGroups: a list containing the scraping info obtained by your patron, in order
- # accepted values are: url, title, thumb, quality, year, plot, duration, genre, rating
+ # accepted values are: url, title, thumb, quality, year, plot, duration, genre, rating, episode, lang
# header: values to pass to request header
# blacklist: titles that you want to exclude(service articles for example)
@@ -112,7 +112,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
# patron = 'blablabla'
# headers = [['Referer', host]]
# blacklist = 'Request a TV serie!'
- # return support.scrape(item, itemlist, patron, ['thumb', 'quality', 'url', 'title', 'year', 'plot'],
+ # return support.scrape(item, itemlist, patron, ['thumb', 'quality', 'url', 'title', 'year', 'plot', 'episode', 'lang'],
# headers=headers, blacklist=blacklist)
# 'type' is a check for typologies of content e.g. Film or TV Series
# 'episode' is a key to grab episode numbers if it is separated from the title
@@ -136,7 +136,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
blocks = scrapertoolsV2.find_multiple_matches(block, regex)
block = ""
for b in blocks:
- block += "\n" + b
+ block += "\n" + str(b)
log('BLOCK ', n, '=', block)
else:
block = data
@@ -144,7 +144,8 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
matches = scrapertoolsV2.find_multiple_matches(block, patron)
log('MATCHES =', matches)
- known_keys = ['url', 'title', 'title2', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating', 'type'] #by greko aggiunto episode
+ known_keys = ['url', 'title', 'title2', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang'] #by greko aggiunto episode
+
for match in matches:
if len(listGroups) > len(match): # to fix a bug
match = list(match)
@@ -157,7 +158,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
val = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
scraped[kk] = val
- title = scrapertoolsV2.decodeHtmlentities(scraped["title"]).strip()
+ title = scrapertoolsV2.decodeHtmlentities(scraped["title"]).replace('"', "'").strip() # fix by greko da " a '
plot = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["plot"]))
longtitle = typo(title, 'bold')
@@ -168,6 +169,12 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
if scraped['title2']:
title2 = scrapertoolsV2.decodeHtmlentities(scraped["title2"]).strip()
longtitle = longtitle + typo(title2, 'bold _ -- _')
+ if scraped["lang"]:
+ if 'sub' in scraped["lang"].lower():
+ lang = 'Sub-ITA'
+ else:
+ lang = 'ITA'
+ longtitle += typo(lang, '_ [] color kod')
if item.infoLabels["title"] or item.fulltitle: # if title is set, probably this is a list of episodes or video sources
infolabels = item.infoLabels