feat: check channel URLs and write the channels.json file from the menu (Redirect)

Added a "Redirect" entry to the menu that checks the channel URLs and writes the channels.json file;
for now it still writes channels-test.json.
The same call was also added to launcher.py but left commented out (lines 44 and 45) so it can be tested: the function
only runs if the device has passed all of the connection checks.
greko17
2019-08-05 09:51:12 +02:00
parent 0a9114bc49
commit 76130a91f6
9 changed files with 128 additions and 458 deletions
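
A minimal sketch of the gating described above, based on the commented-out lines added to launcher.py in this commit (the function names and keyword arguments come from the launcher.py diff below; the body of start() is abbreviated):

# Sketch: how the commented-out call in launcher.py is meant to be enabled.
# check_channels() only runs when every connection check has succeeded.
from specials.checkhost import test_conn, check_channels

def start():
    # test_conn now returns True only if the IP, ADSL and DNS checks all pass
    check_adsl = test_conn(is_exit=True, check_dns=True, view_msg=True,
                           lst_urls=[], lst_site_check_dns=[], in_addon=True)
    if check_adsl:
        # checks every channel URL and (for now) writes channels-test.json
        check_channels()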

View File

@@ -1,13 +1,13 @@
{
"altadefinizione01_club": "https://www.altadefinizione01.cc",
"altadefinizione01_link": "http://altadefinizione1.link",
"altadefinizione01_link": "http://altadefinizione1.com",
"altadefinizione01": "https://www.altadefinizione01.cc",
"altadefinizioneclick": "https://altadefinizione.cloud",
"altadefinizionehd": "https://altadefinizionetv.best",
"animeforge": "https://ww1.animeforce.org",
"animeleggendari": "https://animepertutti.com",
"animestream": "https://www.animeworld.it",
"animespace": "https://www.animespace.tv",
"animespace": "http://www.animespace.tv",
"animesubita": "http://www.animesubita.org",
"animetubeita": "http://www.animetubeita.com",
"animevision": "https://www.animevision.it",
@@ -21,7 +21,7 @@
"dreamsub": "https://www.dreamsub.stream",
"eurostreaming": "https://eurostreaming.pink",
"fastsubita": "http://fastsubita.com",
"filmigratis": "https://filmigratis.net",
"filmigratis": "http://filmigratis.org",
"filmgratis": "https://www.filmaltadefinizione.net",
"filmontv": "https://www.comingsoon.it",
"filmpertutti": "https://www.filmpertutti.pub",

View File

@@ -8,7 +8,7 @@ from core import servertools, support, jsontools
from core.item import Item
from platformcode import config, logger
__channel__ = "altadefinizione01_club"
__channel__ = "altadefinizione01"
host = config.get_channel_url(__channel__)
@@ -22,9 +22,7 @@ list_quality = ['default']
@support.menu
def mainlist(item):
film = ''
filmSub = [
film = [
('Al Cinema', ['/cinema/', 'peliculas', 'pellicola']),
('Generi', ['', 'categorie', 'genres']),
('Lettera', ['/catalog/a/', 'categorie', 'orderalf']),
@@ -40,17 +38,19 @@ def peliculas(item):
support.log('peliculas',item)
action="findvideos"
if item.args == "search":
patronBlock = r'</script> <div class="boxgrid caption">(.*?)<div id="right_bar">'
else:
patronBlock = r'<div class="cover_kapsul ml-mask">(.*?)<div class="page_nav">'
## if item.args == "search":
## patronBlock = r'</script> <div class="boxgrid caption">(.*?)<div id="right_bar">'
## else:
## patronBlock = r'<div class="cover_kapsul ml-mask">(.*?)<div class="page_nav">'
patron = r'<div class="cover boxcaption"> <h2>.<a href="(?P<url>[^"]+)">.*?<.*?src="(?P<thumb>[^"]+)"'\
'.+?[^>]+>[^>]+<div class="trdublaj"> (?P<quality>[A-Z]+)<[^>]+>(?:.[^>]+>(?P<lang>.*?)<[^>]+>).*?'\
'<p class="h4">(?P<title>.*?)</p>[^>]+> [^>]+> [^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+> [^>]+> '\
'[^>]+>[^>]+>(?P<year>\d{4})[^>]+>[^>]+> [^>]+>[^>]+>(?P<duration>\d+).+?>'
patronNext = '<span>\d</span> <a href="([^"]+)">'
## support.regexDbg(item, patron, headers)
return locals()
@support.scrape

View File

@@ -22,9 +22,7 @@ list_quality = ['default']
@support.menu
def mainlist(item):
film = ''
filmSub = [
film = [
('Al Cinema', ['/cinema/', 'peliculas', 'pellicola']),
('Generi', ['', 'categorie', 'genres']),
('Lettera', ['/catalog/a/', 'categorie', 'orderalf']),

View File

@@ -15,32 +15,31 @@ from specials import autoplay, autorenumber
__channel__ = "animeleggendari"
host = config.get_channel_url(__channel__)
# Richiesto per Autoplay
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'streamango']
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
list_servers = ['verystream','openload','rapidvideo','streamango']
list_quality = ['default']
checklinks = config.get_setting('checklinks', 'animeleggendari')
checklinks_number = config.get_setting('checklinks_number', 'animeleggendari')
@support.menu
def mainlist(item):
log()
itemlist = []
menu(itemlist, 'Anime Leggendari', 'peliculas', host + '/category/anime-leggendari/')
menu(itemlist, 'Anime ITA', 'peliculas', host + '/category/anime-ita/')
menu(itemlist, 'Anime SUB-ITA', 'peliculas', host + '/category/anime-sub-ita/')
menu(itemlist, 'Anime Conclusi', 'peliculas', host + '/category/serie-anime-concluse/')
menu(itemlist, 'Anime in Corso', 'peliculas', host + '/category/anime-in-corso/')
menu(itemlist, 'Genere', 'genres', host)
menu(itemlist, 'Cerca...', 'search')
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
support.channel_config(item, itemlist)
anime = ''
animeSub = [
('Leggendari', ['/category/anime-leggendari/', 'peliculas']),
('ITA', ['/category/anime-ita/', 'peliculas']),
('SUB-ITA', ['/category/anime-sub-ita/', 'peliculas']),
('Conclusi', ['/category/serie-anime-concluse/', 'peliculas']),
('in Corso', ['/category/anime-in-corso/', 'peliculas']),
('Genere', ['', 'genres'])
]
return locals()
return itemlist
def search(item, texto):
log(texto)
@@ -48,17 +47,21 @@ def search(item, texto):
item.url = host + "/?s=" + texto
try:
return peliculas(item)
# Continua la ricerca in caso di errore
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
@support.scrape
def last_ep(item):
log('ANIME PER TUTTI')
return support.scrape(item, '<a href="([^"]+)">([^<]+)<', ['url','title'],patron_block='<ul class="mh-tab-content-posts">(.*?)<\/ul>', action='findvideos')
action = 'findvideos'
patron = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)<'
patron_block = r'<ul class="mh-tab-content-posts">(.*?)<\/ul>'
def newest(categoria):
log('ANIME PER TUTTI')
@@ -73,7 +76,7 @@ def newest(categoria):
if itemlist[-1].action == "last_ep":
itemlist.pop()
# Continua la ricerca in caso di errore
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
@@ -82,11 +85,17 @@ def newest(categoria):
return itemlist
@support.scrape
def genres(item):
itemlist = support.scrape(item, '<a href="([^"]+)">([^<]+)<', ['url', 'title'], action='peliculas', patron_block=r'Generi.*?<ul.*?>(.*?)<\/ul>', blacklist=['Contattaci','Privacy Policy', 'DMCA'])
return support.thumb(itemlist)
log()
def peliculas(item):
action = 'peliculas'
blacklist = ['Contattaci','Privacy Policy', 'DMCA']
patron = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)<'
patron_block = r'Generi.*?<ul.*?>(.*?)<\/ul>'
return locals()
def peliculas(item):
log()
itemlist = []
@@ -94,11 +103,11 @@ def peliculas(item):
matches, data = support.match(item, r'<a class="[^"]+" href="([^"]+)" title="([^"]+)"><img[^s]+src="([^"]+)"[^>]+')
for url, title, thumb in matches:
title = scrapertoolsV2.decodeHtmlentities(title.strip()).replace("streaming", "")
title = scrapertoolsV2.decodeHtmlentities(title.strip()).replace("streaming", "")
lang = scrapertoolsV2.find_single_match(title, r"((?:SUB ITA|ITA))")
videoType = ''
videoType = ''
if 'movie' in title.lower():
videoType = ' - (MOVIE)'
videoType = ' - (MOVIE)'
if 'ova' in title.lower():
videoType = ' - (OAV)'
@@ -117,13 +126,13 @@ def peliculas(item):
action=action,
contentType=contentType,
title=support.typo(cleantitle + videoType, 'bold') + support.typo(lang,'_ [] color kod'),
fulltitle=cleantitle,
fulltitle=cleantitle,
show=cleantitle,
url=url,
thumbnail=thumb))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
autorenumber.renumber(itemlist)
autorenumber.renumber(itemlist)
support.nextPage(itemlist, item, data, r'<a class="next page-numbers" href="([^"]+)">')
return itemlist
@@ -155,7 +164,7 @@ def episodios(item):
fulltitle=item.title,
url=url,
thumbnail=item.thumbnail))
autorenumber.renumber(itemlist, item)
support.videolibrary
return itemlist
@@ -171,16 +180,16 @@ def findvideos(item):
log('DATA',data)
if 'animepertutti' in data:
log('ANIMEPERTUTTI!')
else:
data = ''
itemlist = support.server(item,data)
if checklinks:
itemlist = servertools.check_list_links(itemlist, checklinks_number)
# itemlist = filtertools.get_links(itemlist, item, list_language)
autoplay.start(itemlist, item)
return itemlist

View File

@@ -4,8 +4,8 @@
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "https://www.filmpertutti.club/wp-content/themes/blunge/assets/logo.png",
"banner": "https://www.filmpertutti.club/wp-content/themes/blunge/assets/logo.png",
"thumbnail": "",
"banner": "",
"categories": ["tvshow","movie"],
"settings": [
{

View File

@@ -1,353 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per Serietvsubita
# Thanks to Icarus crew & Alfa addon & 4l3x87
# ----------------------------------------------------------
import re
import time
from core import httptools, tmdb, scrapertools, support
from core.item import Item
from core.support import log
from platformcode import logger, config
__channel__ = "serietvsubita"
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['gounlimited', 'verystream', 'streamango', 'openload']
list_quality = ['default']
def mainlist(item):
log()
itemlist = []
support.menu(itemlist, 'Novità bold', 'peliculas_tv', host, 'tvshow')
support.menu(itemlist, 'Serie TV bold', 'lista_serie', host, 'tvshow')
support.menu(itemlist, 'Archivio A-Z bold', 'list_az', host, 'tvshow', args=['serie'])
support.aplay(item, itemlist, list_servers, list_quality)
support.channel_config(item, itemlist)
return itemlist
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×', 'x').replace('Game of Thrones ','')\
.replace('In The Dark 2019', 'In The Dark (2019)').replace('"', "'").strip()
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
if year:
scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
return scrapedtitle.strip()
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
log()
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
data = re.sub(r'\n|\t|\s+', ' ', data)
# recupero il blocco contenente i link
blocco = scrapertools.find_single_match(data, r'<div class="entry">([\s\S.]*?)<div class="post').replace('..:: Episodio ', 'Episodio ').strip()
matches = scrapertools.find_multiple_matches(blocco, '(S(\d*)E(\d*))\s')
if len(matches) > 0:
for fullseasonepisode, season, episode in matches:
blocco = blocco.replace(fullseasonepisode + ' ', 'Episodio ' + episode + ' ')
blocco = blocco.replace('Episodio ', '..:: Episodio ')
episodio = item.infoLabels['episode']
patron = r'\.\.:: Episodio %s([\s\S]*?)(<div class="post|..:: Episodio)' % episodio
log(patron)
log(blocco)
matches = scrapertools.find_multiple_matches(blocco, patron)
if len(matches):
data = matches[0][0]
patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
matches = re.compile(patron, re.DOTALL).findall(data)
for keeplinks, id in matches:
headers2 = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
['Referer', keeplinks]]
html = httptools.downloadpage(keeplinks, headers=headers2).data
data += str(scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"'))
return support.server(item, data=data)
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def lista_serie(item):
log()
itemlist = []
PERPAGE = 15
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
if '||' in item.url:
series = item.url.split('\n\n')
matches = []
for i, serie in enumerate(series):
matches.append(serie.split('||'))
else:
# Extrae las entradas
patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
matches = support.match(item, patron, headers=headers)[0]
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
scrapedplot = ""
scrapedthumbnail = ""
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
title = cleantitle(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="episodios",
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=title,
show=title,
plot=scrapedplot,
contentType='episode',
originalUrl=scrapedurl,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginazione
if len(matches) >= p * PERPAGE:
support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1)))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def episodios(item, itemlist=[]):
log()
patron = r'<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>.*?'
patron += r'<p><a href="([^"]+)">'
matches, data = support.match(item, patron, headers=headers)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedplot = ""
scrapedtitle = cleantitle(scrapedtitle)
if "(Completa)" in scrapedtitle:
data = httptools.downloadpage(scrapedurl, headers=headers).data
scrapedtitle = scrapedtitle.replace(" Miniserie", " Stagione 1")
title = scrapedtitle.split(" Stagione")[0].strip()
# recupero la stagione
season = scrapertools.find_single_match(scrapedtitle, 'Stagione ([0-9]*)')
blocco = scrapertools.find_single_match(data, '<div class="entry">[\s\S.]*?<div class="post')
blocco = blocco.replace('<strong>Episodio ', '<strong>Episodio ').replace(' </strong>', ' </strong>')
blocco = blocco.replace('<strong>Episodio ', '<strong>S' + season.zfill(2) + 'E')
matches = scrapertools.find_multiple_matches(blocco, r'(S(\d*)E(\d*))\s')
episodes = []
if len(matches) > 0:
for fullepisode_s, season, episode in matches:
season = season.lstrip("0")
episodes.append([
"".join([season, "x", episode]),
season,
episode
])
else:
title = scrapedtitle.split(" S0")[0].strip()
title = title.split(" S1")[0].strip()
title = title.split(" S2")[0].strip()
episodes = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')
for fullepisode, season, episode in episodes:
infoLabels = {}
infoLabels['season'] = season
infoLabels['episode'] = episode
fullepisode += ' ' + support.typo("Sub-ITA", '_ [] color kod')
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=fullepisode,
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
contentSerieName=title,
infoLabels=infoLabels,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginazione
patron = r'<strong class="on">\d+</strong>\s*<a href="([^<]+)">\d+</a>'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
item.url = next_page
itemlist = episodios(item, itemlist)
else:
item.url = item.originalUrl
support.videolibrary(itemlist, item, 'bold color kod')
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def peliculas_tv(item):
log()
itemlist = []
patron = '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>'
matches, data = support.match(item, patron, headers=headers)
for scrapedurl, scrapedtitle in matches:
if scrapedtitle in ["FACEBOOK", "RAPIDGATOR", "WELCOME!"]:
continue
scrapedthumbnail = ""
scrapedplot = ""
scrapedtitle = cleantitle(scrapedtitle)
infoLabels = {}
episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')
if episode: # workaround per quando mettono le serie intere o altra roba, sarebbero da intercettare TODO
episode = episode[0]
title = scrapedtitle.split(" S0")[0].strip()
title = title.split(" S1")[0].strip()
title = title.split(" S2")[0].strip()
infoLabels['season'] = episode[1]
infoLabels['episode'] = episode[2].zfill(2)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=title + " - " + episode[0] + " " + support.typo("Sub-ITA", '_ [] color kod'),
url=scrapedurl,
thumbnail=scrapedthumbnail,
contentSerieName=title,
contentLanguage='Sub-ITA',
plot=scrapedplot,
infoLabels=infoLabels,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginazione
patron = r'<strong class="on">\d+</strong>\s<a href="([^<]+)">\d+</a>'
support.nextPage(itemlist, item, data, patron)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
log(categoria)
itemlist = []
item = Item()
item.url = host
item.extra = 'serie'
try:
if categoria == "series":
itemlist = peliculas_tv(item)
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
log(texto)
itemlist = []
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
matches = support.match(item, patron, headers=headers)[0]
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
if texto.upper() in scrapedtitle.upper():
scrapedthumbnail = ""
scrapedplot = ""
title = cleantitle(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="episodios",
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=title,
show=title,
plot=scrapedplot,
contentType='episode',
originalUrl=scrapedurl,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def list_az(item):
log()
itemlist = []
alphabet = dict()
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
matches = support.match(item, patron, headers=headers)[0]
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
letter = scrapedtitle[0].upper()
if letter not in alphabet:
alphabet[letter] = []
alphabet[letter].append(scrapedurl + '||' + scrapedtitle)
for letter in sorted(alphabet):
itemlist.append(
Item(channel=item.channel,
action="lista_serie",
url='\n\n'.join(alphabet[letter]),
title=letter,
fulltitle=letter))
return itemlist
# ================================================================================================================

View File

@@ -19,11 +19,9 @@ def getmainlist(view="thumb_"):
################################################################
################################################################
# Questa voce è per TESTING e NON DOVRà MAI andare in stable
## itemlist.append(Item(title="KDICC", channel="checkhost", action="check",
## thumbnail='',
## category=config.get_localized_string(30119), viewmode="thumbnails",
## context=[{"title": config.get_localized_string(70285), "channel": "checkhost", "action": "menu_opciones",
## "goto": True}]))
itemlist.append(Item(title="Redirect", channel="checkhost", action="check_channels",
thumbnail='',
category=config.get_localized_string(30119), viewmode="thumbnails"))
################################################################
################################################################
# Añade los canales que forman el menú principal

View File

@@ -34,11 +34,16 @@ def start():
# se lo ha: non lo fa entrare nell'addon
# se ha problemi di DNS avvia ma lascia entrare
# se tutto ok: entra nell'addon
from specials.checkhost import test_conn
test_conn(is_exit = True, check_dns = True, view_msg = True,
from specials.checkhost import test_conn, check_channels
check_adsl = test_conn(is_exit = True, check_dns = True, view_msg = True,
lst_urls = [], lst_site_check_dns = [], in_addon = True)
# Permette di scrivere il file channels.json
# controllando gli url del file solo se tutti i check
# della connessione sono andati a buon fine
## if check_adsl:
## check_channels()
def run(item=None):
logger.info()
if not item:

View File

@@ -5,6 +5,8 @@ import json
from platformcode import config, logger
import requests
from requests.exceptions import HTTPError
import httplib2
import socket
addon = xbmcaddon.Addon()
addonname = addon.getAddonInfo('name')
@@ -19,7 +21,7 @@ LST_SITE_CHCK_DNS = ['https://www.italia-film.pw', 'https://casacinema.space',
class Kdicc():
def __init__(self, is_exit = True, check_dns = True, view_msg = True,
lst_urls = [], lst_site_check_dns = [], in_addon = False):
@@ -60,7 +62,7 @@ class Kdicc():
"""
controllo se il device raggiunge i siti
"""
urls = LIST_SITE
r = self.rqst(urls)
http_errr = 0
@@ -104,10 +106,10 @@ class Kdicc():
return : (esito, sito, url, code, reurl)
"""
rslt_final = []
if lst_urls == []:
lst_urls = self.lst_urls
for sito in lst_urls:
rslt = {}
try:
@@ -125,7 +127,7 @@ class Kdicc():
rslt['isRedirect'] = is_redirect
rslt['history'] = r.history
xbmc.log("Risultato nel try: %s" % (r,), level=xbmc.LOGNOTICE)
except requests.exceptions.ConnectionError as conn_errr:
# Errno 10061 per s.o. win
# gli Errno 10xxx e 11xxx saranno da compattare in qualche modo?
@@ -135,7 +137,6 @@ class Kdicc():
or 'ConnectTimeoutError' in str(conn_errr) \
or 'Errno 11002' in str(conn_errr) or 'ReadTimeout' in str(conn_errr) \
or 'Errno 11001' in str(conn_errr): # questo errore è anche nel code: -2
# nei casi in cui vogliamo raggiungere certi siti...
rslt['code'] = '111'
rslt['url'] = str(sito)
rslt['http_err'] = 'Connection refused'
@@ -148,6 +149,30 @@ class Kdicc():
return rslt_final
def http_Resp(self):
rslt = {}
for sito in self.lst_urls:
try:
s = httplib2.Http()
code, resp = s.request(sito, body=None)
if code.previous:
xbmc.log("r1 http_Resp: %s %s %s %s" %
(code.status, code.reason, code.previous['status'],
code.previous['-x-permanent-redirect-url']), level=xbmc.LOGNOTICE)
rslt['code'] = code.previous['status']
rslt['redirect'] = code.previous['-x-permanent-redirect-url']
rslt['status'] = code.status
else:
rslt['code'] = code.status
except httplib2.ServerNotFoundError as msg:
# sia per mancanza di ADSL che per i siti non esistenti
rslt['code'] = -2
except socket.error as msg:
# per siti irraggiungibili senza DNS corretti
#[Errno 111] Connection refused
rslt['code'] = 111
return rslt
def view_Advise(self, txt = '' ):
"""
Avviso per utente
@@ -174,50 +199,34 @@ def test_conn(is_exit, check_dns, view_msg,
ktest = Kdicc(is_exit, check_dns, view_msg, lst_urls, lst_site_check_dns, in_addon)
# se non ha l'ip lo comunico all'utente
# è utile nelle richieste di aiuto per l'addon che non funziona
# If you don't have an ip, I'll let you know.
# Is useful in requests for help for the addon that does not work
risultato = []
if not ktest.check_Ip():
# non permetto di entrare nell'addon
# I don't let you get into the addon
# inserire codice lingua
if view_msg == True:
# inserire codice lingua
ktest.view_Advise(config.get_localized_string(70720))
if ktest.is_exit == True:
exit()
# se non ha connessione ADSL lo comunico all'utente
elif not ktest.check_Adsl():
if view_msg == True:
# inserire codice lingua
ktest.view_Advise(config.get_localized_string(70721))
if ktest.is_exit == True:
exit()
# se ha i DNS filtrati lo comunico all'utente
elif ktest.check_dns:
if not ktest.check_Dns():
if view_msg == True:
# inserire codice lingua
ktest.view_Advise(config.get_localized_string(70722))
## if ktest.is_exit == True:
## exit()
## else:
# Lasciando solo else al posto dell'if sotto, l'else non viene considerato!
## if ktest.check_Ip() and ktest.check_Adsl() and ktest.check_Dns():
## # tutto ok! entro nell'addon
## if view_msg == True and in_addon == False:
## ktest.view_Advise('Configurazione rete OK!\n')
## for ris in ktest.rqst(lst_urls):
## risultato.append(ris)
return risultato
if ktest.check_Ip() and ktest.check_Adsl() and ktest.check_Dns():
return True
else:
return False
# def per la creazione del file channels.json
"""
def check_channels(inutile=''):
"""
leggo gli host dei canali dal file channels.json
li controllo
scrivo il file channels-test.json
@@ -232,8 +241,7 @@ def test_conn(is_exit, check_dns, view_msg,
Nel caso accada un problema, il controllo e relativa scrittura del file viene interrotto
con messaggio di avvertimento
"""
def check(item):
"""
logger.info()
folderJson = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('path')).decode('utf-8')
@@ -241,36 +249,41 @@ def check(item):
with open(folderJson+'/'+fileJson) as f:
data = json.load(f)
## logger.info("DATA :%s" % data)
risultato = {}
for chann, host in sorted(data.items()):
ris = []
# per avere un'idea della tempistica
# utile solo se si controllano tutti i canali
# per i canali con error 522 si perdono circa 40 sec...
logger.info("check #### INIZIO #### channel - host :%s - %s " % (chann, host))
#lst_host = []
#lst_host.append(host)
lst_host = [host]
rslt = Kdicc(lst_urls = [host]).http_Resp()
rslt = test_conn(is_exit = True, check_dns = False, view_msg = True,
lst_urls = lst_host, lst_site_check_dns = [], in_addon = True)
logger.info("check #### FINE #### rslt :%s " % (rslt))
rslt = rslt[0]
# tutto ok
if rslt['code'] == 200 and rslt['isRedirect'] == False:
if rslt['code'] == 200:
risultato[chann] = host
# redirect
elif rslt['code'] == 200 and rslt['isRedirect'] == True:
risultato[chann] = str(rslt['code']) +' - '+ rslt['rdrcturl']
elif str(rslt['code']).startswith('3'):
#risultato[chann] = str(rslt['code']) +' - '+ rslt['redirect'][:-1]
if rslt['redirect'].endswith('/'):
rslt['redirect'] = rslt['redirect'][:-1]
risultato[chann] = rslt['redirect']
# sito inesistente
elif rslt['code'] == -2:
risultato[chann] = 'Host Sconosciuto - '+ str(rslt['code']) +' - '+ host
# sito non raggiungibile - probabili dns non settati
elif rslt['code'] == 111:
risultato[chann] = ['Host non raggiungibile - '+ str(rslt['code']) +' - '+ host]
else:
# altri tipi di errore
risultato[chann] = 'Errore Sconosciuto - '+str(rslt['code']) +' - '+ host
#risultato[chann] = 'Errore Sconosciuto - '+str(rslt['code']) +' - '+ host
risultato[chann] = host
logger.info("check #### FINE #### rslt :%s " % (rslt))
fileJson_test = 'channels-test.json'
# scrivo il file aggiornato
with open(folderJson+'/'+fileJson_test, 'w') as f:
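
To summarise the flow added above: check_channels reads the hosts from channels.json, probes each one, and (for now) writes the result to channels-test.json. A minimal standalone sketch of that flow follows; the real code goes through Kdicc.http_Resp inside Kodi, so the plain requests calls and the literal file paths here are assumptions made only so the sketch can run on its own:

# Standalone approximation of the check_channels flow (assumptions noted above).
import json
import requests

def check_channels_sketch(src='channels.json', dst='channels-test.json'):
    with open(src) as f:
        data = json.load(f)
    risultato = {}
    for chann, host in sorted(data.items()):
        try:
            r = requests.get(host, allow_redirects=False, timeout=10)
            if r.status_code == 200:
                risultato[chann] = host                      # host still valid
            elif str(r.status_code).startswith('3'):
                redirect = r.headers.get('location', host)
                risultato[chann] = redirect.rstrip('/')      # record the new URL
            else:
                risultato[chann] = host                      # other errors: keep the old host
        except requests.exceptions.ConnectionError:
            risultato[chann] = 'Host non raggiungibile - 111 - ' + host
    # write the updated mapping (the real code targets channels-test.json for now)
    with open(dst, 'w') as f:
        json.dump(risultato, f, indent=4, sort_keys=True)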