diff --git a/_config.yml b/_config.yml
new file mode 100644
index 00000000..18854876
--- /dev/null
+++ b/_config.yml
@@ -0,0 +1 @@
+theme: jekyll-theme-midnight
\ No newline at end of file
diff --git a/channels/eurostreaming.json b/channels/eurostreaming.json
index 186b3a5b..d63dde60 100644
--- a/channels/eurostreaming.json
+++ b/channels/eurostreaming.json
@@ -4,8 +4,8 @@
"active": true,
"adult": false,
"language": ["ita"],
- "thumbnail": "",
- "bannermenu": "",
+ "thumbnail": "https://eurostreaming.cafe/wp-content/uploads/2017/08/logocafe.png",
+ "bannermenu": "https://eurostreaming.cafe/wp-content/uploads/2017/08/logocafe.png",
"categories": ["tvshow","anime"],
"settings": [
{
@@ -66,8 +66,8 @@
"visible": true,
"lvalues": [
"Non filtrare",
- "ITA",
- "SUB ITA"
+ "Italiano",
+ "vosi"
]
},
{
diff --git a/channels/eurostreaming.py b/channels/eurostreaming.py
index c9c6cd9a..2d29a533 100644
--- a/channels/eurostreaming.py
+++ b/channels/eurostreaming.py
@@ -1,281 +1,133 @@
# -*- coding: utf-8 -*-
-# -*- Created or modified for Alfa-Addon -*-
-# -*- adapted for KOD -*-
-# -*- By Greko -*-
+# ------------------------------------------------------------
+# Channel for Eurostreaming
+# adapted from Cineblog01
+# by Greko
+# ------------------------------------------------------------
+"""
+ Riscritto per poter usufruire del modulo support.
+ Problemi noti:
+ Alcun regex possono migliorare
+ server versystream : 'http://vcrypt.net/very/' # VeryS non decodifica il link :http://vcrypt.net/fastshield/
+ server nowvideo.club da implementare nella cartella servers, altri server nei meandri del sito?!
+ Alcune sezioni di anime-cartoni non vanno, alcune hanno solo la lista degli episodi, ma non hanno link
+ altre cambiano la struttura
+ La sezione novità non fa apparire il titolo degli episodi
+"""
-#import base64
import re
-import urlparse
-# the imports above are to be included only when needed
-# for urls shortened with ad.fly
-from lib import unshortenit
-from channelselector import get_thumb
-from channels import autoplay
-from channels import filtertools
-from core import httptools
-from core import scrapertoolsV2
-from core import servertools
+from channels import autoplay, filtertools, support
+from core import scrapertoolsV2, httptools, servertools, tmdb
from core.item import Item
-from core import channeltools
-from core import tmdb
-from platformcode import config, logger
+from platformcode import logger, config
-__channel__ = "eurostreaming" # same as the id in the json file
-#host = "https://eurostreaming.zone/"
-#host = "https://eurostreaming.black/"
-host = "https://eurostreaming.cafe/" #aggiornato al 30-04-2019
+host = "https://eurostreaming.cafe/"
+headers = ['Referer', host]
-# ======== utility defs START =============================
-try:
- __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
- __perfil__ = int(config.get_setting('perfil', __channel__))
-except:
- __modo_grafico__ = True
- __perfil__ = 0
-
-# Set the color profile
-perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
- ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
- ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
-
-if __perfil__ < 3:
- color1, color2, color3, color4, color5 = perfil[__perfil__]
-else:
- color1 = color2 = color3 = color4 = color5 = ""
-
-__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
-__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
-
-headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
- ['Referer', host]]#,['Accept-Language','it-IT,it;q=0.8,en-US;q=0.5,en;q=0.3']]
-
-parameters = channeltools.get_channel_parameters(__channel__)
-fanart_host = parameters['fanart']
-thumbnail_host = parameters['thumbnail']
-
-IDIOMAS = {'Italiano': 'IT', 'VOSI':'SUB ITA'}
+IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
-# for autoplay
-list_servers = ['openload', 'speedvideo', 'wstream', 'streamango' 'flashx', 'nowvideo']
-list_quality = ['default']
+list_servers = ['verystream', 'wstream', 'speedvideo', 'flashx', 'nowvideo', 'streamango', 'deltabit', 'openload']
+list_quality = ['default']
-# =========== home menu ===================
+__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'eurostreaming')
+__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'eurostreaming')
def mainlist(item):
- logger.info("icarus.eurostreaming mainlist")
+ support.log()
itemlist = []
- title = ''
+
+    support.menu(itemlist, 'Serie TV', 'serietv', host, 'episode') # always use 'episode' for serietv and anime!!
+ support.menu(itemlist, 'Serie TV Archivio submenu', 'serietv', host + "category/serie-tv-archive/", 'episode')
+ support.menu(itemlist, 'Ultimi Aggiornamenti submenu', 'serietv', host + 'aggiornamento-episodi/', 'episode', args='True')
+ support.menu(itemlist, 'Anime / Cartoni', 'serietv', host + 'category/anime-cartoni-animati/', 'episode')
+ support.menu(itemlist, 'Cerca...', 'search', host, 'episode')
+    # required by autoplay
autoplay.init(item.channel, list_servers, list_quality)
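+    # A sketch of the autoplay flow as this channel wires it up (inferred only
+    # from the calls visible in this file, not from the full autoplay API):
+    #   autoplay.init(channel, list_servers, list_quality)  # register servers/qualities
+    #   autoplay.show_option(channel, itemlist)             # expose the autoplay entry in the menu
+    #   autoplay.start(itemlist, item)                      # in findvideos: try the servers in order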
-
- itemlist = [
- Item(channel=__channel__, title="Serie TV",
- contentTitle = __channel__, action="serietv",
- #extra="tvshow",
- text_color=color4,
- url="%s/category/serie-tv-archive/" % host,
- infoLabels={'plot': item.category},
- thumbnail = get_thumb(title, auto = True)
- ),
- Item(channel=__channel__, title="Ultimi Aggiornamenti",
- contentTitle = __channel__, action="elenco_aggiornamenti_serietv",
- text_color=color4, url="%saggiornamento-episodi/" % host,
- #category = __channel__,
- extra="tvshow",
- infoLabels={'plot': item.category},
- thumbnail = get_thumb(title, auto = True)
- ),
- Item(channel=__channel__,
- title="Anime / Cartoni",
- action="serietv",
- extra="tvshow",
- text_color=color4,
- url="%s/category/anime-cartoni-animati/" % host,
- thumbnail= get_thumb(title, auto = True)
- ),
- Item(channel=__channel__,
- title="[COLOR yellow]Cerca...[/COLOR]",
- action="search",
- extra="tvshow",
- text_color=color4,
- thumbnail= get_thumb(title, auto = True)
- ),
- ]
-
autoplay.show_option(item.channel, itemlist)
-
+
return itemlist
-# ======== defs in menu order ===========================
-def serietv(item):
-
- logger.info("%s serietv log: %s" % (__channel__, item))
+def serietv(item):
+ support.log()
itemlist = []
-    # Load the page
- data = httptools.downloadpage(item.url).data
-
-    # Extract the contents
-    patron = '<div class="post-thumb">\s*<a href="([^"]+)" title="([^"]+)">\s*<img src="([^"]+)"'
-    matches = scrapertoolsV2.find_multiple_matches(data, patron)
-    # [... listing loop and next-page pattern lost in extraction ...]
-    if len(matches) > 0:
- scrapedurl = urlparse.urljoin(item.url, matches[0])
- itemlist.append(
- Item(
- channel=item.channel,
- action="serietv",
- title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
- url=scrapedurl,
- thumbnail=
- "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
- extra=item.extra,
- folder=True))
+ if item.args:
+        # the episode titles get folded into 'episode', but they are not visible in newest!!!
+        patron = r'<li>(.*?)<a href="([^"]+)" target="_blank">.[^–](.*?)<\/a>'
+ listGroups = ['title', 'url', 'episode']
+ patronNext = ''
+ else:
+        patron = r'<img src="([^"]+)"[^>]*>.*?\s<h2><a href="([^"]+)">(.*?(?:\((\d{4})\)|(\d{4}))?)<\/a><\/h2>'
+        listGroups = ['thumb', 'url', 'title', 'year', 'year']
+        patronNext = '<a class="next page-numbers" href="?([^>"]+)">Avanti »'
+ itemlist = support.scrape(item, patron_block='', patron=patron, listGroups=listGroups,
+ patronNext=patronNext,
+ action='episodios')
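+    # Illustrative note (values hypothetical): support.scrape pairs each regex
+    # capture group with the key at the same position in listGroups, e.g.
+    #   patron     = r'<a href="([^"]+)">([^<]+)</a>'
+    #   listGroups = ['url', 'title']
+    # fills scraped['url'] from group 1 and scraped['title'] from group 2,
+    # which is how 'episode' above ends up in the item's long title.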
return itemlist
def episodios(item):
- #logger.info("%s episodios log: %s" % (__channel__, item))
+ support.log()
itemlist = []
-
- if not(item.lang):
- lang_season = {'ITA':0, 'SUB ITA' :0}
-        # Download the page
+
+    # Load the page
+ data = httptools.downloadpage(item.url).data
+ #========
+ if 'clicca qui per aprire' in data.lower():
+ item.url = scrapertoolsV2.find_single_match(data, '"go_to":"(.*?)"')
+ item.url = item.url.replace("\\","")
+        # Load the page again after the redirect
data = httptools.downloadpage(item.url).data
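+        # Hypothetical example of the redirect payload unpacked here: the
+        # "clicca qui per aprire" page embeds a JSON-escaped target such as
+        #   "go_to":"https:\/\/eurostreaming.cafe\/serie\/..."
+        # find_single_match pulls out the escaped url and replace("\\", "")
+        # turns it into a plain url that can be downloaded again.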
- #========
- if 'clicca qui per aprire' in data.lower():
- logger.info("%s CLICCA QUI PER APRIRE GLI EPISODI log: %s" % (__channel__, item))
- item.url = scrapertoolsV2.find_single_match(data, '"go_to":"(.*?)"')
- item.url = item.url.replace("\\","")
-        # Load the page
- data = httptools.downloadpage(item.url).data
- #logger.info("%s FINE CLICCA QUI PER APRIRE GLI EPISODI log: %s" % (__channel__, item))
- elif 'clicca qui' in data.lower():
- logger.info("%s inizio CLICCA QUI log: %s" % (__channel__, item))
-        item.url = scrapertoolsV2.find_single_match(data, '<h2 style=".*?"><a href="([^"]+)">')
-        data = httptools.downloadpage(item.url).data
+
+    matches = scrapertoolsV2.find_multiple_matches(data, r'<div class="su-spoiler[^"]*">(.*?)</div></div>')
+    for match in matches:
+        blocks = scrapertoolsV2.find_multiple_matches(match, r'(?:(\d×[a-zA-Z0-9].*?))<br')
+ season_lang = scrapertoolsV2.find_single_match(match, r'<\/span>.*?STAGIONE\s+\d+\s\(([^<>]+)\)').strip()
+
+ logger.info("blocks log: %s" % ( blocks ))
+ for block in blocks:
+ season_n, episode_n = scrapertoolsV2.find_single_match(block, r'(\d+)(?:×|×)(\d+)')
+ titolo = scrapertoolsV2.find_single_match(block, r'[]\d+[ ](?:([a-zA-Z0-9;\s]+))[ ]?(?:[^<>])')
+ logger.info("block log: %s" % ( block ))
-    # posters, plot and more from tmdb; if the year is present the lookup improves
- tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='it')
-
- return itemlist
+ titolo = re.sub(r'×|×', "x", titolo).replace("’","'")
+        item.infoLabels['season'] = season_n # lets you see the plot of the season and...
+        item.infoLabels['episode'] = episode_n # lets you see the plot of the episode and...
+
+ itemlist.append(
+ Item(channel=item.channel,
+ action="findvideos",
+ contentType=item.contentType,
+ title="[B]" + season_n + "x" + episode_n + " " + titolo + "[/B] " + season_lang,
+                 fulltitle=item.title, # title shown on the video
+                 show=titolo + ":" + season_n + "x" + episode_n, # sub-caption shown on the video
+ url=block,
+ extra=item.extra,
+ thumbnail=item.thumbnail,
+ infoLabels=item.infoLabels
+ ))
+
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
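+    # With infoLabels['season'] and infoLabels['episode'] set on every Item,
+    # the TMDb lookup can resolve episode-level metadata instead of only
+    # show-level data. Minimal sketch (values hypothetical):
+    #   ep = Item(contentType='episode', infoLabels={'season': 1, 'episode': 2})
+    #   tmdb.set_infoLabels_itemlist([ep], seekTmdb=True)
+    #   # ep.infoLabels['plot'] then holds the episode plot when TMDb has it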
+
+ support.videolibrary(itemlist, item)
- else:
-        # the episodes of each season go here
- html = item.data
- logger.info("%s else log: [%s]" % (__channel__, item))
+ return itemlist
- if item.lang == 'SUB ITA':
- item.lang = '\(SUB ITA\)'
- logger.info("%s item.lang log: %s" % (__channel__, item.lang))
-        bloque = scrapertoolsV2.find_single_match(html, '<div class="entry">(.*?)</div>')
-        patron = '<p>.*?' + item.lang + '</p>(.*?)'  # read all the seasons
- #logger.info("%s patronpatron log: %s" % (__channel__, patron))
- matches = scrapertoolsV2.find_multiple_matches(bloque, patron)
- for scrapedseason in matches:
- #logger.info("%s scrapedseasonscrapedseason log: %s" % (__channel__, scrapedseason))
-            scrapedseason = scrapedseason.replace('<strong>', '').replace('</strong>', '')
-            patron = '(\d+)×(\d+)(.*?)<(.*?)<br'  # season - episode - title - link group
- matches = scrapertoolsV2.find_multiple_matches(scrapedseason, patron)
- for scrapedseason, scrapedpuntata, scrapedtitolo, scrapedgroupurl in matches:
- #logger.info("%s finale log: %s" % (__channel__, patron))
- scrapedtitolo = scrapedtitolo.replace('–','')
- itemlist.append(Item(channel = item.channel,
- action = "findvideos",
- contentType = "episode",
- #contentSerieName = item.contentSerieName,
- contentTitle = scrapedtitolo,
- title = '%sx%s %s' % (scrapedseason, scrapedpuntata, scrapedtitolo),
- url = scrapedgroupurl,
- fulltitle = item.fulltitle,
- #show = item.show,
- #folder = True,
- ))
-
- logger.info("%s itemlistitemlist log: %s" % (__channel__, itemlist))
-
-        # "Add this movie to the KODI library" option
- if item.extra != "library":
- if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
- itemlist.append(Item(channel=item.channel, title="%s" % config.get_localized_string(30161),
- text_color="green", extra="episodios",
- action="add_serie_to_library", url=item.url,
- thumbnail= get_thumb('videolibrary', auto = True),
- contentTitle=item.contentSerieName, lang = item.lang,
- show=item.show, data = html
- #, infoLabels = item.infoLabels
- ))
-
- return itemlist
# =========== search def =============
def search(item, texto):
- #logger.info("[eurostreaming.py] " + item.url + " search " + texto)
- logger.info("%s search log: %s" % (__channel__, item))
+ support.log()
item.url = "%s?s=%s" % (host, texto)
+
try:
return serietv(item)
# Continue the search in case of error
@@ -287,16 +139,16 @@ def search(item, texto):
-# =========== new-arrivals def for global search =============
def newest(categoria):
- logger.info("%s newest log: %s" % (__channel__, categoria))
+ support.log()
itemlist = []
item = Item()
try:
-
+        item.args = 'True'
item.url = "%saggiornamento-episodi/" % host
- item.action = "elenco_aggiornamenti_serietv"
- itemlist = elenco_aggiornamenti_serietv(item)
+ item.action = "serietv"
+ itemlist = serietv(item)
- if itemlist[-1].action == "elenco_aggiornamenti_serietv":
+ if itemlist[-1].action == "serietv":
itemlist.pop()
# Continue the search in case of error
@@ -308,99 +160,38 @@ def newest(categoria):
return itemlist
-# =========== updates page def =============
-
-# ======== Latest Updates ===========================
-def elenco_aggiornamenti_serietv(item):
- """
-    def for the list of updates
- """
- logger.info("%s elenco_aggiornamenti_serietv log: %s" % (__channel__, item))
- itemlist = []
-
-    # Load the page
- data = httptools.downloadpage(item.url).data
-
-    # Extract the contents
-    #bloque = scrapertoolsV2.get_match(data, '<div class="entry">(.*?)</div>')
-    bloque = scrapertoolsV2.find_single_match(data, '<ul class="lcp_catlist">(.*?)</ul>')
-    patron = '<li>(.*?)<.*?href="(.*?)".*?>(.*?)<'
- matches = scrapertoolsV2.find_multiple_matches(bloque, patron)
-
- for scrapedtitle, scrapedurl, scrapedepisodies in matches:
- if "(SUB ITA)" in scrapedepisodies.upper():
- lang = "SUB ITA"
- scrapedepisodies = scrapedepisodies.replace('(SUB ITA)','')
- else:
- lang = "ITA"
- scrapedepisodies = scrapedepisodies.replace(lang,'')
- #num = scrapertoolsV2.find_single_match(scrapedepisodies, '(-\d+/)')
- #if num:
- # scrapedurl = scrapedurl.replace(num, "-episodi/")
- scrapedtitle = scrapedtitle.replace("–", "").replace('\xe2\x80\x93 ','').strip()
- scrapedepisodies = scrapedepisodies.replace('\xe2\x80\x93 ','').strip()
- itemlist.append(
- Item(
- channel=item.channel,
- action="episodios",
- contentType="tvshow",
- title = "%s" % scrapedtitle, # %s" % (scrapedtitle, scrapedepisodies),
- fulltitle = "%s %s" % (scrapedtitle, scrapedepisodies),
- text_color = color5,
- url = scrapedurl,
- #show = "%s %s" % (scrapedtitle, scrapedepisodies),
- extra=item.extra,
- #lang = lang,
- #data = data,
- folder=True))
-
-    # posters, plot and more from tmdb; if the year is present the lookup improves
- tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='it')
-
- return itemlist
-
-# =========== def to find the videos =============
-
def findvideos(item):
- logger.info("%s findvideos log: %s" % (__channel__, item))
- itemlist = []
-
-    # Load the page
- data = item.url
-
-    matches = re.findall(r'<a href="([^"]+)"[^>]*>[^<]+', data, re.DOTALL)
-
- data = []
- for url in matches:
- url, c = unshortenit.unshorten(url)
- data.append(url)
-
- try:
- itemlist = servertools.find_video_items(data=str(data))
-
- for videoitem in itemlist:
- logger.info("Videoitemlist2: %s" % videoitem)
- videoitem.title = "%s [%s]" % (item.contentTitle, videoitem.title)#"[%s] %s" % (videoitem.server, item.title) #"[%s]" % (videoitem.title)
- videoitem.show = item.show
- videoitem.contentTitle = item.contentTitle
- videoitem.contentType = item.contentType
- videoitem.channel = item.channel
- videoitem.text_color = color5
- #videoitem.language = item.language
- videoitem.year = item.infoLabels['year']
- videoitem.infoLabels['plot'] = item.infoLabels['plot']
- except AttributeError:
- logger.error("data doesn't contain expected URL")
-
-    # Check whether the links are valid
- if __comprueba_enlaces__:
- itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
-
-    # Required for FilterTools
- # itemlist = filtertools.get_links(itemlist, item, list_language)
-
-    # Required for AutoPlay
- autoplay.start(itemlist, item)
-
+ support.log()
+    itemlist = []
+
+ itemlist = support.server(item, item.url)
+
+ """
+ Questa parte funziona se non vanno bene le modifiche a support
+ """
+## support.log()
+## itemlist =[]
+## data= ''
+## logger.info("Url item.url: [%s] " % item.url)
+##
+## urls = scrapertoolsV2.find_multiple_matches(item.url, r'href="([^"]+)"')
+## itemlist = servertools.find_video_items(data=str(urls))
+##
+## for videoitem in itemlist:
+## videoitem.title = item.title + ' - [COLOR limegreen][[/COLOR]'+ videoitem.title+ ' [COLOR limegreen]][/COLOR]'
+## videoitem.fulltitle = item.fulltitle
+## videoitem.thumbnail = item.thumbnail
+## videoitem.show = item.show
+## videoitem.plot = item.plot
+## videoitem.channel = item.channel
+## videoitem.contentType = item.contentType
+##
+## # Check whether the links are valid
+## if __comprueba_enlaces__:
+## itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
+##
+## # required for AutoPlay
+## autoplay.start(itemlist, item)
+
return itemlist
diff --git a/channels/support.py b/channels/support.py
index 1bdec0f9..139cc0b8 100644
--- a/channels/support.py
+++ b/channels/support.py
@@ -136,7 +136,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
matches = scrapertoolsV2.find_multiple_matches(block, patron)
log('MATCHES =', matches)
- known_keys = ['url', 'title', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating']
+    known_keys = ['url', 'title', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating'] # by greko: added 'episode'
for match in matches:
if len(listGroups) > len(match): # to fix a bug
match = list(match)
@@ -152,8 +152,10 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
title = scrapertoolsV2.decodeHtmlentities(scraped["title"]).strip()
plot = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["plot"]))
- if scraped["quality"]:
- longtitle = '[B]' + title + '[/B] [COLOR blue][' + scraped["quality"] + '][/COLOR]'
+        if scraped["quality"] and scraped["episode"]: # by greko: added episode handling
+            longtitle = '[B]' + title + '[/B] - [B]' + scraped["episode"] + '[/B] [COLOR blue][' + scraped["quality"] + '][/COLOR]'
+        elif scraped["episode"]:
+            longtitle = '[B]' + title + '[/B] - [B]' + scraped["episode"] + '[/B]'
else:
longtitle = '[B]' + title + '[/B]'
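+        # Resulting long titles, by way of example (values hypothetical):
+        #   episode + quality -> '[B]Show[/B] - [B]2x05[/B] [COLOR blue][HD][/COLOR]'
+        #   episode only      -> '[B]Show[/B] - [B]2x05[/B]'
+        #   neither           -> '[B]Show[/B]'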
@@ -438,7 +440,7 @@ def match(item, patron='', patron_block='', headers='', url=''):
def videolibrary(itemlist, item, typography=''):
- if item.contentType == 'movie':
+ if item.contentType != 'episode':
action = 'add_pelicula_to_library'
extra = 'findvideos'
contentType = 'movie'
@@ -448,28 +450,25 @@ def videolibrary(itemlist, item, typography=''):
contentType = 'tvshow'
title = typo(config.get_localized_string(30161) + ' ' + typography)
- if inspect.stack()[1][3] == 'findvideos' and contentType == 'movie' or inspect.stack()[1][3] != 'findvideos' and contentType != 'movie':
- if config.get_videolibrary_support() and len(itemlist) > 0:
- itemlist.append(
- Item(channel=item.channel,
- title=title,
- contentType=contentType,
- contentSerieName=item.fulltitle if contentType == 'tvshow' else '',
- url=item.url,
- action=action,
- extra=extra,
- contentTitle=item.fulltitle))
- return itemlist
+ if config.get_videolibrary_support() and len(itemlist) > 0:
+ itemlist.append(
+ Item(channel=item.channel,
+ title=title,
+ contentType=contentType,
+ contentSerieName=item.fulltitle if contentType == 'tvshow' else '',
+ url=item.url,
+ action=action,
+ extra=extra,
+ contentTitle=item.fulltitle))
+
def nextPage(itemlist, item, data, patron, function_level=1):
# Function_level is useful if the function is called by another function.
# If the call is direct, leave it blank
next_page = scrapertoolsV2.find_single_match(data, patron)
-
- if next_page != "":
- if 'http' not in next_page:
- next_page = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + next_page
+    if next_page != "" and 'http' not in next_page:
+        next_page = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + next_page
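+    # Example of the join performed above (urls hypothetical): with
+    #   item.url  = 'https://eurostreaming.cafe/category/serie-tv-archive/'
+    #   next_page = '/category/serie-tv-archive/page/2/'
+    # the match on 'https?://[a-z0-9.-]+' yields 'https://eurostreaming.cafe',
+    # so next_page becomes an absolute url before the next-page Item is built.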
log('NEXT= ', next_page)
if next_page != "":
@@ -484,17 +483,24 @@ def nextPage(itemlist, item, data, patron, function_level=1):
return itemlist
-
def server(item, data='', headers='', AutoPlay=True, CheckLinks=True):
-
+
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', item.channel)
log(__comprueba_enlaces__ )
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', item.channel)
log(__comprueba_enlaces_num__ )
-
+
if not data:
data = httptools.downloadpage(item.url, headers=headers).data
-
+    ## fix by greko
+    # if the caller passed a block (list) of urls in which to look for the
+    # videos, flatten it to a single string for find_video_items; a plain
+    # string (single url or downloaded page) is used as-is
+    if isinstance(data, list):
+        data = str(data)
+    ## END fix by greko
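+    # Both call shapes the fix above is meant to cover (urls hypothetical):
+    #   support.server(item, ['http://host/a', 'http://host/b'])  # block of urls
+    #   support.server(item)  # no data: the page at item.url is downloaded
+    # find_video_items only needs one string to scan, hence str() on the list.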
+
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
@@ -529,4 +535,4 @@ def log(stringa1="", stringa2="", stringa3="", stringa4="", stringa5=""):
frame = inspect.stack()[1]
filename = frame[0].f_code.co_filename
filename = os.path.basename(filename)
- logger.info("[" + filename + "] - [" + inspect.stack()[1][3] + "] " + str(stringa1) + str(stringa2) + str(stringa3) + str(stringa4) + str(stringa5))
\ No newline at end of file
+ logger.info("[" + filename + "] - [" + inspect.stack()[1][3] + "] " + str(stringa1) + str(stringa2) + str(stringa3) + str(stringa4) + str(stringa5))
diff --git a/channelselector.py b/channelselector.py
index 4286eb8c..87323a90 100644
--- a/channelselector.py
+++ b/channelselector.py
@@ -96,6 +96,7 @@ def getchanneltypes(view="thumb_"):
# viewmode="thumbnails"))
+
itemlist.append(Item(title=config.get_localized_string(70685), channel="community", action="mainlist", view=view,
category=title, channel_type="all", thumbnail=get_thumb("channels_community.png", view),
viewmode="thumbnails"))
diff --git a/servers/decrypters/zcrypt.py b/servers/decrypters/zcrypt.py
index 4956587b..0b69451e 100644
--- a/servers/decrypters/zcrypt.py
+++ b/servers/decrypters/zcrypt.py
@@ -44,10 +44,18 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
elif 'vcrypt.net' in url:
from lib import unshortenit
data, status = unshortenit.unshorten(url)
-
+ logger.info("Data - Status zcrypt vcrypt.net: [%s] [%s] " %(data, status))
elif 'linkup' in url:
idata = httptools.downloadpage(url).data
data = scrapertoolsV2.find_single_match(idata, "<iframe[^>]+src='([^']+)'")