Merge branch 'master' of git://github.com/kodiondemand/addon into kodiondemand-master

greko
2019-05-27 00:40:10 +02:00
6 changed files with 130 additions and 380 deletions

View File (channels/cineblog01.py)

@@ -25,7 +25,7 @@ def findhost():
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'streamango', 'wstream']
list_quality = ['HD', 'default']
list_quality = ['HD', 'SD', 'default']
checklinks = config.get_setting('checklinks', 'cineblog01')
checklinks_number = config.get_setting('checklinks_number', 'cineblog01')
@@ -33,7 +33,7 @@ checklinks_number = config.get_setting('checklinks_number', 'cineblog01')
# exclusion of 'service' articles
blacklist = ['BENVENUTI', 'Richieste Serie TV', 'CB01.UNO ▶ TROVA L’INDIRIZZO UFFICIALE ',
'Aggiornamento Quotidiano Serie TV', 'OSCAR 2019 ▶ CB01.UNO: Vota il tuo film preferito! 🎬',
'Openload: la situazione. Benvenuto Verystream']
'Openload: la situazione. Benvenuto Verystream', 'Openload: lo volete ancora?']
def mainlist(item):
@@ -230,11 +230,10 @@ def findvideos(item):
matches = re.compile(patron, re.DOTALL).findall(streaming)
for scrapedurl, scrapedtitle in matches:
logger.debug("##### findvideos %s ## %s ## %s ##" % (desc_txt, scrapedurl, scrapedtitle))
title = "[COLOR " + color + "]" + desc_txt + ":[/COLOR] " + item.fulltitle + " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]"
itemlist.append(
Item(channel=item.channel,
action="play",
title=title,
title=scrapedtitle,
url=scrapedurl,
server=scrapedtitle,
fulltitle=item.fulltitle,
@@ -265,37 +264,18 @@ def findvideos(item):
# Extract the contents - Streaming HD
load_links(itemlist, '<strong>Streaming HD[^<]+</strong>(.*?)<table class=cbtable height=30>', "yellow", "Streaming HD", "HD")
autoplay.start(itemlist, item)
# Extract the contents - Streaming 3D
load_links(itemlist, '<strong>Streaming 3D[^<]+</strong>(.*?)<table class=cbtable height=30>', "pink", "Streaming 3D")
support.videolibrary(itemlist, item)
return support.server(item, itemlist=itemlist)
# Extract the contents - Download
# load_links(itemlist, '<strong>Download:</strong>(.*?)<table class=cbtable height=30>', "aqua", "Download")
# Extract the contents - Download HD
# load_links(itemlist, '<strong>Download HD[^<]+</strong>(.*?)<table class=cbtable width=100% height=20>', "azure", "Download HD")
if len(itemlist) == 0:
itemlist = servertools.find_video_items(item=item)
# Required to filter links
if checklinks:
itemlist = servertools.check_list_links(itemlist, checklinks_number)
# Required for FilterTools
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
support.videolibrary(itemlist, item)
return itemlist
def findvid_serie(item):
def load_vid_series(html, item, itemlist, blktxt):
@@ -306,11 +286,11 @@ def findvid_serie(item):
for match in matches:
scrapedurl = match.group(1)
scrapedtitle = match.group(2)
title = item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]"
# title = item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]"
itemlist.append(
Item(channel=item.channel,
action="play",
title=title,
title=scrapedtitle,
url=scrapedurl,
server=scrapedtitle,
fulltitle=item.fulltitle,
@@ -354,9 +334,7 @@ def findvid_serie(item):
else:
load_vid_series(data[lnkblkp[i]:lnkblkp[i + 1]], item, itemlist, lnkblk[i])
autoplay.start(itemlist, item)
return itemlist
return support.server(item, itemlist=itemlist)
def play(item):
@@ -390,4 +368,4 @@ def play(item):
else:
data = support.swzz_get_url(item)
return support.server(item, data, headers)
return servertools.find_video_items(data=data)
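Both cineblog01 hunks converge on the same refactor: the channel no longer runs checklinks, autoplay and the videolibrary entry by hand, it hands the scraped links (or the raw page data) to support.server, which centralizes those steps (see the core/support.py hunks further down). A hedged sketch of the resulting shape, with the channel-specific scraping elided:

from core import support

def findvideos(item):
    itemlist = []
    # ... channel-specific scraping appends playable Items here ...
    return support.server(item, itemlist=itemlist)

def play(item):
    data = support.swzz_get_url(item)  # resolves the intermediate link page
    return support.server(item, data)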

View File

@@ -56,7 +56,7 @@ def serietv(item):
if item.args:
# the episode titles get merged into episode but they are not visible in newest!!!
patron = r'<span class="serieTitle" style="font-size:20px">(.*?).[^]<a href="([^"]+)"\s+target="_blank">(.*?)<\/a>'
listGroups = ['title', 'url', 'episode']
listGroups = ['title', 'url', 'title2']
patronNext = ''
else:
patron = r'<div class="post-thumb">.*?\s<img src="([^"]+)".*?><a href="([^"]+)".*?>(.*?(?:\((\d{4})\)|(\d{4}))?)<\/a><\/h2>'
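Renaming the third group from 'episode' to 'title2' matters because support.scrape maps regex groups onto its known keys by position: 'episode' gets reformatted into an NxM prefix, while 'title2' is appended to the long title (see the core/support.py hunk below). A minimal sketch of that positional mapping, assuming the behaviour shown there:

import re

patron = r'<a href="([^"]+)">([^<]+)</a>'
listGroups = ['url', 'title']
match = re.findall(patron, '<a href="/ep1">Episodio 1</a>')[0]
scraped = dict(zip(listGroups, match))
assert scraped == {'url': '/ep1', 'title': 'Episodio 1'}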

View File (channels/ilgeniodellostreaming.py)

@@ -4,83 +4,51 @@
# Channel for ilgeniodellostreaming
# ------------------------------------------------------------
import re
import urlparse
from core import scrapertools, servertools, httptools
from core import tmdb
from platformcode import logger
from core import scrapertoolsV2, httptools, tmdb, support
from core.support import log, menu, aplay
from core.item import Item
from platformcode import config, logger
from specials import autoplay
__channel__ = "ilgeniodellostreaming"
host = "https://ilgeniodellostreaming.pw"
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'streamango', 'youtube']
list_servers = ['verystream', 'openload', 'streamango']
list_quality = ['default']
checklinks = config.get_setting('checklinks', 'ilgeniodellostreaming')
checklinks_number = config.get_setting('checklinks_number', 'ilgeniodellostreaming')
headers = [['Referer', host]]
PERPAGE = 10
def mainlist(item):
logger.info("kod.ilgeniodellostreaming mainlist")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [Item(channel=__channel__,
title="[COLOR azure]Ultimi Film Inseriti[/COLOR]",
action="peliculas",
url="%s/film/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=__channel__,
title="[COLOR azure]Film Per Categoria[/COLOR]",
action="categorias",
url=host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=__channel__,
title="[COLOR azure]Serie TV[/COLOR]",
action="serie",
url="%s/serie/" % host,
thumbnail="http://www.ilmioprofessionista.it/wp-content/uploads/2015/04/TVSeries3.png"),
Item(channel=__channel__,
title="[COLOR azure]Nuovi Episodi Serie TV[/COLOR]",
action="nuoviep",
url="%s/aggiornamenti-serie/" % host,
thumbnail="http://www.ilmioprofessionista.it/wp-content/uploads/2015/04/TVSeries3.png"),
Item(channel=__channel__,
title="[COLOR azure]Anime[/COLOR]",
action="serie",
url="%s/anime/" % host,
thumbnail="http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"),
Item(channel=__channel__,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
extra="movie",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
autoplay.show_option(item.channel, itemlist)
log()
itemlist = []
menu(itemlist, 'Film', 'peliculas', host + '/film/')
menu(itemlist, 'Film Per Categoria', 'category', host, args='genres')
menu(itemlist, 'Film Per Anno', 'category', host, args='year')
menu(itemlist, 'Serie TV', 'peliculas', host + '/serie/', 'episode')
menu(itemlist, 'Nuovi Episodi Serie TV submenu', 'newep', host + '/aggiornamenti-serie/', 'episode')
menu(itemlist, 'Anime', 'peliculas', host + '/anime/', 'episode')
menu(itemlist, 'TV Show', 'peliculas', host + '/tv-show/', 'episode')
menu(itemlist, 'Cerca...', 'search', contentType='search')
aplay(item, itemlist, list_servers, list_quality)
return itemlist
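The hand-built Item list is replaced by one support.menu call per entry. A minimal sketch of what menu is assumed to do (the real helper lives in core/support.py and is not part of this diff; signature and defaults here are guesses from the call sites above):

from core.item import Item

def menu(itemlist, title, action, url='', contentType='movie', args='', channel='ilgeniodellostreaming'):
    # assumption: append one styled entry; the real helper also applies
    # typo() formatting and handles submenu entries
    itemlist.append(Item(channel=channel, title=title, action=action, url=url,
                         contentType=contentType, args=args))
    return itemlist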
def newest(categoria):
logger.info("kod.ilgeniodellostreaming newest" + categoria)
log(categoria)
itemlist = []
item = Item()
try:
if categoria == "film":
item.url = "%s/film/" % host
item.action = "peliculas"
itemlist = peliculas(item)
if categoria == "movie": item.url = host + '/film/'
elif categoria == "tvshow": item.url = host + '/serie/'
elif categoria == "anime": item.url = host + '/anime/'
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Continue the search in case of error
except:
import sys
@@ -91,37 +59,16 @@ def newest(categoria):
return itemlist
def categorias(item):
logger.info("kod.ilgeniodellostreaming categorias")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="genres scrolling">(.*?)</ul>')
# Extract the contents
patron = '<li[^>]+><a href="(.*?)"[^>]+>(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
logger.info("title=[" + scrapedtitle + "]")
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png",
folder=True))
return itemlist
def category(item):
return support.scrape(item, r'<li.*?><a href="(.*?)"[^>]+>(.*?)<\/a>', ['url', 'title'], action='peliculas', patron_block=r'<ul class="' + item.args + r' scrolling">(.*?)<\/ul>')
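category() narrows the page with patron_block before applying the item pattern. A self-contained sketch of that two-step match (simplified; support.scrape itself does much more):

import re

def scrape_block(data, patron_block, patron):
    block = re.search(patron_block, data, re.DOTALL)
    return re.findall(patron, block.group(1), re.DOTALL) if block else []

html = '<ul class="genres scrolling"><li ><a href="/azione" class="cat">Azione</a></ul>'
print(scrape_block(html, r'<ul class="genres scrolling">(.*?)<\/ul>',
                   r'<li.*?><a href="(.*?)"[^>]+>(.*?)<\/a>'))
# [('/azione', 'Azione')]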
def search(item, texto):
logger.info("[ilgeniodellostreaming.py] " + item.url + " search " + texto)
log(texto)
item.url = host + "/?s=" + texto
try:
return peliculas_src(item)
return peliculas(item)
except:
import sys
@@ -132,262 +79,57 @@ def search(item, texto):
def peliculas_src(item):
logger.info("kod.ilgeniodellostreaming peliculas")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
patron = '<div class="thumbnail animation-2"><a href="(.*?)"><img src="(.*?)" alt="(.*?)" />[^>]+>(.*?)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtipo in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
if scrapedtipo == "TV":
itemlist.append(
Item(channel=__channel__,
action="episodios",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
folder=True))
else:
itemlist.append(
Item(channel=__channel__,
action="findvideos",
contentType="movie",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
patron = r'<div class="thumbnail animation-2"><a href="([^"]+)"><img src="([^"]+)" alt="[^"]+" \/>[^>]+>([^<]+)<\/span>.*?<a href.*?>([^<]+)<\/a>[^>]+>[^>]+>(?:<span class="rating">IMDb\s*([0-9.]+)<\/span>)?.*?(?:<span class="year">([0-9]+)<\/span>)?[^>]+>[^>]+><p>(.*?)<\/p>'
return support.scrape(item, patron, ['url', 'thumb', 'type', 'title', 'rating', 'year', 'plot'], headers, type_content_dict={'movie':['Film'], 'episode':['TV']}, type_action_dict={'findvideos':['Film'], 'episodios':['TV']})
def peliculas(item):
logger.info("kod.ilgeniodellostreaming peliculas")
if item.contentType == 'movie':
patron = r'<div class="poster">\s*<a href="([^"]+)"><img src="([^"]+)" alt="[^"]+"><\/a>[^>]+>[^>]+>[^>]+>\s*([0-9.]+)<\/div><span class="quality">([^<]+)<\/span>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<\/a>[^>]+>[^>]+>([^<]+)<\/span>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<div'
return support.scrape(item, patron, ['url', 'thumb', 'rating', 'quality', 'title', 'year', 'plot'], headers, patronNext='<span class="current">[^<]+<[^>]+><a href="([^"]+)"')
elif item.contentType == 'episode':
patron = r'<div class="poster">\s*<a href="([^"]+)"><img src="([^"]+)" alt="[^"]+"><\/a>[^>]+>[^>]+>[^>]+> ([0-9.]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>([^<]+)<.*?<div class="texto">([^<]+)'
return support.scrape(item, patron, ['url', 'thumb', 'rating', 'title', 'year', 'plot'], headers, action='episodios', patronNext='<span class="current">[^<]+<[^>]+><a href="([^"]+)"')
else:
patron = r'<div class="thumbnail animation-2"><a href="([^"]+)"><img src="([^"]+)" alt="[^"]+" \/>[^>]+>([^<]+)<\/span>.*?<a href.*?>([^<]+)<\/a>[^>]+>[^>]+>(?:<span class="rating">IMDb\s*([0-9.]+)<\/span>)?.*?(?:<span class="year">([0-9]+)<\/span>)?[^>]+>[^>]+><p>(.*?)<\/p>'
return support.scrape(item, patron, ['url', 'thumb', 'type', 'title', 'rating', 'year', 'plot'], headers, type_content_dict={'movie':['Film'], 'episode':['TV']}, type_action_dict={'findvideos':['Film'], 'episodios':['TV']})
def newep(item):
log()
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
page = 1
if item.page:
page = item.page
# Extract the contents
patron = '<div class="poster">\s*<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"></a>'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = support.match(item, r'<div class="poster"><img src="([^"]+)" alt="([^"]+)">[^>]+><a href="([^"]+)">')[0]
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
for i, (thumb, title, url) in enumerate(matches):
if (page - 1) * PERPAGE > i: continue
if i >= page * PERPAGE: break
title = scrapertoolsV2.decodeHtmlentities(title)
itemlist.append(
Item(channel=__channel__,
Item(channel=item.channel,
action="findvideos",
contentType="movie",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True))
# Pagination
patronvideos = '<span class="current">[^<]+<[^>]+><a href=\'(.*?)\''
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title="[COLOR orange]Successivo >>[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
fulltitle=title,
show=title,
title=support.typo(title, 'bold'),
url=url,
thumbnail=thumb))
support.pagination(itemlist, item, page, PERPAGE)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
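newep() pages through the full match list in memory instead of following site pagination links: only indices inside the current window become Items, and support.pagination (added in core/support.py below) appends the "next page" entry when the window filled up. A worked example of the window arithmetic:

PERPAGE = 10
matches = ['ep%d' % i for i in range(35)]
page = 2
window = [m for i, m in enumerate(matches)
          if (page - 1) * PERPAGE <= i < page * PERPAGE]
assert window == ['ep%d' % i for i in range(10, 20)]  # second page: items 10..19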
def nuoviep(item):
logger.info("kod.ilgeniodellostreaming nuoviep")
itemlist = []
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
# Load the page
data = httptools.downloadpage(item.url).data
#blocco = scrapertools.find_single_match(data,
# r'<div class="items" style="margin-bottom:0px!important">(.*?)<div class="items" style="margin-bottom:0px!important">')
# Extract the contents
patron = r'<div class="poster"><img src="([^"]+)" alt="([^"]+)">[^>]+><a href="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
for i, (scrapedthumbnail, scrapedtitle, scrapedurl) in enumerate(matches):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=__channel__,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True))
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=__channel__,
extra=item.extra,
action="nuoviep",
title="[COLOR orange]Successivo >>[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def serie(item):
logger.info("kod.ilgeniodellostreaming peliculas")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
# Extract the contents
patron = '<div class="poster">\s*<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"></a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=__channel__,
action="episodios",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True))
# Pagination
patronvideos = '<span class="current">[^<]+<[^>]+><a href=\'(.*?)\''
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title="[COLOR orange]Successivo >>[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def episodios(item):
logger.info("kod.ilgeniodellostreaming episodios")
itemlist = []
patron = '<ul class="episodios">.*?</ul>'
data = httptools.downloadpage(item.url).data
matches = re.compile(patron, re.DOTALL).findall(data)
for match in matches:
patron = '<li><div class="imagen"><a href="(.*?)">[^>]+>[^>]+>[^>]+><.*?numerando">(.*?)<[^>]+>[^>]+>[^>]+>(.*?)</a>'
episodi = re.compile(patron, re.DOTALL).findall(match)
for scrapedurl, scrapednumber, scrapedtitle in episodi:
n0 = scrapednumber.replace(" ", "")
n1 = n0.replace("-", "x")
itemlist.append(Item(channel=__channel__,
action="findvideos",
contentType="episode",
fulltitle=n1 + " " + scrapedtitle,
show=n1 + " " + scrapedtitle,
title=n1 + " [COLOR orange] " + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=item.thumbnail,
plot=item.plot,
folder=True))
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=__channel__,
title=config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
return support.scrape(item, r'<a href="([^"]+)"><img src="([^"]+)">.*?<div class="numerando">([^<]+).*?<div class="episodiotitle">[^>]+>([^<]+)<\/a>',['url', 'thumb', 'episode', 'title'], patron_block='<div id="seasons">(.*?)<div class="sbox')
def findvideos(item):
logger.info()
data = httptools.downloadpage(item.url).data
patron = '<td><a class="link_a" href="(.*?)" target="_blank">'
matches = re.compile(patron, re.DOTALL).findall(data)
log()
matches, data = support.match(item, '<iframe class="metaframe rptss" src="([^"]+)"[^>]+>', headers=headers)
for url in matches:
html = httptools.downloadpage(url).data
data += str(scrapertools.find_multiple_matches(html, 'window.location.href=\'(.*?)\''))
html = httptools.downloadpage(url, headers=headers).data
data += str(scrapertoolsV2.find_multiple_matches(html, '<meta name="og:url" content="([^"]+)">'))
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title + " - " + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = __channel__
videoitem.contentType = item.contentType
videoitem.language = IDIOMAS['Italiano']
# Required to filter links
if checklinks:
itemlist = servertools.check_list_links(itemlist, checklinks_number)
# Required for FilterTools
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
return support.server(item, data)
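The new findvideos leans on support.match to fetch and pattern-match the page in one call. A hedged sketch of what that helper is assumed to do, based only on the call site above (the real implementation in core/support.py is not shown in this diff):

import re
from core import httptools

def match(item, patron, headers=None):
    # assumption: returns (matches, page_data), matching the unpacking above
    data = httptools.downloadpage(item.url, headers=headers).data
    return re.findall(patron, data, re.DOTALL), data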

View File (core/httptools.py)

@@ -78,18 +78,18 @@ def get_url_headers(url):
def load_cookies(alfa_s=False):
cookies_lock.acquire()
if os.path.isfile(ficherocookies):
if not alfa_s: logger.info("Leyendo fichero cookies")
if not alfa_s: logger.info("Reading cookies File")
try:
cj.load(ficherocookies, ignore_discard=True)
except:
if not alfa_s: logger.info("El fichero de cookies existe pero es ilegible, se borra")
if not alfa_s: logger.info("The cookie file exists but is illegible. Deleted")
os.remove(ficherocookies)
cookies_lock.release()
def save_cookies(alfa_s=False):
cookies_lock.acquire()
if not alfa_s: logger.info("Guardando cookies...")
if not alfa_s: logger.info("Saving cookies...")
cj.save(ficherocookies, ignore_discard=True)
cookies_lock.release()
@@ -243,18 +243,18 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
if not alfa_s:
logger.info("----------------------------------------------")
logger.info("downloadpage Alfa: %s" %__version)
logger.info("downloadpage KOD: %s" %__version)
logger.info("----------------------------------------------")
logger.info("Timeout: %s" % timeout)
logger.info("URL: " + url)
logger.info("Dominio: " + urlparse.urlparse(url)[1])
logger.info("Domain: " + urlparse.urlparse(url)[1])
if post:
logger.info("Peticion: POST" + proxy_stat)
logger.info("Request: POST" + proxy_stat)
else:
logger.info("Peticion: GET" + proxy_stat)
logger.info("Usar Cookies: %s" % cookies)
logger.info("Descargar Pagina: %s" % (not only_headers))
logger.info("Fichero de Cookies: " + ficherocookies)
logger.info("Request: GET" + proxy_stat)
logger.info("Use Cookies: %s" % cookies)
logger.info("Download Page: %s" % (not only_headers))
logger.info("Cookie File: " + ficherocookies)
logger.info("Headers:")
for header in request_headers:
logger.info("- %s: %s" % (header, request_headers[header]))
@@ -269,7 +269,7 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
opener = urllib2.build_opener(*handlers)
if not alfa_s:
logger.info("Realizando Peticion")
logger.info("Making Request")
# Counter
inicio = time.time()
@@ -321,7 +321,7 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
response["url"] = handle.geturl()
if not alfa_s:
logger.info("Terminado en %.2f segundos" % (response["time"]))
logger.info("Finished in %.2f seconds" % (response["time"]))
logger.info("Response sucess: %s" % (response["sucess"]))
logger.info("Response code: %s" % (response["code"]))
logger.info("Response error: %s" % (response["error"]))
@@ -350,22 +350,22 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
if response["headers"].get('content-encoding') == 'gzip':
if not alfa_s:
logger.info("Descomprimiendo...")
logger.info("Decompressing...")
data_alt = response["data"]
try:
response["data"] = gzip.GzipFile(fileobj=StringIO(response["data"])).read()
if not alfa_s:
logger.info("Descomprimido")
logger.info("Decompressed")
except:
if not alfa_s:
logger.info("No se ha podido descomprimir con gzip. Intentando con zlib")
logger.info("Could not decompress with gzip. Trying with zlib")
response["data"] = data_alt
try:
import zlib
response["data"] = zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(response["data"])
except:
if not alfa_s:
logger.info("No se ha podido descomprimir con zlib")
logger.info("Could not decompress with zlib")
response["data"] = data_alt
# Anti Cloudflare
@@ -375,16 +375,16 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
if cf.is_cloudflare:
count_retries += 1
if not alfa_s:
logger.info("cloudflare detectado, esperando %s segundos..." % cf.wait_time)
logger.info("CloudFlare detected, waiting %s seconds..." % cf.wait_time)
auth_url = cf.get_url()
if not alfa_s:
logger.info("Autorizando... intento %d url: %s" % (count_retries, auth_url))
logger.info("Authorizing... attempt %d url: %s" % (count_retries, auth_url))
tt = downloadpage(auth_url, headers=request_headers, replace_headers=True, count_retries=count_retries, ignore_response_code=True, count_retries_tot=count_retries_tot, proxy=proxy, proxy_web=proxy_web, forced_proxy=forced_proxy, proxy_addr_forced=proxy_addr_forced, alfa_s=alfa_s)
if tt.code == 403:
tt = downloadpage(url, headers=request_headers, replace_headers=True, count_retries=count_retries, ignore_response_code=True, count_retries_tot=count_retries_tot, proxy=proxy, proxy_web=proxy_web, forced_proxy=forced_proxy, proxy_addr_forced=proxy_addr_forced, alfa_s=alfa_s)
if tt.sucess:
if not alfa_s:
logger.info("Autorización correcta, descargando página")
logger.info("Correct authorization, downloading page")
resp = downloadpage(url=response["url"], post=post, headers=headers, timeout=timeout,
follow_redirects=follow_redirects, count_retries=count_retries,
cookies=cookies, replace_headers=replace_headers, add_referer=add_referer, proxy=proxy, proxy_web=proxy_web, count_retries_tot=count_retries_tot, forced_proxy=forced_proxy, proxy_addr_forced=proxy_addr_forced, alfa_s=alfa_s)
@@ -397,7 +397,7 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
response["url"] = resp.url
else:
if not alfa_s:
logger.info("No se ha podido autorizar")
logger.info("Unable to authorize")
# If there are errors while using a proxy, refresh the proxy and retry as many times as proxy_retries allows
try:
@@ -456,7 +456,7 @@ def channel_proxy_list(url, forced_proxy=None):
proxy_channel_bloqued = dict()
proxy_channel_bloqued = ast.literal_eval(proxy_channel_bloqued_str)
except:
logger.debug('Proxytools no inicializado correctamente')
logger.debug('Proxytools not initialized correctly')
return False
if not url.endswith('/'):

View File (core/support.py)

@@ -141,7 +141,11 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
matches = scrapertoolsV2.find_multiple_matches(block, patron)
log('MATCHES =', matches)
known_keys = ['url', 'title', 'title2', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating', 'lang', 'type']  # union of both merge sides; by greko: added 'title2', 'episode' and 'type'
for match in matches:
if len(listGroups) > len(match): # to fix a bug
match = list(match)
@@ -157,6 +161,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
title = scrapertoolsV2.decodeHtmlentities(scraped["title"]).replace('"', "'").strip()  # fix by greko: " changed to '
plot = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["plot"]))
longtitle = typo(title, 'bold')
if scraped['quality']: longtitle = longtitle + typo(scraped['quality'], '_ [] color kod')
@@ -168,6 +173,16 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
if scraped['lang']:
    # language tag kept from HEAD, restyled with typo()
    lang = 'Sub-ITA' if 'sub' in scraped['lang'].lower() else 'ITA'
    longtitle += typo(lang, '_ [] color kod')
if scraped['episode']:
    scraped['episode'] = re.sub(r'\s-\s|-|x|&#8211', 'x', scraped['episode'])
    longtitle = typo(scraped['episode'] + ' - ', 'bold') + longtitle
if scraped['title2']:
    title2 = scrapertoolsV2.decodeHtmlentities(scraped["title2"]).strip()
    longtitle = longtitle + typo(title2, 'bold _ -- _')
if item.infoLabels["title"] or item.fulltitle: # if title is set, probably this is a list of episodes or video sources
infolabels = item.infoLabels
@@ -497,15 +512,29 @@ def nextPage(itemlist, item, data, patron, function_level=1):
return itemlist
def server(item, data='', headers='', AutoPlay=True, CheckLinks=True):
def pagination(itemlist, item, page, perpage, function_level=1):
if len(itemlist) >= page * perpage:
itemlist.append(
Item(channel=item.channel,
action=inspect.stack()[function_level][3],
contentType=item.contentType,
title=typo(config.get_localized_string(30992), 'color kod bold'),
url=item.url,
args=item.args,
page=page + 1,
thumbnail=thumb()))
return itemlist
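pagination() dispatches the "next page" Item back to whichever channel function called it, by reading the caller's name off the stack. A tiny self-contained illustration of that lookup:

import inspect

def caller_name(function_level=1):
    # same lookup as inspect.stack()[function_level][3] in pagination()
    return inspect.stack()[function_level][3]

def newep():
    return caller_name()

assert newep() == 'newep'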
def server(item, data='', itemlist='', headers='', AutoPlay=True, CheckLinks=True):
if not data:
data = httptools.downloadpage(item.url, headers=headers).data
itemlist = servertools.find_video_items(data=str(data))
if not itemlist:
itemlist = servertools.find_video_items(data=str(data))
for videoitem in itemlist:
videoitem.title = "".join([item.title, ' ', typo(videoitem.title, 'color kod []')])
videoitem.title = "".join([item.title, ' ', typo(videoitem.title, 'color kod []'), typo(videoitem.quality, 'color kod []') if videoitem.quality else ""])
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
@@ -535,6 +564,7 @@ def controls(itemlist, item, AutoPlay=True, CheckLinks=True):
if AutoPlay == True:
autoplay.start(itemlist, item)
videolibrary(itemlist, item)
return itemlist

View File (specials/autoplay.py)

@@ -143,7 +143,7 @@ def start(itemlist, item):
# 2: Servers only
# 3: Qualities only
# 4: No sorting
if (settings_node['custom_servers'] and settings_node['custom_quality']):
if (settings_node['custom_servers'] and settings_node['custom_quality']) or get_setting('autoplay'):
priority = settings_node['priority']  # 0: Servers and qualities, or 1: Qualities and servers
elif settings_node['custom_servers']:
priority = 2  # Servers only
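A hedged sketch of the full priority selection, assuming the branches not shown in this hunk follow the numbered comments above (settings_node is the channel's autoplay settings dict; names here are illustrative):

def pick_priority(settings_node, autoplay_enabled):
    if (settings_node['custom_servers'] and settings_node['custom_quality']) or autoplay_enabled:
        return settings_node['priority']  # 0: servers then qualities, 1: qualities then servers
    elif settings_node['custom_servers']:
        return 2  # servers only
    elif settings_node['custom_quality']:
        return 3  # qualities only
    return 4  # no sorting

assert pick_priority({'custom_servers': False, 'custom_quality': True, 'priority': 0}, False) == 3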