Files
addon/channels/cineblog01.py
marco ca6d5eb56d KoD 0.8
- tanti miglioramenti sotto il cofano, supporto iniziale al futuro kodi 19
- Nuova modalità di visualizzazione per episodio successivo
- fixato wstream tramite l'aggiunta della finestra per risolvere il reCaptcha
- aggiunta sezione segnala un problema in Aiuto
- altri fix e migliorie varie a canali e server
2020-02-22 13:36:58 +01:00

305 lines
12 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per cineblog01
# ------------------------------------------------------------
import re
from core import scrapertools, httptools, servertools, tmdb, support
from core.item import Item
from lib import unshortenit
from platformcode import logger, config
from lib.concurrent import futures
def findhost():
    """Resolve the channel's current domain by probing the canonical URL.

    The site answers with a redirect: the Location header either routes
    through a Google "site:" search (strip that prefix to get the real
    host) or points directly at the new domain.
    """
    response_headers = httptools.downloadpage(
        'https://www.cb01.uno/', follow_redirects=False).headers
    location = response_headers['location']
    if 'google' in location:
        # e.g. https://www.google.it/search?q=site:<real-host>
        return location.replace('https://www.google.it/search?q=site:', '')
    return location
# Channel-wide constants, resolved once at import time.
host = config.get_channel_url(findhost)  # current working domain for cb01
headers = [['Referer', host]]
# Servers/qualities offered by this channel, in preference order.
list_servers = ['mixdrop', 'akstream', 'wstream', 'backin']
list_quality = ['HD', 'SD', 'default']
# User settings: whether to verify links and how many links to check.
checklinks = config.get_setting('checklinks', 'cineblog01')
checklinks_number = config.get_setting('checklinks_number', 'cineblog01')
@support.menu
def mainlist(item):
    """Build the channel's main menu.

    NOTE: @support.menu introspects the returned locals() — the variable
    names `film`, `tvshow` and `docu` and their entry layout are part of
    the decorator's contract and must not be renamed.
    Each entry is (label, [url-path, action, args]).
    """
    film = [
        ('HD', ['', 'menu', 'Film HD Streaming']),
        ('Generi', ['', 'menu', 'Film per Genere']),
        ('Anni', ['', 'menu', 'Film per Anno']),
        ('Paese', ['', 'menu', 'Film per Paese']),
        ('Ultimi Aggiornati', ['/lista-film-ultimi-100-film-aggiornati/', 'peliculas', 'newest']),
        ('Ultimi Aggiunti', ['/lista-film-ultimi-100-film-aggiunti/', 'peliculas', 'newest'])
    ]
    # First element of tvshow is the base URL; the rest are menu entries.
    tvshow = ['/serietv/',
              ('Per Lettera', ['/serietv/', 'menu', 'Serie-Tv per Lettera']),
              ('Per Genere', ['/serietv/', 'menu', 'Serie-Tv per Genere']),
              ('Per anno', ['/serietv/', 'menu', 'Serie-Tv per Anno']),
              ('Ultime Aggiornate', ['/serietv/', 'peliculas', 'newest'])
              ]
    docu = [('Documentari bullet bold', ['/category/documentario/', 'peliculas']),
            ('HD submenu', ['category/hd-alta-definizione/documentario-hd/', 'peliculas'])
            ]
    return locals()
@support.scrape
def menu(item):
    """Generic sub-menu scraper.

    NOTE: @support.scrape introspects the returned locals().
    `patronBlock` isolates the <ul> that follows the heading named in
    item.args; `patronMenu` extracts each (url, title) entry, which is
    opened with the 'peliculas' action.
    """
    patronBlock = item.args + r'<span.*?><\/span>.*?<ul.*?>(?P<block>.*?)<\/ul>'
    patronMenu = r'href="?(?P<url>[^">]+)"?>(?P<title>.*?)<\/a>'
    action = 'peliculas'
    return locals()
# @support.scrape
# def newest(categoria):
#
# # debug = True
# patron = r'<a href="?(?P<url>[^">]+)"?>(?P<title>[^<([]+)(?:\[(?P<lang>Sub-ITA|B/N|SUB-ITA)\])?\s*(?:\[(?P<quality>HD|SD|HD/3D)\])?\s*\((?P<year>[0-9]{4})\)<\/a>'
# if type(categoria) != Item:
# item = Item()
# else:
# item = categoria
# categoria = 'series' if item.contentType != 'movie' else 'movie'
# pagination = 20
# if categoria == 'series':
# item.contentType = 'tvshow'
# action = 'episodios'
# item.url = host + 'serietv/aggiornamento-quotidiano-serie-tv/'
# patronBlock = r'<article class="sequex-post-content">(?P<block>.*?)</article>'
# patron = '<a href="(?P<url>[^"]+)".*?>(?P<title>[^<([|]+).*?(?P<lang>ITA|SUB-ITA)?</a'
# else:
# item.contentType = 'movie'
# item.url = host + '/lista-film-ultimi-100-film-aggiunti/'
# patronBlock = r'Ultimi 100 film aggiunti:(?P<block>.*?)<\/td>'
# # else:
# # patronBlock = r'Ultimi 100 film Aggiornati:(?P<block>.*?)<\/td>'
# # item = categoria
# return locals()
def newest(categoria):
    """Return the most recently added titles for the global "news" menu.

    categoria: 'series' for the TV-show section, anything else for movies.
    Returns the itemlist produced by peliculas(); on scrape failure the
    exception is logged and an empty list is returned so the aggregated
    news view keeps working for the other channels.
    """
    support.log(categoria)
    item = support.Item()
    try:
        if categoria == "series":
            item.contentType = 'tvshow'
            item.url = host + '/serietv/'  # aggiornamento-quotidiano-serie-tv/
        else:
            item.contentType = 'movie'
            item.url = host + '/lista-film-ultimi-100-film-aggiunti/'
        item.args = "newest"
        return peliculas(item)
    # Continue the aggregated search on error.
    # Was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        import sys
        for line in sys.exc_info():
            # Use the module-level logger, consistent with search().
            logger.error("{0}".format(line))
        return []
def search(item, text):
    """Search the site for `text` and scrape the result page.

    Spaces are turned into '+' for the WordPress ?s= query. On failure
    the error is logged and an empty list returned so the global search
    across channels keeps going.
    """
    support.log(item.url, "search", text)
    try:
        item.url = item.url + "/?s=" + text.replace(' ', '+')
        return peliculas(item)
    # Continue the global search on error.
    # Was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
@support.scrape
def peliculas(item):
    """List titles from a category/search page.

    NOTE: @support.scrape introspects the returned locals(); the regex
    set is selected by item.args ('newest' pages use the latest-100
    tables) and by whether the URL is in the series section
    ('/serietv/'). Named groups (url/title/thumb/lang/quality/year/...)
    are consumed by the decorator.
    """
    # Exclude 'service' posts that are not actual titles.
    blacklist = ['BENVENUTI', 'Richieste Serie TV', 'CB01.UNO &#x25b6; TROVA L&#8217;INDIRIZZO UFFICIALE ',
                 'Aggiornamento Quotidiano Serie TV', 'OSCAR 2019 ▶ CB01.UNO: Vota il tuo film preferito! 🎬',
                 'Openload: la situazione. Benvenuto Verystream', 'Openload: lo volete ancora?', 'OSCAR 2020 &#x25b6; VOTA IL TUO FILM PREFERITO! &#x1f3ac;']
    # debug = True
    if 'newest' in item.args:
        if '/serietv/' not in item.url:
            # "latest 100 movies" table: single page, links go straight
            # to findvideos.
            pagination = ''
            patronBlock = r'Ultimi 100 film [^:]+:(?P<block>.*?)<\/td>'
            patron = r'<a href="?(?P<url>[^">]+)"?>(?P<title>[^<([]+)(?:\[(?P<lang>Sub-ITA|B/N|SUB-ITA)\])?\s*(?:\[(?P<quality>HD|SD|HD/3D)\])?\s*\((?P<year>[0-9]{4})\)<\/a>'
            action = 'findvideos'
        else:
            # "recently updated series" widget on the series home page.
            patronBlock = r'Ultime SerieTv aggiornate(?P<block>.*?)Lista'
            patron = r'src="(?P<thumb>[^"]+)" alt="(?P<title>.*?)(?: &#8211; \d+&#215;\d+)?(?:"| &#8211; )(?:(?P<lang>Sub-ITA|ITA))?[^>]*>[^>]+>[^>]+><a href="(?P<url>[^"]+)".*?<div class="rpwe-summary">.*?\((?P<year>\d{4})[^\)]*\) (?P<plot>[^<]+)<'
            action = 'episodios'
    elif '/serietv/' not in item.url:
        # Regular movie card listing.
        patron = r'<div class="?card-image"?>.*?<img src="?(?P<thumb>[^" ]+)"? alt[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="?(?P<url>[^" >]+)(?:\/|"|\s+)>(?P<title>[^<[(]+)(?:\[(?P<quality>[A-Za-z0-9/-]+)])? (?:\((?P<year>[0-9]{4})\))?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<genre>[^<>&–]+)(?:[^ ]+\s*DURATA\s*(?P<duration>[0-9]+)[^>]+>[^>]+>[^>]+>(?P<plot>[^<>]+))?'
        # patron = r'<div class="?card-image"?>.*?<img src="?(?P<thumb>[^" ]+)"? alt.*?<a href="?(?P<url>[^" >]+)(?:\/|"|\s+)>(?P<title>[^<[(]+)(?:\[(?P<quality>[A-Za-z0-9/-]+)])? (?:\((?P<year>[0-9]{4})\))?.*?<strong>(?P<genre>[^<>&]+).*?DURATA (?P<duration>[0-9]+).*?<br(?: /)?>(?P<plot>[^<>]+)'
        action = 'findvideos'
    else:
        # Series card listing.
        # debug = True
        patron = r'div class="card-image">.*?<img src="(?P<thumb>[^ ]+)" alt.*?<a href="(?P<url>[^ >]+)">(?P<title>.*?)(?:&#.*?)?(?P<lang>(?:[Ss][Uu][Bb]-)?[Ii][Tt][Aa])?<\/a>.*?(?:<strong><span style="[^"]+">(?P<genre>[^<>0-9(]+)\((?P<year>[0-9]{4}).*?</(?:p|div)>(?P<plot>.*?))?</div'
        action = 'episodios'
        item.contentType = 'tvshow'
    # patronBlock=[r'<div class="?sequex-page-left"?>(?P<block>.*?)<aside class="?sequex-page-right"?>',
    #              '<div class="?card-image"?>.*?(?=<div class="?card-image"?>|<div class="?rating"?>)']
    # Paged listings expose a "next page" arrow; newest pages do not.
    if 'newest' not in item.args: patronNext='<a class="?page-link"? href="?([^>]+)"?><i class="fa fa-angle-right">'
    return locals()
@support.scrape
def episodios(item):
    """Scrape the episode list of a series page.

    NOTE: @support.scrape introspects the returned locals().
    `patronBlock` isolates each season accordion (capturing language and
    optional quality from its header); `patron` extracts the episode
    number (NxM), an optional title and the raw links chunk, which is
    passed on as `url` to findvideos/findvid_serie.
    """
    patronBlock = r'(?P<block><div class="sp-head[a-z ]*?" title="Espandi">\s*(?:STAGION[EI]\s*(?:DA\s*[0-9]+\s*A)?\s*[0-9]+|MINISERIE) - (?P<lang>[^-<]+)(?:- (?P<quality>[^-<]+))?.*?[^<>]*?<\/div>.*?)<div class="spdiv">\[riduci\]<\/div>'
    # Raw-string prefix added: the pattern contains \s and \/ sequences
    # that are invalid escapes in a plain string (DeprecationWarning on
    # Python 3). The resulting string value is unchanged.
    patron = r'(?:<p>|<strong>)(?P<episode>[0-9]+(?:&#215;|×)[0-9]+)\s*(?P<title2>[^&<]*)?(?:&#8211;|-)?\s*(?P<url>.*?)(?:<\/p>|<br)'
    return locals()
def findvideos(item):
    """Collect playable video links for a movie page.

    Episode items are delegated to findvid_serie(). Movie pages list
    their mirrors in "Streaming" / "Streaming HD" / "Streaming 3D"
    tables; each table is scraped and every mirror URL is resolved
    concurrently through final_links(). Returns the server itemlist,
    optionally headed by a non-clickable quality banner.
    """
    if item.contentType == "episode":
        return findvid_serie(item)

    def load_links(urls, re_txt, desc_txt, quality=""):
        # Extract the link table matched by re_txt from `data` (closed
        # over from the enclosing scope) and resolve each mirror URL in
        # parallel, appending non-empty results to `urls`.
        if re_txt:
            streaming = scrapertools.find_single_match(data, re_txt).replace('"', '')
            support.log('STREAMING', streaming)
            matches = support.match(streaming, patron=r'<td><a.*?href=([^ ]+) [^>]+>[^<]+<').matches
            with futures.ThreadPoolExecutor() as executor:
                pending = [executor.submit(final_links, match) for match in matches]
                for res in futures.as_completed(pending):
                    if res.result():
                        urls.append(res.result())

    support.log()
    # FIX: was `itemlist = urls = []`, which aliased both names to the
    # same list object; keep them independent.
    urls = []
    # Download the page once; load_links() reads `data` via closure.
    data = httptools.downloadpage(item.url).data
    data = re.sub('\n|\t', '', data)

    # Extract the quality banner shown above the link tables.
    patronvideos = '>([^<]+)</strong></div>'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)
    QualityStr = ""
    for match in matches:
        QualityStr = scrapertools.decodeHtmlentities(match.group(1))

    # Scrape each streaming table - SD / HD / 3D.
    load_links(urls, '<strong>Streamin?g:</strong>(.*?)cbtable', "Streaming", "SD")
    load_links(urls, '<strong>Streamin?g HD[^<]+</strong>(.*?)cbtable', "Streaming HD", "HD")
    load_links(urls, '<strong>Streamin?g 3D[^<]+</strong>(.*?)cbtable', "Streaming 3D")

    itemlist = support.server(item, urls)
    # Prepend a non-clickable header entry carrying the page's quality.
    if itemlist and QualityStr:
        itemlist.insert(0,
                        Item(channel=item.channel,
                             action="",
                             title=support.typo(QualityStr, '[] color kod bold'),
                             folder=False))
    return itemlist
# Estrae i contenuti - Download
# load_links(itemlist, '<strong>Download:</strong>(.*?)<tableclass=cbtable height=30>', "aqua", "Download")
# Estrae i contenuti - Download HD
# load_links(itemlist, '<strong>Download HD[^<]+</strong>(.*?)<tableclass=cbtable width=100% height=20>', "azure", "Download HD")
def findvid_serie(item):
    """Collect playable links for a single episode.

    item.url carries the raw HTML fragment of the episode line (set by
    episodios()). The fragment is split into alternating
    "<label>: <links>" chunks; every <a ... target="_blank"> link in a
    links chunk is resolved concurrently through final_links().
    """
    def load_vid_series(html, item, urls, blktxt=''):
        # Resolve every mirror link in this chunk in parallel, keeping
        # only non-empty results.
        matches = support.match(html, patron=r'<a href="([^"]+)"[^=]+="_blank"[^>]+>(?!<!--).*?</a>').matches
        with futures.ThreadPoolExecutor() as executor:
            pending = [executor.submit(final_links, match) for match in matches]
            for res in futures.as_completed(pending):
                if res.result():
                    urls.append(res.result())

    support.log()
    urls = []
    data = item.url
    # Split into alternating label / link-list chunks.
    # BUG FIX: re.split()'s third positional argument is maxsplit, not
    # flags — the original passed re.S (== 16) there, silently capping
    # the split at 16 pieces and dropping links on long episode lines.
    blk = re.split(r"(?:>\s*)?([A-Za-z\s0-9]*):\s*<", data, flags=re.S)
    blktxt = ""
    for b in blk:
        if b[0:3] == "a h" or b[0:4] == "<a h":
            load_vid_series("<%s>" % b, item, urls, blktxt)
            blktxt = ""
        elif len(b.strip()) > 1:
            # Remember the label so it can annotate the following links.
            blktxt = b.strip()
    return support.server(item, urls)
def final_links(url):
    """Resolve one raw mirror URL to its final playable location.

    Unwraps the redirect layers used by the site (the /film/ wrapper,
    /goto/ base64 links, go.php interstitials); anything else is handed
    to support.swzz_get_url(). Returns the resolved URL string ("" when
    nothing could be resolved).
    """
    support.log()
    item = Item()
    item.url = url
    ### Handling new cb01 wrapper
    # host[9:] strips the scheme prefix so the check matches regardless
    # of http/https.
    if host[9:] + "/film/" in item.url:
        iurl = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location", "")
        support.log("/film/ wrapper: ", iurl)
        if iurl:
            item.url = iurl
    if '/goto/' in item.url:
        # The real target is base64-encoded after /goto/.
        # BUG FIX: str.decode('base64') is Python 2 only; use the base64
        # module so the channel also runs on Python 3 / Kodi 19.
        import base64
        encoded = item.url.split('/goto/')[-1]
        item.url = base64.b64decode(encoded).decode('utf-8')
        item.url = item.url.replace('http://cineblog01.uno', 'http://k4pp4.pw')
    logger.debug("##############################################################")
    if "go.php" in item.url:
        data = httptools.downloadpage(item.url).data
        if "window.location.href" in data:
            try:
                data = scrapertools.find_single_match(data, 'window.location.href = "([^"]+)";')
            except IndexError:
                # No JS redirect found: fall back to the Location header
                # and unwrap any remaining 30x shortener hops.
                data = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location", "")
                data, c = unshortenit.unwrap_30x_only(data)
        else:
            data = scrapertools.find_single_match(data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
        logger.debug("##### play go.php data ##\n%s\n##" % data)
    else:
        data = support.swzz_get_url(item)
    return data