CB01 Channel completely rewritten

This commit is contained in:
Alhaziel
2019-03-15 18:07:39 +01:00
committed by mac12m99
parent 08774b6fd9
commit 8390669bcc
2 changed files with 263 additions and 318 deletions

View File

@@ -1,11 +1,11 @@
{
"id": "cineblog01",
"name": "Cineblog01",
"id": "cb01",
"name": "CB01",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/cineblog01.png",
"banner": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/cineblog01.png",
"thumbnail": "cb01.png",
"banner": "cb01.png",
"categories": [
"tvshow",
"movie","cult","top channels"
@@ -26,6 +26,43 @@
"default": true,
"enabled": true,
"visible": true
}
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Non filtrare",
"IT"
]
}
]
}

View File

@@ -3,10 +3,11 @@
# Kodi on Demand - Kodi Addon
# Canale per cineblog01
# ------------------------------------------------------------
import re
import urlparse
from channels import autoplay
from channels import autoplay, filtertools
from core import scrapertools, httptools, servertools, tmdb
from core.item import Item
from lib import unshortenit
@@ -20,11 +21,16 @@ permUrl = httptools.downloadpage('https://www.cb01.uno/', follow_redirects=False
host = 'https://www.'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango', 'wstream']
list_quality = ['HD', 'SD']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'cineblog01')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'cineblog01')
# Exclude the site's 'service' posts (announcements, polls) from listings
blacklist = ['Aggiornamento Quotidiano Serie TV', 'Richieste Serie TV', 'CB01.UNO TROVA LINDIRIZZO UFFICIALE', 'COMING SOON!', 'OSCAR 2019 ▶ CB01.UNO: Vota il tuo film preferito! 🎬']
blacklist = ['BENVENUTI ', 'Richieste Serie TV', 'CB01.UNO ▶ TROVA L’INDIRIZZO UFFICIALE ', 'Aggiornamento Quotidiano Serie TV', 'OSCAR 2019 ▶ CB01.UNO: Vota il tuo film preferito! 🎬']
def mainlist(item):
@@ -34,69 +40,125 @@ def mainlist(item):
# Main options
itemlist = [Item(channel=item.channel,
action="peliculas",
title="[COLOR azure]Novita'[/COLOR]",
action="video",
title="[B]Film[/B]",
url=host,
extra="movie",
contentType="movie",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="peliculas",
title="[COLOR azure]Alta Definizione [HD][/COLOR]",
url="%s/tag/film-hd-altadefinizione/" % host,
extra="movie",
action="menu",
title="[B] > Menù HD[/B]",
extra='Film HD Streaming',
url=host,
contentType="movie",
thumbnail="http://jcrent.com/apple%20tv%20final/HD.png"),
Item(channel=item.channel,
action="menuhd",
title="[COLOR azure]Menù HD[/COLOR]",
action="menu",
title="[B] > Film per Genere[/B]",
extra='Film per Genere',
url=host,
extra="movie",
thumbnail="http://files.softicons.com/download/computer-icons/disks-icons-by-wil-nichols/png/256x256/Blu-Ray.png"),
Item(channel=item.channel,
action="menugeneros",
title="[COLOR azure]Per Genere[/COLOR]",
url=host,
extra="movie",
contentType="movie",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="menuanyos",
title="[COLOR azure]Per Anno[/COLOR]",
action="menu",
title="[B] > Film per Anno[/B]",
extra='Film per Anno',
url=host,
extra="movie",
contentType="movie",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="search",
title="[COLOR yellow]Cerca Film[/COLOR]",
extra="movie",
title="[COLOR blue] > Cerca Film[/COLOR]",
contentType="movie",
url=host,
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),
Item(channel=item.channel,
action="listserie",
title="[COLOR azure]Serie Tv - Novita'[/COLOR]",
url="%s/serietv/" % host,
extra="tvshow",
action="video",
title="[B]Serie TV[/B]",
url=host + '/serietv/',
contentType="episode",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="menu",
title="[B] > Serie-Tv per Lettera[/B]",
extra='Serie-Tv per Lettera',
url=host + '/serietv/',
contentType="episode",
thumbnail="http://jcrent.com/apple%20tv%20final/HD.png"),
Item(channel=item.channel,
action="menu",
title="[B] > Serie-Tv per Genere[/B]",
extra='Serie-Tv per Genere',
url=host + '/serietv/',
contentType="episode",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="menu",
title="[B] > Serie-Tv per Anno[/B]",
extra='Serie-Tv per Anno',
url=host + '/serietv/',
contentType="episode",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="search",
title="[COLOR yellow]Cerca Serie Tv[/COLOR]",
extra="tvshow",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
title="[COLOR blue] > Cerca Serie TV[/COLOR]",
contentType="episode",
url=host + '/serietv/',
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),
]
autoplay.show_option(item.channel, itemlist)
return itemlist
def menu(item):
    """Build a submenu from the site's navigation drop-down.

    Locates the <ul> block headed by ``item.extra`` in the downloaded page
    and turns each anchor inside it into a browsable Item (action 'video').
    """
    page = httptools.downloadpage(item.url, headers=headers).data
    page = re.sub('\n|\t', '', page)
    # Narrow the page down to the <ul> that follows the requested menu heading
    menu_html = scrapertools.get_match(
        page, item.extra + r'<span.*?><\/span>.*?<ul.*?>(.*?)<\/ul>')
    entries = re.compile(r'href="([^"]+)">(.*?)<\/a>', re.DOTALL).findall(menu_html)
    # Menu hrefs are site-relative, so prefix the resolved host
    return [
        Item(
            channel=item.channel,
            title=label,
            contentType=item.contentType,
            action='video',
            url=host + link
        )
        for link, label in entries
    ]
def search(item, text):
    """Run a site search for *text* and list the results.

    Appends the query to the current URL ("/?s=<text>") and delegates to
    ``video`` for scraping. On any scraping error the exception is logged
    and an empty list is returned so the global search can keep going.
    """
    logger.info("[cineblog01.py] " + item.url + " search " + text)
    try:
        item.url = item.url + "/?s=" + text
        return video(item)
    # Continue the search flow on error; was a bare ``except:`` which also
    # swallowed KeyboardInterrupt/SystemExit — narrowed to Exception.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def newest(categoria):
logger.info("[cineblog01.py] newest")
itemlist = []
item = Item()
if categoria == "film":
if categoria == "peliculas":
item.url = host + '/lista-film-ultimi-100-film-aggiunti/'
item.extra = "movie"
try:
# Carica la pagina
data = httptools.downloadpage(item.url).data
blocco = scrapertools.get_match(data, 'Ultimi 100 film aggiunti:.*?<\/div>')
patron = '<a href="([^"]+)">([^<]+)<\/a>'
logger.info("[cineblog01.py] DATA: "+data)
blocco = scrapertools.get_match(data, r'Ultimi 100 film aggiunti:.*?<\/td>')
patron = r'<a href="([^"]+)">([^<]+)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedurl, scrapedtitle in matches:
@@ -119,305 +181,136 @@ def newest(categoria):
return itemlist
def peliculas(item):
logger.info("[cineblog01.py] peliculas")
def video(item):
logger.info("[cineblog01.py] video")
itemlist = []
if item.url == "":
item.url = host
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub('\n|\t','',data)
block = scrapertools.get_match(data, r'<div class="sequex-page-left">(.*?)<aside class="sequex-page-right">')
# Carica la pagina
data = httptools.downloadpage(item.url).data
# Estrae i contenuti
patronvideos = '<div class="span4".*?<a.*?<p><img src="([^"]+)".*?'
patronvideos += '<div class="span8">.*?<a href="([^"]+)"> <h1>([^"]+)</h1></a>.*?'
patronvideos += '<strong>([^<]*)[<br />,</strong>].*?<br />([^<+]+)'
matches = re.compile(patronvideos, re.DOTALL).finditer(data)
for match in matches:
scrapedtitle = scrapertools.unescape(match.group(3))
if not scrapedtitle in blacklist:
scrapedurl = urlparse.urljoin(item.url, match.group(2))
scrapedthumbnail = urlparse.urljoin(item.url, match.group(1))
scrapedthumbnail = scrapedthumbnail.replace(" ", "%20")
scrapedplot = scrapertools.unescape("[COLOR orange]" + match.group(4) + "[/COLOR]\n" + match.group(5).strip())
scrapedplot = scrapertools.htmlclean(scrapedplot).strip()
cleantitle = re.sub(r'(?:\[HD/?3?D?\]|\[Sub-ITA\])', '', scrapedtitle)
if item.contentType == 'movie' or '/serietv/' not in item.url:
action = 'findvideos'
logger.info("### FILM ###")
patron = r'type-post.*?>.*?<img src="([^"]+)".*?<h3.*?<a href="([^"]+)">([^<]+)<\/a>.*?<strong>([^<]+)<.*?br \/>\s+(.*?) '
matches = re.compile(patron, re.DOTALL).findall(block)
logger.info("### MATCHES ###" + str(matches))
for scrapedthumb, scrapedurl, scrapedtitle, scrapedinfo, scrapedplot in matches:
title = re.sub(r'(?:\[HD/?3?D?\]|\[Sub-ITA\])', '', scrapedtitle)
year = scrapertools.find_single_match(scrapedtitle, r'\((\d{4})\)')
quality = scrapertools.find_single_match(scrapedtitle, r'\[(.*?)\]')
genre = scrapertools.find_single_match(scrapedinfo, '([A-Z]+) &')
duration = scrapertools.find_single_match(scrapedinfo,'DURATA ([0-9]+)&')
infolabels = {}
if year:
cleantitle = cleantitle.replace("(%s)" % year, '').strip()
infolabels['year'] = year
title = title.replace("(%s)" % year, '').strip()
infolabels['Year'] = year
if duration:
infolabels['Duration'] = int(duration)*60
if genre:
infolabels['Genre'] = genre
if quality:
longtitle = '[B]' + title + '[/B] [COLOR blue][' + quality + '][/COLOR]'
else:
longtitle = '[B]' + title + '[/B]'
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
title=scrapedtitle,
fulltitle=cleantitle,
text_color="azure",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
infoLabels=infolabels,
show=cleantitle,
extra=item.extra))
infolabels['Plot'] = scrapedplot + '...'
if not scrapedtitle in blacklist:
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType=item.contentType,
title=longtitle,
fulltitle=title,
show=title,
url=scrapedurl,
infoLabels=infolabels,
thumbnail=scrapedthumb
)
)
else:
action = 'episodios'
patron = 'type-post.*?>(.*?)<div class="card-action">'
matches = re.compile(patron, re.DOTALL).findall(block)
for match in matches:
patron = r'<img src="([^"]+)".*?<h3.*?<a href="([^"]+)">([^<]+)<\/a>.*?<p>(.*?)\(([0-9]+).*?\) (.*?)<\/p>'
matches = re.compile(patron, re.DOTALL).findall(match)
for scrapedthumb, scrapedurl, scrapedtitle, scrapedgenre, scrapedyear, scrapedplot in matches:
longtitle = '[B]' + scrapedtitle + '[/B]'
title = scrapedtitle
infolabels = {}
infolabels['Year'] = scrapedyear
infolabels['Genre'] = scrapedgenre
infolabels['Plot'] = scrapedplot
if not scrapedtitle in blacklist:
itemlist.append(
Item(channel=item.channel,
action=action,
contentType=item.contentType,
title=longtitle,
fulltitle=title,
show=title,
url=scrapedurl,
infoLabels=infolabels,
thumbnail=scrapedthumb
)
)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Next page mark
#next_page = scrapertools.find_single_match(data, r"<link rel='next' href='(.*?)' />")
#if not next_page:
next_page = scrapertools.find_single_match(data, r'<li class="active_page"><a href="[^"]+">\d+</a></li>\s<li><a href="([^"]+)">\d+</a></li>')
patron = "<a class='page-link'" + ' href="(.*?)"><i class="fa fa-angle-right">'
next_page = scrapertools.find_single_match(data, patron)
logger.info('NEXT '+next_page)
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=next_page,
extra=item.extra,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
return itemlist
def menugeneros(item):
logger.info("[cineblog01.py] menugeneros")
return menulist(item, '<select name="select2"(.*?)</select>')
def menuhd(item):
logger.info("[cineblog01.py] menuhd")
return menulist(item, '<select name="select1"(.*?)</select>')
def menuanyos(item):
logger.info("[cineblog01.py] menuvk")
return menulist(item, '<select name="select3"(.*?)</select>')
def menulist(item, re_txt):
itemlist = []
data = httptools.downloadpage(item.url).data
# Narrow search by selecting only the combo
bloque = scrapertools.get_match(data, re_txt)
# The categories are the options for the combo
patron = '<option value="([^"]+)">([^<]+)</option>'
matches = re.compile(patron, re.DOTALL).findall(bloque)
scrapertools.printMatches(matches)
for url, titulo in matches:
scrapedtitle = titulo
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = ""
scrapedplot = ""
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
extra=item.extra,
plot=scrapedplot))
return itemlist
# Al llamarse "search" la función, el launcher pide un texto a buscar y lo añade como parámetro
def search(item, texto):
logger.info("[cineblog01.py] " + item.url + " search " + texto)
try:
if item.extra == "movie":
item.url = host + "/?s=" + texto
return peliculas(item)
if item.extra == "tvshow":
item.url = host + "/serietv/?s=" + texto
return listserie(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def listserie(item):
logger.info("[cineblog01.py] listaserie")
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url).data
# Estrae i contenuti
patronvideos = '<div class="span4">\s*<a href="([^"]+)"><img src="([^"]+)".*?<div class="span8">.*?<h1>([^<]+)</h1></a>(.*?)<br><a'
matches = re.compile(patronvideos, re.DOTALL).finditer(data)
for match in matches:
scrapedtitle = scrapertools.unescape(match.group(3))
if not scrapedtitle in blacklist:
scrapedurl = match.group(1)
scrapedthumbnail = match.group(2)
scrapedplot = scrapertools.unescape(match.group(4))
scrapedplot = scrapertools.htmlclean(scrapedplot).strip()
itemlist.append(
Item(channel=item.channel,
action="season_serietv",
contentType="tvshow",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
extra=item.extra,
plot=scrapedplot))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Next page mark
next_page = scrapertools.find_single_match(data, "<link rel='next' href='(.*?)' />")
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="listserie",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=next_page,
extra=item.extra,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
return itemlist
def season_serietv(item):
def load_season_serietv(html, item, itemlist, season_title):
if len(html) > 0 and len(season_title) > 0:
itemlist.append(
Item(channel=item.channel,
action="episodios",
title="[COLOR azure]%s[/COLOR]" % season_title,
contentType="episode",
url=html,
extra="tvshow",
show=item.show))
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url).data
# data = scrapertools.decodeHtmlentities(data)
data = scrapertools.get_match(data, '<td bgcolor="#ECEAE1">(.*?)</table>')
# for x in range(0, len(scrapedtitle)-1):
# logger.debug('%x: %s - %s',x,ord(scrapedtitle[x]),chr(ord(scrapedtitle[x])))
blkseparator = chr(32) + chr(226) + chr(128) + chr(147) + chr(32)
data = data.replace(blkseparator, ' - ')
starts = []
season_titles = []
patron = '^(?:seri|stagion)[i|e].*$'
matches = re.compile(patron, re.MULTILINE | re.IGNORECASE).finditer(data)
for match in matches:
if match.group() != '':
season_titles.append(match.group())
starts.append(match.end())
i = 1
len_season_titles = len(season_titles)
while i <= len_season_titles:
inizio = starts[i - 1]
fine = starts[i] if i < len_season_titles else -1
html = data[inizio:fine]
season_title = season_titles[i - 1]
load_season_serietv(html, item, itemlist, season_title)
i += 1
action="video",
contentType=item.contentType,
title="[COLOR blue]" + config.get_localized_string(30992) + " >[/COLOR]",
url=next_page))
return itemlist
def episodios(item):
    """List a TV show's episodes.

    Delegates the actual scraping to ``episodios_serie_new`` and, when the
    Kodi video-library feature is enabled and at least one episode was
    found, appends an extra entry that adds the whole series to the library.
    """
    itemlist = []
    if item.extra == "tvshow":
        itemlist.extend(episodios_serie_new(item))
    # Only offer "add series to library" when library support is on and
    # the scrape actually produced episodes.
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 # "episodios###<extra>" is parsed by the library importer —
                 # NOTE(review): format assumed from convention, confirm.
                 extra="episodios" + "###" + item.extra,
                 show=item.show))
    return itemlist
def episodios_serie_new(item):
def load_episodios(html, item, itemlist, lang_title):
# for data in scrapertools.decodeHtmlentities(html).splitlines():
patron = '((?:.*?<a href=".*?"[^=]+="_blank"[^>]+>.*?<\/a>)+)'
matches = re.compile(patron).findall(html)
for data in matches:
# Estrae i contenuti
scrapedtitle = data.split('<a ')[0]
scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle).strip()
if scrapedtitle != 'Categorie':
scrapedtitle = scrapedtitle.replace('&#215;', 'x')
if scrapedtitle.find(' - ') > 0:
scrapedtitle = scrapedtitle[0:scrapedtitle.find(' - ')]
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="episode",
title="[COLOR azure]%s[/COLOR]" % (scrapedtitle + " (" + lang_title + ")"),
url=data,
thumbnail=item.thumbnail,
extra=item.extra,
fulltitle=scrapedtitle + " (" + lang_title + ")" + ' - ' + item.show,
show=item.show))
logger.info("[cineblog01.py] episodios")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub('\n|\t','',data)
block = scrapertools.get_match(data, r'<article class="sequex-post-content">(.*?)<\/article>').replace('&#215;','x').replace(' &#8211; ','')
patron = r'<p>([0-9]+x[0-9]+)(.*?)<\/p>'
matches = re.compile(patron, re.DOTALL).findall(block)
for scrapedtitle, scrapedurl in matches:
title = '[B]' + scrapedtitle + '[/B] - ' + item.title
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType=item.contentType,
title=title,
fulltitle=title,
show=title,
url=scrapedurl,
)
)
lang_title = item.title
if lang_title.upper().find('SUB') > 0:
lang_title = 'SUB ITA'
else:
lang_title = 'ITA'
html = item.url
load_episodios(html, item, itemlist, lang_title)
return itemlist
def findvideos(item):
    """Route to the proper link extractor for the item's content type.

    Movies go to ``findvid_film``, episodes to ``findvid_serie``; anything
    else yields no playable links.
    """
    kind = item.contentType
    if kind == "movie":
        return findvid_film(item)
    elif kind == "episode":
        return findvid_serie(item)
    return []
def findvid_film(item):
def load_links(itemlist, re_txt, color, desc_txt, quality=""):
streaming = scrapertools.find_single_match(data, re_txt)
patron = '<td><a[^h]href="([^"]+)"[^>]+>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(streaming)
for scrapedurl, scrapedtitle in matches:
logger.debug("##### findvideos %s ## %s ## %s ##" % (desc_txt, scrapedurl, scrapedtitle))
title = "[COLOR " + color + "]" + desc_txt + ":[/COLOR] " + item.title + " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]"
title = "[COLOR " + color + "]" + desc_txt + ":[/COLOR] " + item.fulltitle + " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]"
itemlist.append(
Item(channel=item.channel,
action="play",
@@ -437,7 +330,6 @@ def findvid_film(item):
# Carica la pagina
data = httptools.downloadpage(item.url).data
# data = scrapertools.decodeHtmlentities(data)
# Extract the quality format
patronvideos = '>([^<]+)</strong></div>'
@@ -447,28 +339,46 @@ def findvid_film(item):
QualityStr = scrapertools.unescape(match.group(1))[6:]
# Estrae i contenuti - Streaming
load_links(itemlist, '<strong>Streaming:</strong>(.*?)<table height="30">', "orange", "Streaming", "SD")
load_links(itemlist, '<strong>Streaming:</strong>(.*?)<table class="cbtable" height="30">', "orange", "Streaming", "SD")
# Estrae i contenuti - Streaming HD
load_links(itemlist, '<strong>Streaming HD[^<]+</strong>(.*?)<table height="30">', "yellow", "Streaming HD", "HD")
load_links(itemlist, '<strong>Streaming HD[^<]+</strong>(.*?)<table class="cbtable" height="30">', "yellow", "Streaming HD", "HD")
autoplay.start(itemlist, item)
# Estrae i contenuti - Streaming 3D
load_links(itemlist, '<strong>Streaming 3D[^<]+</strong>(.*?)<table height="30">', "pink", "Streaming 3D")
load_links(itemlist, '<strong>Streaming 3D[^<]+</strong>(.*?)<table class="cbtable" height="30">', "pink", "Streaming 3D")
# Estrae i contenuti - Download
load_links(itemlist, '<strong>Download:</strong>(.*?)<table height="30">', "aqua", "Download")
load_links(itemlist, '<strong>Download:</strong>(.*?)<table class="cbtable" height="30">', "aqua", "Download")
# Estrae i contenuti - Download HD
load_links(itemlist, '<strong>Download HD[^<]+</strong>(.*?)<table width="100%" height="20">', "azure",
load_links(itemlist, '<strong>Download HD[^<]+</strong>(.*?)<table class="cbtable" width="100%" height="20">', "azure",
"Download HD")
if len(itemlist) == 0:
itemlist = servertools.find_video_items(item=item)
return itemlist
# Requerido para Filtrar enlaces
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def findvid_serie(item):
def load_vid_series(html, item, itemlist, blktxt):
@@ -510,7 +420,7 @@ def findvid_serie(item):
lnkblkp.append(data.find('<a'))
# Find new blocks of links
patron = '<a\s[^>]+>[^<]+</a>([^<]+)'
patron = r'<a\s[^>]+>[^<]+</a>([^<]+)'
matches = re.compile(patron, re.DOTALL).finditer(data)
for match in matches:
sep = match.group(1)
@@ -531,7 +441,6 @@ def findvid_serie(item):
return itemlist
def play(item):
logger.info("[cineblog01.py] play")
itemlist = []
@@ -568,13 +477,13 @@ def play(item):
from lib import jsunpack
try:
data = scrapertools.get_match(data, "(eval\(function\(p,a,c,k,e,d.*?)</script>")
data = scrapertools.get_match(data, r"(eval\(function\(p,a,c,k,e,d.*?)</script>")
data = jsunpack.unpack(data)
logger.debug("##### play /link/ unpack ##\n%s\n##" % data)
except IndexError:
logger.debug("##### The content is yet unpacked ##\n%s\n##" % data)
data = scrapertools.find_single_match(data, 'var link(?:\s)?=(?:\s)?"([^"]+)";')
data = scrapertools.find_single_match(data, r'var link(?:\s)?=(?:\s)?"([^"]+)";')
data, c = unshortenit.unwrap_30x_only(data)
if data.startswith('/'):
data = urlparse.urljoin("http://swzz.xyz", data)
@@ -598,5 +507,4 @@ def play(item):
except AttributeError:
logger.error("vcrypt data doesn't contain expected URL")
return itemlist
return itemlist