Merge branch 'master' into master
@@ -153,7 +153,7 @@ def findvideos(item):
    itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

@@ -257,7 +257,7 @@ def findvideos_film(item):
    itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

@@ -101,7 +101,7 @@ def findvideos(item):
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    autoplay.start(itemlist, item)
    support.videolibrary(itemlist, item ,'color kod bold')

@@ -179,7 +179,7 @@ def findvideos(item):
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)
    autoplay.start(itemlist, item)

    return itemlist

@@ -247,7 +247,7 @@ def findvideos(item):

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

@@ -404,7 +404,7 @@ def findvideos(item):

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

@@ -355,7 +355,7 @@ def findvideos(item):

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

@@ -16,6 +16,7 @@ from platformcode import logger, config
host = ""
headers = ""


def findhost():
    global host, headers
    permUrl = httptools.downloadpage('https://www.cb01.uno/', follow_redirects=False).headers

@@ -30,8 +31,10 @@ list_quality = ['HD', 'default']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'cineblog01')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'cineblog01')

#esclusione degli articoli 'di servizio'
blacklist = ['BENVENUTI', 'Richieste Serie TV', 'CB01.UNO ▶ TROVA L’INDIRIZZO UFFICIALE ', 'Aggiornamento Quotidiano Serie TV', 'OSCAR 2019 ▶ CB01.UNO: Vota il tuo film preferito! 🎬', 'Openload: la situazione. Benvenuto Verystream']
# esclusione degli articoli 'di servizio'
blacklist = ['BENVENUTI', 'Richieste Serie TV', 'CB01.UNO ▶ TROVA L’INDIRIZZO UFFICIALE ',
             'Aggiornamento Quotidiano Serie TV', 'OSCAR 2019 ▶ CB01.UNO: Vota il tuo film preferito! 🎬',
             'Openload: la situazione. Benvenuto Verystream']


def mainlist(item):

@@ -152,7 +155,7 @@ def peliculas(item):
        listGroups = ['thumb', 'url', 'title', 'quality', 'year', 'genre', 'duration', 'plot']
        action = 'findvideos'
    else:
        patron = r'div class="card-image">.*?<img src="([^ ]+)" alt.*?<a href="([^ >]+)">([^<[(]+)<\/a>.*?<strong><span style="[^"]+">([^<>0-9(]+)\(([0-9]{4}).*?<\/(p|div)>([^<>]+)'
        patron = r'div class="card-image">.*?<img src="([^ ]+)" alt.*?<a href="([^ >]+)">([^<[(]+)<\/a>.*?<strong><span style="[^"]+">([^<>0-9(]+)\(([0-9]{4}).*?</(?:p|div)>(.*?)</div'
        listGroups = ['thumb', 'url', 'title', 'genre', 'year', 'plot']
        action = 'episodios'

@@ -164,9 +167,43 @@ def peliculas(item):

def episodios(item):
    item.contentType = 'episode'
    return support.scrape(item, patron_block=[r'<article class="sequex-post-content">(.*?)<\/article>',
                                              r'<div class="sp-head[a-z ]*?" title="Espandi">[^<>]*?</div>(.*?)<div class="spdiv">\[riduci\]</div>'],
                          patron='<p>([0-9]+(?:×|&#215;)[0-9]+)(.*?)(?:<\/p>|<br)', listGroups=['title', 'url'])
    itemlist = []

    data = httptools.downloadpage(item.url).data
    matches = scrapertoolsV2.find_multiple_matches(data,
                                                   r'(<div class="sp-head[a-z ]*?" title="Espandi">[^<>]*?</div>.*?)<div class="spdiv">\[riduci\]</div>')

    for match in matches:
        support.log(match)
        blocks = scrapertoolsV2.find_multiple_matches(match, '(?:<p>)(.*?)(?:</p>|<br)')
        season = scrapertoolsV2.find_single_match(match, r'title="Espandi">.*?STAGIONE\s+\d+([^<>]+)').strip()

        for block in blocks:
            episode = scrapertoolsV2.find_single_match(block, r'([0-9]+(?:×|&#215;)[0-9]+)').strip()
            seasons_n = scrapertoolsV2.find_single_match(block, r'<strong>STAGIONE\s+\d+([^<>]+)').strip()

            if seasons_n:
                season = seasons_n

            if not episode: continue

            season = re.sub(r'–|&#8211;', "-", season)
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType=item.contentType,
                     title="[B]" + episode + "[/B] " + season,
                     fulltitle=episode + " " + season,
                     show=episode + " " + season,
                     url=block,
                     extra=item.extra,
                     thumbnail=item.thumbnail,
                     infoLabels=item.infoLabels
                     ))

    support.videolibrary(itemlist, item)

    return itemlist


def findvideos(item):
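The rewritten episodios() above chains three regex passes: expandable season blocks, then the <p>/<br> rows inside each block, then the episode number inside each row. A minimal standalone sketch of that pairing follows — the sample HTML and variable names are invented for illustration, not part of the commit:

# Hedged sketch of the episodios() extraction logic on an invented CB01-style block.
import re

sample_block = ('<div class="sp-head" title="Espandi">STAGIONE 1 ITA</div>'
                '<p>1&#215;01 <a href="http://example/ep1">Link</a></p>')

season = re.search(r'title="Espandi">.*?STAGIONE\s+\d+([^<>]+)', sample_block).group(1).strip()
for block in re.findall(r'(?:<p>)(.*?)(?:</p>|<br)', sample_block):
    m = re.search(r'([0-9]+(?:×|&#215;)[0-9]+)', block)   # site uses both the glyph and the entity
    if m:
        print m.group(1), season   # -> 1&#215;01 ITA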
@@ -177,7 +214,7 @@ def findvideos(item):

    def load_links(itemlist, re_txt, color, desc_txt, quality=""):
        streaming = scrapertoolsV2.find_single_match(data, re_txt).replace('"', '')
        support.log('STREAMING=',streaming)
        support.log('STREAMING=', streaming)
        patron = '<td><a.*?href=(.*?) (?:target|rel)[^>]+>([^<]+)<'
        matches = re.compile(patron, re.DOTALL).findall(streaming)
        for scrapedurl, scrapedtitle in matches:

@@ -238,7 +275,7 @@ def findvideos(item):

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

@@ -253,7 +290,7 @@ def findvid_serie(item):
    def load_vid_series(html, item, itemlist, blktxt):
        logger.info('HTML' + html)
        patron = '<a href="([^"]+)"[^=]+="_blank"[^>]+>(.*?)</a>'
        # Estrae i contenuti
        # Estrae i contenuti
        matches = re.compile(patron, re.DOTALL).finditer(html)
        for match in matches:
            scrapedurl = match.group(1)

@@ -310,6 +347,7 @@ def findvid_serie(item):

    return itemlist


def play(item):
    support.log()
    itemlist = []

@@ -144,7 +144,7 @@ def findvideos(item):
        itemlist.append(itemlist1[i])
    tmdb.set_infoLabels(itemlist, True)
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay


@@ -226,7 +226,7 @@ def findvideos(item): # Questa def. deve sempre essere nominata findvideos
    itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Necessario per FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Necessario per AutoPlay
    autoplay.start(itemlist, item)

@@ -4,77 +4,187 @@
# ------------------------------------------------------------
import re

from channels import filtertools
from core import scrapertools, servertools, httptools
from channels import filtertools, support, autoplay
from core import scrapertools, servertools, httptools, scrapertoolsV2
from core.item import Item
from platformcode import config
from core import tmdb

host = 'https://cinemastreaming.info'
host = 'https://cinemastreaming.icu'

IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango']
list_quality = ['1080p', '1080p 3D', 'SD', 'CAM', 'default']

headers = [['Referer', host]]


def mainlist(item):
    log()
    support.log()

    # Menu Principale
    itemlist = []
    support.menu(itemlist, 'Film bold', 'peliculas', host + '/film/')
    support.menu(itemlist, 'Per genere submenu', 'menu', host, args="Film per Genere")
    support.menu(itemlist, 'Anime bold', 'peliculas', host + '/category/anime/')
    support.menu(itemlist, 'Serie TV bold', 'peliculas', host + '/serie-tv/', contentType='episode')
    support.menu(itemlist, 'Ultime Uscite submenu', 'peliculas', host + "/stagioni/", "episode", args='latests')
    support.menu(itemlist, 'Ultimi Episodi submenu', 'peliculas_latest_ep', host + "/episodi/", "episode", args='lateste')
    support.menu(itemlist, '[COLOR blue]Cerca...[/COLOR]', 'search')

    itemlist = [Item(channel = item.channel,
                     contentType = 'movie',
                     title = 'Film',
                     url = host + '/film/',
                     action = 'video',
                     thumbnail = '',
                     fanart = ''
                     ),
                ]

    return itemlist

def video(item):
    log()

    itemlist = []  # Creo una lista Vuota

    # Carica la pagina
    data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertools.find_single_match(data, r'<main>(.*?)<\/main>')
    block = re.sub('\t|\n', '', block)

    patron = r'<article.*?class="TPost C">.*?<a href="([^"]+)">.*?src="([^"]+)".*?>.*?<h3 class="Title">([^<]+)<\/h3>(.*?)<\/article>'
    matches = re.compile(patron, re.DOTALL).findall(block)

    for scrapedurl, scrapedthumb, scrapedtitle, scrapedinfo in matches:
        log('Info Block', scrapedinfo)
        patron = r'<span class="Year">(.*?)<\/span>.*?<span class="Vote.*?">(.*?)<\/span>.*?<div class="Description"><p>(.*?)<\/p>.*?<p class="Genre.*?">(.*?)<\/p><p class="Director.*?">.*?<a.*?>(.*?)<\/a>.*?<p class="Actors.*?">(.*?)<\/p>'
        info = re.compile(patron, re.DOTALL).findall(scrapedinfo)
        for year, rating, plot, genre, director, cast in info:
            genre = scrapertools.find_multiple_matches(genre, r'<a.*?>(.*?)<\/a>')
            cast = scrapertools.find_multiple_matches(cast, r'<a.*?>(.*?)<\/a>')

            infoLabels = {}
            infoLabels['Year'] = year
            infoLabels['Rating'] = rating
            infoLabels['Plot'] = plot
            infoLabels['Genre'] = genre
            infoLabels['Director'] = director
            infoLabels['Cast'] = cast

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType=item.contentType,
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumb,
                 infoLabels = infoLabels,
                 show=scrapedtitle))
    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)

    return itemlist


def log(stringa1="", stringa2=""):
    import inspect, os
    from platformcode import logger
    logger.info("[" + os.path.basename(__file__) + "] - [" + inspect.stack()[1][3] + "] " + str(stringa1) + str(stringa2))
def peliculas(item):
    support.log()
    list_groups = ["url", "thumb", "title", "year", "rating", "duration"]

    patron = r'<article.*?"TPost C".*?href="([^"]+)".*?img.*?src="([^"]+)".*?<h3.*?>([^<]+).*?Year">'

    if item.args == "latests":
        patron += r'([^<]+)'
    else:
        patron += r'(\d{4}).*?AAIco-star.*?>([^<]+).*?AAIco-access_time">([^<]+).*?Qlty'

    patron_next = r'page-numbers current.*?href="([^"]+)"'

    if item.contentType == "movie":
        patron += r'\">([^<]+)'
        list_groups.append("quality")

    action = "findvideos" if item.contentType == "movie" else "episodios"

    return support.scrape(item, patron, list_groups, patronNext=patron_next, action=action)


def peliculas_latest_ep(item):

    patron = r'<article.*?"TPost C".*?href="([^"]+)".*?img.*?src="([^"]+)"'
    patron += r'.*?class="ClB">([^<]+)<\/span>([^<]+).*?<h3.*?>([^<]+)'

    data = httptools.downloadpage(item.url).data

    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []
    for scrapedurl, scrapedthumbnail, scrapednum, scrapedep, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType=item.contentType,
                 title="[B]" + scrapednum + "[/B]" + scrapedep + " - " + scrapedtitle,
                 fulltitle=scrapedep + " " + scrapedtitle,
                 show=scrapedep + " " + scrapedtitle,
                 url=scrapedurl,
                 extra=item.extra,
                 thumbnail="http:" + scrapedthumbnail,
                 infoLabels=item.infoLabels
                 ))

    support.nextPage(itemlist, item, data, r'page-numbers current.*?href="([^"]+)"')

    return itemlist


def peliculas_menu(item):
    itemlist = peliculas(item)
    return itemlist[:-1]


def episodios(item):
    patron = r'<td class="MvTbTtl"><a href="([^"]+)">(.*?)<\/a>.*?>\d{4}<'
    list_groups = ["url", "title", "year"]

    itemlist = support.scrape(item, patron, list_groups)

    for itm in itemlist:
        fixedtitle = scrapertools.get_season_and_episode(itm.url)
        itm.title = fixedtitle + " - " + itm.title
        itm.fulltitle = fixedtitle + " - " + itm.fulltitle

    return itemlist


def menu(item):
    patron_block = r'<ul class="sub-menu">.*?</ul>'
    patron = r'menu-category-list"><a href="([^"]+)">([^<]+)<'
    list_groups = ["url", "title"]

    return support.scrape(item, patron, list_groups, blacklist="Anime", action="peliculas_menu", patron_block=patron_block)


def search(item, texto):
    support.log("s=", texto)
    item.url = host + "/?s=" + texto
    try:
        return peliculas(item)
    # Continua la ricerca in caso di errore
    except Exception, e:
        import traceback
        traceback.print_stack()
        support.log(str(e))
        return []


def newest(categoria):
    support.log("newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "series":
            item.url = host + "/episodi/"
            item.action = "peliculas"
            item.args = "lateste"
            item.contentType = "episode"
            itemlist = peliculas(item)

            if itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Continua la ricerca in caso di errore
    except Exception, e:
        import traceback
        traceback.print_stack()
        support.log(str(e))
        return []

    return itemlist


def findvideos(item):

    if item.quality.lower() in ["ended", "canceled", "returning series"]:
        return episodios(item)

    itemlist = []
    data = scrapertoolsV2.decodeHtmlentities(httptools.downloadpage(item.url).data)
    btns = re.compile(r'data-tplayernv="Opt.*?><span>([^<]+)</span><span>([^<]+)</span>', re.DOTALL).findall(data)
    matches = re.compile(r'<iframe.*?src="([^"]+trembed=[^"]+)', re.DOTALL).findall(data)
    for i, scrapedurl in enumerate(matches):

        scrapedurl = scrapertoolsV2.decodeHtmlentities(scrapedurl)
        patron = r'<iframe.*?src="([^"]+)"'
        link_data = httptools.downloadpage(scrapedurl).data
        url = scrapertoolsV2.find_single_match(link_data, patron)

        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 contentType=item.contentType,
                 title="[B]" + btns[i][0] + "[/B] - " + btns[i][1],
                 fulltitle=btns[i][0] + " " + btns[i][1],
                 show=btns[i][0] + " " + btns[i][1],
                 url=url,
                 extra=item.extra,
                 infoLabels=item.infoLabels,
                 server=btns[i][0],
                 contentQuality=btns[i][1].replace('Italiano - ', ''),
                 ))

    if item.contentType == "movie":
        support.videolibrary(itemlist, item)
    autoplay.start(itemlist, item)

    return itemlist

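One fragile spot in the findvideos() above: btns[i] assumes the label spans and the trembed iframes come in equal number and order, so a page with a missing span would raise IndexError. A hedged alternative — a sketch only, not what the commit ships — is to pair the two lists positionally and stop at the shorter one:

# Sketch: zip() truncates at the shorter list, so a missing label skips
# the link instead of raising IndexError. Names mirror the channel code above.
for (server_label, quality_label), scrapedurl in zip(btns, matches):
    scrapedurl = scrapertoolsV2.decodeHtmlentities(scrapedurl)
    # ... resolve the inner iframe and build the Item exactly as above ...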
@@ -189,7 +189,7 @@ def findvideos(item):
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

@@ -211,7 +211,7 @@ def findvideos(item):

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

@@ -237,7 +237,7 @@ def findvideos(item):
        pass

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

@@ -162,7 +162,7 @@ def categorie(item):
    matches = re.compile(patron, re.DOTALL).findall(blocco)

    for value in matches:
        url = "%s/genere/%s" % (host, value)
        url = host + '/filter?genere=' + value
        itemlist.append(
            Item(channel=item.channel,
                 action="serietv",

@@ -268,7 +268,7 @@ def findvideos(item):
    itemlist = servertools.get_servers_itemlist(itemlist)

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

@@ -397,7 +397,7 @@ def findvideos(item):
    itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

@@ -236,7 +236,7 @@ def findvideos(item):

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

12
channels/filmontv.json
Normal file
@@ -0,0 +1,12 @@
{
    "id": "filmontv",
    "name": "Film in tv",
    "language": ["ita"],
    "active": false,
    "adult": false,
    "thumbnail": null,
    "banner": null,
    "categories": [],
    "settings": [],
    "channel": false
}
89
channels/filmontv.py
Normal file
@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale film in tv
# Ringraziamo Icarus crew
# ------------------------------------------------------------

import re
import urllib

from channels import support
from core import httptools, scrapertools, tmdb
from core.item import Item
from platformcode import logger

host = "https://www.comingsoon.it"

TIMEOUT_TOTAL = 60


def mainlist(item):
    logger.info(" mainlist")
    itemlist = [Item(channel=item.channel,
                     title=support.typo("IN ONDA ADESSO bold color kod"),
                     action="tvoggi",
                     url="%s/filmtv/" % host,
                     thumbnail=""),
                Item(channel=item.channel,
                     title="Mattina",
                     action="tvoggi",
                     url="%s/filmtv/oggi/mattina/" % host,
                     thumbnail=""),
                Item(channel=item.channel,
                     title="Pomeriggio",
                     action="tvoggi",
                     url="%s/filmtv/oggi/pomeriggio/" % host,
                     thumbnail=""),
                Item(channel=item.channel,
                     title="Sera",
                     action="tvoggi",
                     url="%s/filmtv/oggi/sera/" % host,
                     thumbnail=""),
                Item(channel=item.channel,
                     title="Notte",
                     action="tvoggi",
                     url="%s/filmtv/oggi/notte/" % host,
                     thumbnail="")]

    return itemlist


def tvoggi(item):
    logger.info("filmontv tvoggi")
    itemlist = []

    # Carica la pagina
    data = httptools.downloadpage(item.url).data

    # Estrae i contenuti
    patron = r'<div class="col-xs-5 box-immagine">[^<]+<img src="([^"]+)[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<.*?titolo">(.*?)<[^<]+<[^<]+<[^<]+<[^>]+><br />(.*?)<[^<]+</div>[^<]+<[^<]+<[^<]+<[^>]+>[^<]+<[^<]+<[^<]+<[^>]+><[^<]+<[^>]+>:\s*([^<]+)[^<]+<[^<]+[^<]+<[^<]+[^<]+<[^<]+[^<]+[^>]+>:\s*([^<]+)'
    # patron = r'<div class="col-xs-5 box-immagine">[^<]+<img src="([^"]+)[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<.*?titolo">(.*?)<[^<]+<[^<]+<[^<]+<[^>]+><br />(.*?)<[^<]+</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedtitle, scrapedtv, scrapedgender, scrapedyear in matches:
        # for scrapedthumbnail, scrapedtitle, scrapedtv in matches:
        scrapedurl = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        infoLabels = {}
        infoLabels["year"] = scrapedyear
        itemlist.append(
            Item(channel=item.channel,
                 action="do_search",
                 extra=urllib.quote_plus(scrapedtitle) + '{}' + 'movie',
                 title=scrapedtitle + "[COLOR yellow] " + scrapedtv + "[/COLOR]",
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 contentTitle=scrapedtitle,
                 contentType='movie',
                 infoLabels=infoLabels,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist


def do_search(item):
    from channels import search
    return search.do_search(item)
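tvoggi() hands each film to the global search channel through an extra payload: the URL-quoted title and a content type joined by '{}'. A small round-trip sketch of that format — how channels/search.py actually unpacks it is an assumption here, since that file is not part of this diff:

# Hedged sketch of the extra payload built in tvoggi(); unpacking side assumed.
import urllib

extra = urllib.quote_plus('Il padrino') + '{}' + 'movie'   # as built in the channel code
texto, content_type = extra.split('{}')                    # assumed consumer-side split
print urllib.unquote_plus(texto), content_type             # -> Il padrino movie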
@@ -183,7 +183,7 @@ def findvideos(item):
    itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Necessario per FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Necessario per AutoPlay
    autoplay.start(itemlist, item)

@@ -244,7 +244,7 @@ def findvideos(item): # Questa def. deve sempre essere nominata findvideos
    itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Necessario per FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Necessario per AutoPlay
    autoplay.start(itemlist, item)

@@ -120,7 +120,7 @@ def context(item, list_language=None, list_quality=None, exist=False):
    _context = []

    if access():
        dict_data = {"title": "FILTRO: Configurar", "action": "config_item", "channel": "filtertools"}
        dict_data = {"title": config.get_localized_string(60426), "action": "config_item", "channel": "filtertools"}
        if list_language:
            dict_data["list_language"] = list_language
        if list_quality:

@@ -139,10 +139,10 @@ def context(item, list_language=None, list_quality=None, exist=False):

    if item.action == "play":
        if not exist:
            _context.append({"title": "FILTRO: Añadir '%s'" % item.language, "action": "save_from_context",
            _context.append({"title": config.get_localized_string(60427) % item.language, "action": "save_from_context",
                             "channel": "filtertools", "from_channel": item.channel})
        else:
            _context.append({"title": "FILTRO: Borrar '%s'" % item.language, "action": "delete_from_context",
            _context.append({"title": config.get_localized_string(60428) % item.language, "action": "delete_from_context",
                             "channel": "filtertools", "from_channel": item.channel})

    return _context

@@ -150,7 +150,7 @@ def context(item, list_language=None, list_quality=None, exist=False):

def show_option(itemlist, channel, list_language, list_quality):
    if access():
        itemlist.append(Item(channel=__channel__, title="[COLOR %s]Configurar filtro para series...[/COLOR]" %
        itemlist.append(Item(channel=__channel__, title=config.get_localized_string(60429) %
                             COLOR.get("parent_item", "auto"), action="load",
                             list_language=list_language,
                             list_quality=list_quality, from_channel=channel))

@@ -377,17 +377,16 @@ def mainlist(channel, list_language, list_quality):

        idx += 1
        name = dict_series.get(tvshow, {}).get(TAG_NAME, tvshow)
        activo = " (desactivado)"
        activo = config.get_localized_string(60433)
        if dict_series[tvshow][TAG_ACTIVE]:
            activo = ""
        title = "Configurar [COLOR %s][%s][/COLOR]%s" % (tag_color, name, activo)
        title = config.get_localized_string(60434) % (tag_color, name, activo)

        itemlist.append(Item(channel=__channel__, action="config_item", title=title, show=name,
                             list_language=list_language, list_quality=list_quality, from_channel=channel))

    if len(itemlist) == 0:
        itemlist.append(Item(channel=channel, action="mainlist", title="No existen filtros, busca una serie y "
                                                                       "pulsa en menú contextual 'FILTRO: Configurar'"))
        itemlist.append(Item(channel=channel, action="mainlist", title=config.get_localized_string(60435)))

    return itemlist

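All the filtertools hunks above apply one refactor: hardcoded Spanish UI strings become numeric IDs resolved through config.get_localized_string() at display time. A minimal stand-in sketch of the lookup — the dictionary below is invented (the real function resolves IDs against Kodi's language files); the IDs and Spanish texts are the ones visible in the diff:

# Invented stand-in for platformcode.config.get_localized_string().
STRINGS = {
    60426: "FILTRO: Configurar",
    60427: "FILTRO: Añadir '%s'",
    60428: "FILTRO: Borrar '%s'",
}

def get_localized_string(string_id):
    # Real implementation: look the id up in the active Kodi language file.
    return STRINGS.get(string_id, "")

print get_localized_string(60427) % 'IT'   # -> FILTRO: Añadir 'IT'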
@@ -377,7 +377,7 @@ def findvideos(item):

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

@@ -303,7 +303,7 @@ def findvideos(item):

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

@@ -163,7 +163,7 @@ def findvideos(item):
    support.log()

    itemlist = support.server(item, data=item.url)
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    autoplay.start(itemlist, item)

@@ -315,7 +315,7 @@ def findvideos(item):

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

@@ -131,7 +131,7 @@ def findvideos(item):
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

@@ -248,7 +248,7 @@ def findvideos(item):

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

@@ -262,7 +262,7 @@ def findvideos(item):

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

@@ -142,7 +142,7 @@ def findvideos(item):

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

44
channels/serietvsubita.json
Normal file
@@ -0,0 +1,44 @@
{
    "id": "serietvsubita",
    "name": "Serie TV Sub ITA",
    "active": false,
    "adult": false,
    "language": ["ita"],
    "thumbnail": "http://serietvsubita.xyz/wp-content/uploads/2012/07/logo.jpg",
    "banner": "http://serietvsubita.xyz/wp-content/uploads/2012/07/logo.jpg",
    "categories": ["tvshow"],
    "settings": [
        {
            "id": "channel_host",
            "type": "text",
            "label": "Host del canale",
            "default": "http://serietvsubita.xyz/",
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Includi ricerca globale",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_series",
            "type": "bool",
            "label": "Includi in Novità - Serie TV",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_italiano",
            "type": "bool",
            "label": "Includi in Novità - Italiano",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
352
channels/serietvsubita.py
Normal file
@@ -0,0 +1,352 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per Serie Tv Sub ITA
# Ringraziamo Icarus crew
# ----------------------------------------------------------
import inspect
import re
import time

import channelselector
from channels import autoplay, support, filtertools
from core import httptools, tmdb, scrapertools
from core.item import Item
from platformcode import logger, config

host = config.get_setting("channel_host", 'serietvsubita')
headers = [['Referer', host]]

IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['gounlimited','verystream','streamango','openload']
list_quality = ['default']


def mainlist(item):
    support.log(item.channel + 'mainlist')
    itemlist = []
    support.menu(itemlist, 'Serie TV bold', 'lista_serie', host,'tvshow')
    support.menu(itemlist, 'Novità submenu', 'peliculas_tv', host,'tvshow')
    support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow')
    support.menu(itemlist, 'Cerca', 'search', host,'tvshow')

    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)

    itemlist.append(
        Item(channel='setting',
             action="channel_config",
             title=support.typo("Configurazione Canale color lime"),
             config=item.channel,
             folder=False,
             thumbnail=channelselector.get_thumb('setting_0.png'))
    )

    return itemlist


# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
    scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('Game of Thrones –','')
    year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
    if year:
        scrapedtitle = scrapedtitle.replace('(' + year + ')', '')

    return scrapedtitle.strip()


# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------
def lista_serie(item):
    support.log(item.channel + " lista_serie")
    itemlist = []

    PERPAGE = 15

    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    # Descarga la pagina
    data = httptools.downloadpage(item.url).data

    # Extrae las entradas
    patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        scrapedplot = ""
        scrapedthumbnail = ""
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break
        title = cleantitle(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodes",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Paginazione
    if len(matches) >= p * PERPAGE:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=item.channel,
                 action='lista_serie',
                 contentType=item.contentType,
                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
                 url=scrapedurl,
                 args=item.args,
                 thumbnail=support.thumb()))

    return itemlist

# ================================================================================================================


# ----------------------------------------------------------------------------------------------------------------
def episodes(item):
    support.log(item.channel + " episodes")
    itemlist = []

    data = httptools.downloadpage(item.url).data

    patron = '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>.*?'
    patron += '<p><a href="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedtitle = cleantitle(scrapedtitle)
        title = scrapedtitle.split(" S0")[0].strip()
        title = title.split(" S1")[0].strip()
        title = title.split(" S2")[0].strip()

        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 contentSerieName=title,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Paginazione
    patron = '<strong class=\'on\'>\d+</strong>\s*<a href="([^<]+)">\d+</a>'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action='episodes',
                 contentType=item.contentType,
                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
                 url=next_page,
                 args=item.args,
                 thumbnail=support.thumb()))

    # support.videolibrary(itemlist,item,'bold color kod')

    return itemlist

# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
    support.log(item.channel + " findvideos")

    data = httptools.downloadpage(item.url).data

    patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for keeplinks, id in matches:
        headers = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
                   ['Referer', keeplinks]]

        html = httptools.downloadpage(keeplinks, headers=headers).data
        data += str(scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"'))

    itemlist = support.server(item, data=data)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    autoplay.start(itemlist, item)

    return itemlist

# ================================================================================================================


# ----------------------------------------------------------------------------------------------------------------
def peliculas_tv(item):
    logger.info("icarus serietvsubita peliculas_tv")
    itemlist = []

    data = httptools.downloadpage(item.url).data
    logger.debug(data)
    patron = '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        if "FACEBOOK" in scrapedtitle or "RAPIDGATOR" in scrapedtitle:
            continue
        if scrapedtitle == "WELCOME!":
            continue
        scrapedthumbnail = ""
        scrapedplot = ""
        scrapedtitle = cleantitle(scrapedtitle)
        title = scrapedtitle.split(" S0")[0].strip()
        title = title.split(" S1")[0].strip()
        title = title.split(" S2")[0].strip()
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 contentSerieName=title,
                 plot=scrapedplot,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)


    # Paginazione
    patron = '<strong class=\'on\'>\d+</strong>\s*<a href="([^<]+)">\d+</a>'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        if item.extra == "search_tv":
            next_page = next_page.replace('&amp;', '&')
        itemlist.append(
            Item(channel=item.channel,
                 action='peliculas_tv',
                 contentType=item.contentType,
                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
                 url=next_page,
                 args=item.args,
                 extra=item.extra,
                 thumbnail=support.thumb()))


    return itemlist


# ================================================================================================================


# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
    logger.info('serietvsubita' + " newest" + categoria)
    itemlist = []
    item = Item()
    item.url = host
    item.extra = 'serie'
    try:
        if categoria == "series":
            itemlist = peliculas_tv(item)

    # Continua la ricerca in caso di errore
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
    logger.info(item.channel + " search")
    itemlist = []
    item.extra = "search_tv"

    item.url = host + "/?s=" + texto + "&op.x=0&op.y=0"

    try:
        return peliculas_tv(item)

    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------
def list_az(item):
    support.log(item.channel+" list_az")
    itemlist = []
    PERPAGE = 50

    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    # Scarico la pagina
    data = httptools.downloadpage(item.url).data

    # Articoli
    patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        scrapedplot = ""
        scrapedthumbnail = ""
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break
        title = cleantitle(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodes",
                 title=title,
                 url=scrapedurl,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Paginazione
    if len(matches) >= p * PERPAGE:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=item.channel,
                 action='list_az',
                 contentType=item.contentType,
                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
                 url=scrapedurl,
                 args=item.args,
                 extra=item.extra,
                 thumbnail=support.thumb()))

    return itemlist

# ================================================================================================================
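lista_serie() and list_az() above page a single downloaded category list client-side by appending a '{}<page>' token to the URL of the "next page" item. The convention in isolation — PERPAGE and the sample URL below are illustrative:

# Hedged sketch of the '{}'-token paging used by lista_serie()/list_az().
PERPAGE = 15

url = 'http://example/serie{}2'        # "next page" url built on the previous pass
p = 1
if '{}' in url:
    url, p = url.split('{}')           # strip the token, recover the page number
    p = int(p)

start, end = (p - 1) * PERPAGE, p * PERPAGE   # slice of matches shown on this page
next_url = url + '{}' + str(p + 1)            # url stored in the next "next page" item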
44
channels/serietvu.json
Normal file
@@ -0,0 +1,44 @@
{
    "id": "serietvu",
    "name": "SerieTVU",
    "active": true,
    "adult": false,
    "language": ["ita"],
    "thumbnail": "https://www.serietvu.club/wp-content/themes/gurarjbar/images/logo.png",
    "banner": "https://www.serietvu.club/wp-content/themes/gurarjbar/images/logo.png",
    "categories": ["tvshow"],
    "settings": [
        {
            "id": "channel_host",
            "type": "text",
            "label": "Host del canale",
            "default": "https://www.serietvu.club",
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Includi ricerca globale",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_series",
            "type": "bool",
            "label": "Includi in Novità - Serie TV",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_italiano",
            "type": "bool",
            "label": "Includi in Novità - Italiano",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
296
channels/serietvu.py
Normal file
@@ -0,0 +1,296 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per SerieTVU
# Ringraziamo Icarus crew
# ----------------------------------------------------------
import re

import channelselector
from channels import autoplay, support, filtertools
from core import httptools, tmdb, scrapertools
from core.item import Item
from platformcode import logger, config

host = config.get_setting("channel_host", 'serietvu')
headers = [['Referer', host]]

IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['speedvideo']
list_quality = ['default']


def mainlist(item):
    support.log(item.channel + 'mainlist')
    itemlist = []
    support.menu(itemlist, 'Serie TV bold', 'lista_serie', "%s/category/serie-tv" % host,'tvshow')
    support.menu(itemlist, 'Novità submenu', 'latestep', "%s/ultimi-episodi" % host,'tvshow')
    # support.menu(itemlist, 'Nuove serie color azure', 'lista_serie', "%s/category/serie-tv" % host,'tvshow')
    support.menu(itemlist, 'Categorie', 'categorie', host,'tvshow')
    support.menu(itemlist, 'Cerca', 'search', host,'tvshow')

    # autoplay.init(item.channel, list_servers, list_quality)
    # autoplay.show_option(item.channel, itemlist)

    itemlist.append(
        Item(channel='setting',
             action="channel_config",
             title=support.typo("Configurazione Canale color lime"),
             config=item.channel,
             folder=False,
             thumbnail=channelselector.get_thumb('setting_0.png'))
    )

    return itemlist


# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
    scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('Game of Thrones –','').replace('Flash 2014','Flash')
    year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
    if year:
        scrapedtitle = scrapedtitle.replace('(' + year + ')', '')

    return scrapedtitle.strip()


# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------
def lista_serie(item):
    support.log(item.channel + " lista_serie")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    patron = r'<div class="item">\s*<a href="([^"]+)" data-original="([^"]+)" class="lazy inner">'
    patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedimg, scrapedtitle in matches:
        infoLabels = {}
        year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
        if year:
            infoLabels['year'] = year
        scrapedtitle = cleantitle(scrapedtitle)

        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedimg,
                 show=scrapedtitle,
                 infoLabels=infoLabels,
                 contentType='tvshow',
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagine
    support.nextPage(itemlist,item,data,'<li><a href="([^"]+)">Pagina successiva')

    return itemlist

# ================================================================================================================


# ----------------------------------------------------------------------------------------------------------------
def episodios(item):
    support.log(item.channel + " episodios")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    patron = r'<option value="(\d+)"[\sselected]*>.*?</option>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for value in matches:
        patron = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % value
        blocco = scrapertools.find_single_match(data, patron)

        patron = r'(<a data-id="\d+[^"]*" data-href="([^"]+)" data-original="([^"]+)" class="[^"]+">)[^>]+>[^>]+>([^<]+)<'
        matches = re.compile(patron, re.DOTALL).findall(blocco)
        for scrapedextra, scrapedurl, scrapedimg, scrapedtitle in matches:
            number = scrapertools.decodeHtmlentities(scrapedtitle.replace("Episodio", "")).strip()
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=value + "x" + number.zfill(2),
                     fulltitle=scrapedtitle,
                     contentType="episode",
                     url=scrapedurl,
                     thumbnail=scrapedimg,
                     extra=scrapedextra,
                     folder=True))

    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title=support.typo(config.get_localized_string(30161) + ' bold color kod'),
                 thumbnail=support.thumb(),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 contentSerieName=item.fulltitle,
                 show=item.show))

    return itemlist

# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
    support.log(item.channel + " findvideos")

    itemlist = support.server(item, data=item.url)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    autoplay.start(itemlist, item)

    return itemlist

# ================================================================================================================


# ----------------------------------------------------------------------------------------------------------------
def findepisodevideo(item):
    support.log(item.channel + " findepisodevideo")

    # Download Pagina
    data = httptools.downloadpage(item.url, headers=headers).data

    # Prendo il blocco specifico per la stagione richiesta
    patron = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % item.extra[0][0]
    blocco = scrapertools.find_single_match(data, patron)

    # Estraggo l'episodio
    patron = r'<a data-id="%s[^"]*" data-href="([^"]+)" data-original="([^"]+)" class="[^"]+">' % item.extra[0][1].lstrip("0")
    matches = re.compile(patron, re.DOTALL).findall(blocco)

    itemlist = support.server(item, data=matches[0][0])
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    autoplay.start(itemlist, item)

    return itemlist


# ================================================================================================================


# ----------------------------------------------------------------------------------------------------------------
def latestep(item):
    support.log(item.channel + " latestep")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    patron = r'<div class="item">\s*<a href="([^"]+)" data-original="([^"]+)" class="lazy inner">'
    patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<small>([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedimg, scrapedtitle, scrapedinfo in matches:
        infoLabels = {}
        year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
        if year:
            infoLabels['year'] = year
        scrapedtitle = cleantitle(scrapedtitle)

        infoLabels['tvshowtitle'] = scrapedtitle

        episodio = re.compile(r'(\d+)x(\d+)', re.DOTALL).findall(scrapedinfo)
        title = "%s %s" % (scrapedtitle, scrapedinfo)
        itemlist.append(
            Item(channel=item.channel,
                 action="findepisodevideo",
                 title=title,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 extra=episodio,
                 thumbnail=scrapedimg,
                 show=scrapedtitle,
                 contentTitle=scrapedtitle,
                 contentSerieName=title,
                 infoLabels=infoLabels,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist


# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
    logger.info('serietvu' + " newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "series":
            item.url = host + "/ultimi-episodi"
            item.action = "latestep"
            itemlist = latestep(item)

            if itemlist[-1].action == "latestep":
                itemlist.pop()

    # Continua la ricerca in caso di errore
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
    logger.info(item.channel + " search")
    item.url = host + "/?s=" + texto
    try:
        return lista_serie(item)
    # Continua la ricerca in caso di errore
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------
def categorie(item):
    logger.info(item.channel +" categorie")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    blocco = scrapertools.find_single_match(data, r'<h2>Sfoglia</h2>\s*<ul>(.*?)</ul>\s*</section>')
    patron = r'<li><a href="([^"]+)">([^<]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(blocco)

    for scrapedurl, scrapedtitle in matches:
        if scrapedtitle == 'Home Page' or scrapedtitle == 'Calendario Aggiornamenti':
            continue
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_serie",
                 title=scrapedtitle,
                 contentType="tv",
                 url="%s%s" % (host, scrapedurl),
                 thumbnail=item.thumbnail,
                 folder=True))

    return itemlist

# ================================================================================================================
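latestep() and findepisodevideo() above pass the season/episode pair through item.extra: re.findall on a '2x05'-style label yields a list of tuples, which findepisodevideo() later indexes as extra[0][0] and extra[0][1]. The round trip in isolation — the sample label is invented:

# Hedged sketch of the season/episode hand-off between latestep() and findepisodevideo().
import re

episodio = re.findall(r'(\d+)x(\d+)', '2x05')   # -> [('2', '05')], stored in item.extra
season = episodio[0][0]                         # '2' -> data-id of the season block
episode = episodio[0][1].lstrip('0')            # '5' -> data-id prefix of the episode <a>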
@@ -135,25 +135,24 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
    matches = scrapertoolsV2.find_multiple_matches(block, patron)
    log('MATCHES =', matches)

    known_keys = ['url', 'title', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating']
    for match in matches:
        if len(listGroups) > len(match):  # to fix a bug
            match = list(match)
            match.extend([''] * (len(listGroups)-len(match)))
            match.extend([''] * (len(listGroups) - len(match)))

        scrapedurl = url_host+match[listGroups.index('url')] if 'url' in listGroups else ''
        scrapedtitle = match[listGroups.index('title')] if 'title' in listGroups else ''
        scrapedthumb = match[listGroups.index('thumb')] if 'thumb' in listGroups else ''
        scrapedquality = match[listGroups.index('quality')] if 'quality' in listGroups else ''
        scrapedyear = match[listGroups.index('year')] if 'year' in listGroups else ''
        scrapedplot = match[listGroups.index('plot')] if 'plot' in listGroups else ''
        scrapedduration = match[listGroups.index('duration')] if 'duration' in listGroups else ''
        scrapedgenre = match[listGroups.index('genre')] if 'genre' in listGroups else ''
        scrapedrating = match[listGroups.index('rating')] if 'rating' in listGroups else ''
        scraped = {}
        for kk in known_keys:
            val = match[listGroups.index(kk)] if kk in listGroups else ''
            if kk == "url":
                val = url_host + val
            scraped[kk] = val

        title = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
        plot = scrapertoolsV2.decodeHtmlentities(scrapedplot)
        if scrapedquality:
            longtitle = '[B]' + title + '[/B] [COLOR blue][' + scrapedquality + '][/COLOR]'
        title = scrapertoolsV2.decodeHtmlentities(scraped["title"]).strip()
        plot = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["plot"]))

        if scraped["quality"]:
            longtitle = '[B]' + title + '[/B] [COLOR blue][' + scraped["quality"] + '][/COLOR]'
        else:
            longtitle = '[B]' + title + '[/B]'

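The hunk above replaces nine ad-hoc scraped* variables with one scraped dict filled from the regex groups declared in listGroups, padding missing keys with '' and prefixing 'url' with the host. A self-contained sketch of that pattern; all values below are invented:

    # Minimal sketch: map regex capture groups to named fields.
    known_keys = ['url', 'title', 'thumb', 'quality']
    listGroups = ['title', 'url']          # order of groups in the pattern
    match = ('Example Show', '/serie/example')
    url_host = 'https://example.org'       # hypothetical host

    scraped = {}
    for kk in known_keys:
        val = match[listGroups.index(kk)] if kk in listGroups else ''
        if kk == 'url':
            val = url_host + val           # absolute URL, like in the diff
        scraped[kk] = val

    print(scraped['title'])  # Example Show
    print(scraped['url'])    # https://example.org/serie/example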
@@ -161,37 +160,48 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
            infolabels = item.infoLabels
        else:
            infolabels = {}
        if scrapedyear:
            infolabels['year'] = scrapedyear
        if scrapedplot:
        if scraped["year"]:
            infolabels['year'] = scraped["year"]
        if scraped["plot"]:
            infolabels['plot'] = plot
        if scrapedduration:
            matches = scrapertoolsV2.find_multiple_matches(scrapedduration, r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)')
        if scraped["duration"]:
            matches = scrapertoolsV2.find_multiple_matches(scraped["duration"], r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)')
            for h, m in matches:
                scrapedduration = int(h) * 60 + int(m)
            infolabels['duration'] = int(scrapedduration) * 60
        if scrapedgenre:
            genres = scrapertoolsV2.find_multiple_matches(scrapedgenre, '[A-Za-z]+')
            infolabels['genre'] = ", ".join(genres)
        if scrapedrating:
            infolabels['rating'] = scrapertoolsV2.decodeHtmlentities(scrapedrating)
                scraped["duration"] = int(h) * 60 + int(m)
            if not matches:
                scraped["duration"] = scrapertoolsV2.find_single_match(scraped["duration"], r'(\d+)')
            infolabels['duration'] = int(scraped["duration"]) * 60
        if scraped["genere"]:
            genres = scrapertoolsV2.find_multiple_matches(scraped["genere"], '[A-Za-z]+')
            infolabels['genere'] = ", ".join(genres)
        if scraped["rating"]:
            infolabels['rating'] = scrapertoolsV2.decodeHtmlentities(scraped["rating"])

        if not scrapedtitle in blacklist:
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     contentType=item.contentType,
                     title=longtitle,
                     fulltitle=title,
                     show=title,
                     quality=scrapedquality,
                     url=scrapedurl,
                     infoLabels=infolabels,
                     thumbnail=scrapedthumb
                     )
        if scraped["title"] not in blacklist:
            it = Item(
                channel=item.channel,
                action=action,
                contentType=item.contentType,
                title=longtitle,
                fulltitle=title,
                show=title,
                quality=scraped["quality"],
                url=scraped["url"],
                infoLabels=infolabels,
                thumbnail=scraped["thumb"]
            )

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
            for lg in list(set(listGroups).difference(known_keys)):
                it.__setattr__(lg, match[listGroups.index(lg)])

            itemlist.append(it)

    if (item.contentType == "episode" and (action != "findvideos" and action != "play")) \
            or (item.contentType == "movie" and action != "play"):
        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    else:
        for it in itemlist:
            it.infoLabels = item.infoLabels

    if patronNext:
        nextPage(itemlist, item, data, patronNext, 2)

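The new duration handling first looks for an hours/minutes pair ('1h 30', '1:30', '1.30' and similar), falls back to the first bare number read as minutes, and multiplies by 60 because Kodi infoLabels expect seconds. Roughly equivalent standalone code, using re in place of scrapertoolsV2 (function name is ours):

    import re

    def duration_to_seconds(text):
        # Try an "hours <sep> minutes" pair first, e.g. "1h 30", "1:30", "1.30".
        pairs = re.findall(r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)', text)
        minutes = 0
        for h, m in pairs:
            minutes = int(h) * 60 + int(m)
        if not pairs:
            # Fall back to the first bare number, read as minutes.
            found = re.search(r'(\d+)', text)
            minutes = int(found.group(1)) if found else 0
        return minutes * 60  # Kodi infoLabels take the duration in seconds

    print(duration_to_seconds('1h 30'))   # 5400
    print(duration_to_seconds('95 min'))  # 5700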
@@ -474,7 +474,7 @@ def findvideos(item):

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay


@@ -91,6 +91,11 @@ def getchanneltypes(view="thumb_"):
                         channel_type=channel_type, viewmode="thumbnails",
                         thumbnail=get_thumb("channels_%s.png" % channel_type, view)))

    itemlist.append(Item(title='Oggi in TV', channel="filmontv", action="mainlist", view=view,
                         category=title, channel_type="all", thumbnail=get_thumb("on_the_air.png", view),
                         viewmode="thumbnails"))


    itemlist.append(Item(title=config.get_localized_string(70685), channel="community", action="mainlist", view=view,
                         category=title, channel_type="all", thumbnail=get_thumb("channels_community.png", view),
                         viewmode="thumbnails"))

@@ -22,6 +22,9 @@ from core import channeltools
from core import trakt_tools, scrapertoolsV2
from core.item import Item
from platformcode import logger
import xbmcaddon
addon = xbmcaddon.Addon('plugin.video.kod')
downloadenabled = addon.getSetting('downloadenabled')


class XBMCPlayer(xbmc.Player):
@@ -591,7 +594,7 @@ def set_context_commands(item, parent_item):
                                 (sys.argv[0], item.clone(action="add_pelicula_to_library",
                                                          from_action=item.action).tourl())))

    if item.channel != "downloads":
    if item.channel != "downloads" and downloadenabled != "false":
        # Descargar pelicula
        if item.contentType == "movie" and item.contentTitle:
            context_commands.append((config.get_localized_string(60354), "XBMC.RunPlugin(%s?%s)" %

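xbmcaddon's getSetting() returns a string, so a boolean setting arrives as the literal "true" or "false"; that is why the new guard compares downloadenabled against "false" instead of treating it as a bool. The same gate as a small helper (the helper name is ours; the addon id and setting id match the diff):

    import xbmcaddon  # only available inside Kodi

    def download_allowed():
        # Kodi stores boolean settings as the strings "true" / "false";
        # an unset value comes back as "" and counts as enabled here.
        value = xbmcaddon.Addon('plugin.video.kod').getSetting('downloadenabled')
        return value != "false"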
@@ -5439,11 +5439,11 @@ msgid "Disclaimer"
msgstr "Disclaimer"

msgctxt "#70691"
msgid "Utilizzando la funzione di download dichiari di essere in possesso di una copia fisica e di utilizzare questa funzione come backup dello stesso."
msgid "Using the download function you declare that you have a physical copy and use this function as a backup of the same."
msgstr "Utilizzando la funzione di download dichiari di essere in possesso di una copia fisica e di utilizzare questa funzione come backup dello stesso."

msgctxt "#70692"
msgid "Il team di KOD non si assume alcuna responsabilità dell'uso che viene fatto di questa funzione proposta"
msgid "The KOD team assumes no responsibility for the use that is made of this proposed function"
msgstr "Il team di KOD non si assume alcuna responsabilità dell'uso che viene fatto di questa funzione proposta"

msgctxt "#70693"

@@ -4,7 +4,7 @@
    "ignore_urls": [],
    "patterns": [
        {
            "pattern": "https://gounlimited.to/embed-(.*?).html",
            "pattern": "https://gounlimited.to/(?:embed-|)([a-z0-9]+)(?:.html|)",
            "url": "https://gounlimited.to/embed-\\1.html"
        }
    ]

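The widened pattern accepts both the plain and the embed form of a gounlimited.to link and rewrites either one to the canonical embed URL. A quick check of that behaviour:

    import re

    pattern = r'https://gounlimited.to/(?:embed-|)([a-z0-9]+)(?:.html|)'
    template = 'https://gounlimited.to/embed-\\1.html'

    for url in ('https://gounlimited.to/abc123',
                'https://gounlimited.to/embed-abc123.html'):
        if re.search(pattern, url):
            print(re.sub(pattern, template, url))
    # Both print: https://gounlimited.to/embed-abc123.html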
version.json (new file, 9 lines)
@@ -0,0 +1,9 @@
{
    "update": {
        "name": "Kodi on Demand",
        "version": "101",
        "tag": "1.0.1",
        "date": "03/05/2019",
        "changes": "Added Updater"
    }
}
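The updater code itself is not part of this diff, so the following is only a guess at how the manifest might be consumed: fetch version.json and compare its numeric "version" field against the installed build (the URL handling and local version number are invented):

    import json
    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib2 import urlopen         # Python 2, as used by Kodi of this era

    LOCAL_VERSION = 100  # hypothetical installed build number

    def update_available(manifest_url):
        # Compare the remote "version" field against the installed build.
        data = json.loads(urlopen(manifest_url).read().decode('utf-8'))
        remote = int(data['update']['version'])
        return remote > LOCAL_VERSION, data['update'].get('changes', '')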