fix: fastsubita for the decorators
@@ -1,70 +1,20 @@
 {
     "id": "fastsubita",
     "name": "Fastsubita",
-    "language": ["ita"],
+    "language": ["vosi"],
     "active": true,
     "adult": false,
     "thumbnail": "fastsubita.png",
     "banner": "fastsubita.png",
-    "categories": ["tvshow", "vosi"],
+    "categories": ["tvshow", "vos"],
     "settings": [
-        {
-            "id": "channel_host",
-            "type": "text",
-            "label": "Host del canale",
-            "default": "http://fastsubita.com",
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Includi ricerca globale",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_newest_series",
-            "type": "bool",
-            "label": "Includi in Novità - Serie TV",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_newest_italiano",
-            "type": "bool",
-            "label": "Includi in Novità - Italiano",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "checklinks",
-            "type": "bool",
-            "label": "Verifica se i link esistono",
-            "default": false,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "checklinks_number",
-            "type": "list",
-            "label": "Numero de link da verificare",
-            "default": 1,
-            "enabled": true,
-            "visible": "eq(-1,true)",
-            "lvalues": [ "5", "10", "15", "20" ]
-        },
-        {
-            "id": "filter_languages",
-            "type": "list",
-            "label": "Mostra link in lingua...",
-            "default": 0,
-            "enabled": true,
-            "visible": true,
-            "lvalues": ["Non filtrare","IT"]
-        }
+        {
+            "id": "include_in_newest_italiano",
+            "type": "bool",
+            "label": "@70727",
+            "default": false,
+            "enabled": false,
+            "visible": false
+        }
     ]
 }
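For reference, the slimmed-down channel definition on the + side is plain JSON, so it can be sanity-checked with Python's standard json module. A minimal sketch; the on-disk file name is an assumption, the diff does not show it:

    import json

    with open('fastsubita.json') as f:      # path assumed for illustration
        channel = json.load(f)              # raises a ValueError if the JSON is malformed

    assert channel['id'] == 'fastsubita'
    assert channel['language'] == ['vosi']  # per the + side of the hunk above
    for setting in channel['settings']:
        # every surviving setting still carries the keys the runtime reads
        assert {'id', 'type', 'label', 'default'} <= set(setting)
    print('fastsubita.json looks structurally sound')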
@@ -3,146 +3,130 @@
 # Canale per fastsubita
 # Thanks Icarus crew & Alfa addon & 4l3x87
 # ------------------------------------------------------------
-from core import scrapertools, httptools, tmdb, support
+"""
+
+Problemi noti che non superano il test del canale:
+- indicare i problemi
+
+Avvisi:
+- Eventuali avvisi per i tester
+
+Ulteriori info:
+- SOLO SUB-ITA
+
+---------------------------------------------------
+Per i DEV:
+- nella ricerca, anche globale, esce la voce "successivo"
+  ma apre la maschera per l'inserimento testo
+
+"""
+from core import support, httptools, scrapertoolsV2, tmdb
 from core.item import Item
 from core.support import log
-from platformcode import config, logger
+from platformcode import config #, logger
 
 __channel__ = 'fastsubita'
-host = config.get_setting("channel_host", __channel__)
-IDIOMAS = {'Italiano': 'IT'}
-list_language = IDIOMAS.values()
+host = config.get_channel_url(__channel__)
+headers = [['Referer', host]]
+##IDIOMAS = {'Italiano': 'IT'}
+##list_language = IDIOMAS.values()
 list_servers = ['verystream', 'openload', 'speedvideo', 'wstream', 'flashx', 'vidoza', 'vidtome']
 list_quality = ['default']
 
-headers = [
-    ['Host', host.split("//")[-1].split("/")[0]],
-    ['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'],
-    ['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'],
-    ['Accept-Language', 'en-US,en;q=0.5'],
-    ['Accept-Encoding', 'gzip, deflate'],
-    ['Referer', host],
-    ['DNT', '1'],
-    ['Connection', 'keep-alive'],
-    ['Upgrade-Insecure-Requests', '1'],
-    ['Cache-Control', 'max-age=0']
-]
-
 PERPAGE = 15
 
 
+@support.menu
 def mainlist(item):
+    tvshow = ['',
+              ('Novità', ['', 'peliculas']),
+              ('Archivio A-Z ', ['', 'list_az'])
+              ]
+
+    return locals()
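The commit title points at the new support.menu / support.scrape decorators. Their real implementation lives in core.support and is not part of this diff; the sketch below is only a hypothetical reconstruction of the `return locals()` pattern they rely on: the decorated function declares its configuration as local variables, and the decorator turns that dict into concrete behaviour.

    # Illustrative stand-in for the decorator pattern; NOT the actual
    # core.support code, which this diff does not show.
    import functools

    def menu(func):
        @functools.wraps(func)
        def wrapper(item):
            params = func(item)  # the decorated mainlist returns locals()
            entries = []
            # skip the leading '' placeholder, then map each tuple to an action
            for label, args in params.get('tvshow', [])[1:]:
                entries.append({'title': label, 'action': args[1]})
            return entries
        return wrapper

    @menu
    def mainlist(item):
        tvshow = ['',
                  ('Novità', ['', 'peliculas']),
                  ('Archivio A-Z ', ['', 'list_az'])]
        return locals()

    print(mainlist(None))  # [{'title': 'Novità', 'action': 'peliculas'}, ...]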
 
 
 # ----------------------------------------------------------------------------------------------------------------
+@support.scrape
+def peliculas(item):
+    support.log(item)
+    #dbg # decommentare per attivare web_pdb
+    deflang = 'Sub-ITA'
+
+    action = 'findvideos'
+    blacklist = ['']
+    patron = r'<div class="featured-thumb"> <a href="(?P<url>[^"]+)" title="(?:(?P<title>.+?)[ ]?(?P<episode>\d+×\d+).+?“(?P<title2>.+?)”).+?">(?P<lang>Sub-ITA)?'
+    patronBlock = r'<main id="main" class="site-main" role="main">(?P<block>.*?)<nav class="navigation pagination" role="navigation">'
+    patronNext = '<a class="next page-numbers" href="(.*?)">Successivi'
+
+    def itemHook(item):
+        if item.args == 'newest':
+            item.show = item.title# + support.typo('Sub-ITA', '_ [] color kod')
+        return item
+
+    ## debug = True # True per testare le regex sul sito
+    return locals()
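The patron above does all the work through named groups. As a standalone illustration (the HTML snippet is invented, but shaped like the site's markup), Python's re module pulls the fields apart like this:

    # Standalone demo of the named-group extraction driven by @support.scrape.
    import re

    html = ('<div class="featured-thumb"> <a href="http://fastsubita.com/ep1" '
            'title="Gotham 5×12 “The Beginning” sottotitoli italiano">Sub-ITA')
    patron = (r'<div class="featured-thumb"> <a href="(?P<url>[^"]+)" '
              r'title="(?:(?P<title>.+?)[ ]?(?P<episode>\d+×\d+).+?“(?P<title2>.+?)”).+?">(?P<lang>Sub-ITA)?')

    m = re.search(patron, html)
    print(m.group('url'))      # http://fastsubita.com/ep1
    print(m.group('title'))    # Gotham
    print(m.group('episode'))  # 5×12
    print(m.group('title2'))   # The Beginning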
 
 
+@support.scrape
+def episodios(item):
+    support.log(item)
+    #dbg
+    item.args = 'episodios'
+    deflang = 'Sub-ITA'
+    action = 'findvideos'
+    blacklist = ['']
+    patron = r'<div class="featured-thumb"> <a href="(?P<url>[^"]+)" title="(?:(?P<title>.+?)[ ]?(?P<episode>\d+×\d+).+?“(?P<title2>.+?)”).+?">(?P<lang>Sub-ITA)?'
+    patronBlock = r'<main id="main" class="site-main" role="main">(?P<block>.*?)</main>'
+    patronNext = '<a class="next page-numbers" href="(.*?)">Successivi'
+
+    ## debug = True
+    return locals()
+
+
+def list_az(item):
+    log()
+    itemlist = []
+
+    alphabet = dict()
+
+    for i, (scrapedurl, scrapedtitle) in enumerate(serietv()):
+        letter = scrapedtitle[0].upper()
+        if letter not in alphabet:
+            alphabet[letter] = []
+        alphabet[letter].append(str(scrapedurl) + '||' + str(scrapedtitle))
+
+    for letter in sorted(alphabet):
+        itemlist.append(
+            Item(channel=item.channel,
+                 action="lista_serie",
+                 url='\n\n'.join(alphabet[letter]),
+                 title=letter,
+                 fulltitle=letter))
+
+    return itemlist
 
 
 def cleantitle(scrapedtitle):
-    scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
+    scrapedtitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle.strip())
     scrapedtitle = scrapedtitle.replace('’', '\'').replace('×', 'x').replace('×', 'x').replace('"', "'")
 
     return scrapedtitle.strip()
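As a quick illustration of what the replace chain in cleantitle() normalizes, typographic quotes and the multiplication sign the site uses in episode numbering (the scrapertoolsV2.decodeHtmlentities step is skipped here because it needs the repo):

    raw = ' L’Amica Geniale 1×04 "quarto episodio" '
    out = raw.strip().replace('’', '\'').replace('×', 'x').replace('"', "'")
    print(out)  # L'Amica Geniale 1x04 'quarto episodio'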
 
 
 # ================================================================================================================
 
 
-def newest(categoria):
-    log()
-    itemlist = []
-    item = Item()
-    try:
-        if categoria == "series":
-            item.url = host
-            # item.action = "serietv"
-            itemlist = peliculas(item)
-
-            if itemlist[-1].action == "serietv":
-                itemlist.pop()
-
-    # Continua la ricerca in caso di errore
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("{0}".format(line))
-        return []
-
-    return itemlist
-
-
-def peliculas(item):
-    log()
-    itemlist = []
-
-    matches, data = support.match(item, r'<h3 class="entry-title title-font"><a href="([^"]+)" rel="bookmark">(.*?)<',
-                                  headers=headers)
-
-    for scrapedurl, scrapedtitle in matches:
-        scrapedplot = ""
-        scrapedthumbnail = ""
-        scraped_1 = scrapedtitle.split("×")[0][:-2]
-        scrapedtitle = cleantitle(scrapedtitle)
-        episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')[0]
-        scrapedtitle = scrapedtitle.replace(scraped_1, "")
-
-        infoLabels = {}
-        infoLabels['season'] = episode[1]
-        infoLabels['episode'] = episode[2]
-
-        if "http:" in scrapedurl:
-            scrapedurl = scrapedurl
-        else:
-            scrapedurl = "http:" + scrapedurl
-
-        serie = cleantitle(scraped_1)
-        title = serie + " - " + infoLabels['season'] + "x" + infoLabels['episode'] + " " + support.typo('Sub-ITA', '_ [] color kod')
-
-        itemlist.append(
-            Item(channel=item.channel,
-                 action="findvideos",
-                 contentTpye="tvshow",
-                 title=title,
-                 fulltitle=title,
-                 url=scrapedurl,
-                 thumbnail=scrapedthumbnail,
-                 plot=scrapedplot,
-                 show=serie,
-                 extra=item.extra,
-                 contentSerieName=serie,
-                 contentLanguage='Sub-ITA',
-                 infoLabels=infoLabels,
-                 folder=True))
-    support.checkHost(item, itemlist)
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-
-    # Paginazione
-    support.nextPage(itemlist, item, data, '<a class="next page-numbers" href="(.*?)">Successivi')
-
-    return itemlist
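The episode tuple used above comes from a grouped regex; find_multiple_matches, as used here, behaves like re.findall, so the extraction can be reproduced with the standard library alone:

    # How the ('5x12', '5', '12') tuple is produced from a cleaned title.
    import re

    scrapedtitle = 'Gotham 5x12 "The Beginning"'
    episode = re.findall(r'((\d*)x(\d*))', scrapedtitle)[0]
    print(episode)                 # ('5x12', '5', '12')
    season, number = episode[1], episode[2]
    print(season, number)          # 5 12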
 
 
 def serietv():
     log()
 
     itemlist = []
-    matches = support.match(Item(), r'<option class="level-([0-9]?)" value="([^"]+)">([^<]+)</option>',
+    matches = support.match(Item(), r'<option class="level-0" value="([^"]+)">([^<]+)</option>',
                             r'<select\s*?name="cat"\s*?id="cat"\s*?class="postform"\s*?>(.*?)</select>', headers,
                             url="%s/" % host)[0]
     index = 0
 
-    for level, cat, title in matches:
+    for cat, title in matches:
         title = cleantitle(title)
         url = '%s?cat=%s' % (host, cat)
-        if int(level) > 0:
-            itemlist[index - 1][0] += '{|}' + url
-            continue
+        ## if int(level) > 0:
+        ##     itemlist[index - 1][0] += '{|}' + url
+        ##     continue
 
         itemlist.append([url, title])
 
@@ -163,10 +147,12 @@ def lista_serie(item):
         series = item.url.split('\n\n')
         matches = []
         for i, serie in enumerate(series):
-            matches.append(serie.split('||'))
+            matches.append(serie.decode('utf-8').split('||'))
         series = matches
+        support.log("SERIE ALF :", series)
     else:
         series = serietv()
+        support.log("SERIE ALF 2 :", series)
 
     for i, (scrapedurl, scrapedtitle) in enumerate(series):
         if (p - 1) * PERPAGE > i: continue
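The PERPAGE check above is a plain index window: page p keeps the items whose index i satisfies (p - 1) * PERPAGE <= i < p * PERPAGE. In isolation:

    # The lista_serie() paging arithmetic on synthetic data.
    PERPAGE = 15
    series = [('url%d' % i, 'title%d' % i) for i in range(40)]

    p = 2
    page = [s for i, s in enumerate(series)
            if (p - 1) * PERPAGE <= i < p * PERPAGE]
    print(len(page), page[0][1], page[-1][1])  # 15 title15 title29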
 
@@ -185,7 +171,7 @@ def lista_serie(item):
                  plot=scrapedplot,
                  show=scrapedtitle,
                  extra=item.extra,
-                 contentType='episode',
+                 contentType='tvshow',
                  originalUrl=scrapedurl,
                  folder=True))
 
@@ -199,145 +185,64 @@ def lista_serie(item):
     return itemlist
 
 
-def findvideos(item):
-    log()
-    itemlist = []
-
-    # data = httptools.downloadpage(item.url, headers=headers).data
-    patronBlock = '<div class="entry-content">(?P<block>.*)<footer class="entry-footer">'
-    # bloque = scrapertools.find_single_match(data, patronBlock)
-
-    patron = r'<a href="([^"]+)">'
-    # matches = re.compile(patron, re.DOTALL).findall(bloque)
-
-    matches, data = support.match(item, patron, patronBlock, headers)
-
-    for scrapedurl in matches:
-        if 'is.gd' in scrapedurl:
-            resp = httptools.downloadpage(
-                scrapedurl, follow_redirects=False)
-            data += resp.headers.get("location", "") + '\n'
-
-    return support.server(item, data)
-
-
-def search(item, texto):
-    log(texto)
-    itemlist = []
-    try:
-        series = serietv()
-        for i, (scrapedurl, scrapedtitle) in enumerate(series):
-            if texto.upper() in scrapedtitle.upper():
-                scrapedthumbnail = ""
-                scrapedplot = ""
-                itemlist.append(
-                    Item(channel=item.channel,
-                         extra=item.extra,
-                         action="episodios",
-                         title=scrapedtitle,
-                         url=scrapedurl,
-                         thumbnail=scrapedthumbnail,
-                         fulltitle=scrapedtitle,
-                         show=scrapedtitle,
-                         plot=scrapedplot,
-                         contentType='episode',
-                         originalUrl=scrapedurl,
-                         folder=True))
-        return itemlist
-    # Continua la ricerca in caso di errore
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
+############## Fondo Pagina
+# da adattare al canale
+def search(item, text):
+    support.log('search', item)
+    itemlist = []
+    text = text.replace(' ', '+')
+    item.url = host + '?s=' + text
+    # bisogna inserire item.contentType per la ricerca globale
+    # se il canale è solo film, si può omettere, altrimenti bisogna aggiungerlo e discriminare.
+    try:
+        item.contentType = 'tvshow'
+        return peliculas(item)
+    # Se captura la excepcion, para no interrumpir al buscador global si un canal falla
+    except:
+        import sys
+        for line in sys.exc_info():
+            log('search log:', line)
+        return []
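The manual space-to-plus replacement in the new search() is the minimal form of query encoding; urllib.parse.quote_plus from the standard library gives the same result for this input and also escapes other reserved characters:

    from urllib.parse import quote_plus

    text = 'il trono di spade'
    print('http://fastsubita.com?s=' + text.replace(' ', '+'))
    print('http://fastsubita.com?s=' + quote_plus(text))  # same output here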
 
 
-# ----------------------------------------------------------------------------------------------------------------
-
-def list_az(item):
-    log()
-
-    alphabet = dict()
-
-    for i, (scrapedurl, scrapedtitle) in enumerate(serietv()):
-        letter = scrapedtitle[0].upper()
-        if letter not in alphabet:
-            alphabet[letter] = []
-        alphabet[letter].append(scrapedurl + '||' + scrapedtitle)
-
-    for letter in sorted(alphabet):
-        itemlist.append(
-            Item(channel=item.channel,
-                 action="lista_serie",
-                 url='\n\n'.join(alphabet[letter]),
-                 title=letter,
-                 fulltitle=letter))
-
-    return itemlist
+# da adattare al canale
+# inserire newest solo se il sito ha la pagina con le ultime novità/aggiunte
+# altrimenti NON inserirlo
+def newest(categoria):
+    support.log('newest ->', categoria)
+    itemlist = []
+    item = Item()
+    try:
+        item.contentType = 'tvshow'
+        item.args = 'newest'
+        item.url = host
+        item.action = 'peliculas'
+        itemlist = peliculas(item)
+        if itemlist[-1].action == 'peliculas':
+            itemlist.pop()
+    # Continua la ricerca in caso di errore
+    except:
+        import sys
+        for line in sys.exc_info():
+            support.log('newest log: ', '{0}'.format(line))
+        return []
+
+    return itemlist
 
 
 # ================================================================================================================
-# ----------------------------------------------------------------------------------------------------------------
-def episodios(item, itemlist=[]):
-    log()
-    urls = item.url.split('{|}')
-    patron = r'<h3 class="entry-title title-font"><a href="([^"]+)" rel="bookmark">(.*?)<'
-    matches, data = support.match(item, patron, headers=headers, url=urls[0])
-    urls.pop(0)
-
-    for scrapedurl, scrapedtitle in matches:
-        scrapedplot = ""
-        scrapedthumbnail = ""
-        scrapedtitle = cleantitle(scrapedtitle)
-        episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')[0]
-
-        season = episode[1].lstrip('0').zfill(2)
-
-        infoLabels = {}
-        infoLabels['season'] = season
-        infoLabels['episode'] = episode[2]
-        title = infoLabels['season'] + 'x' + infoLabels['episode'] + " " + support.typo('Sub-ITA', '_ [] color kod')
-
-        if "http:" not in scrapedurl:
-            scrapedurl = "http:" + scrapedurl
-
-        itemlist.append(
-            Item(channel=item.channel,
-                 action="findvideos",
-                 contentTpye="episode",
-                 title=title,
-                 fulltitle=scrapedtitle,
-                 url=scrapedurl,
-                 thumbnail=scrapedthumbnail,
-                 plot=scrapedplot,
-                 show=item.show,
-                 extra=item.extra,
-                 infoLabels=infoLabels,
-                 folder=True))
-
-    next_page = scrapertools.find_single_match(data, r'<a class="next page-numbers" href="(.*?)">Successivi')
-    if next_page != "":
-        urls.insert(0, next_page)
-
-    if len(urls) > 0:
-        item.url = '{|}'.join(urls)
-        itemlist = episodios(item, itemlist)
-    else:
-        cleanItemlist = []
-        episodes = []
-        for episode in itemlist:
-            if episode.title in episodes: continue
-            cleanItemlist.append(episode)
-            episodes.append(episode.title)
-        itemlist = cleanItemlist
-
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-    item.url = item.originalUrl
-    support.videolibrary(itemlist, item, 'bold color kod')
-
-    return itemlist
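The removed episodios() threaded its pending page URLs through item.url as a '{|}'-separated queue, pushing each "Successivi" link to the front before recursing. The mechanism in isolation (hostnames invented):

    # Sketch of the '{|}' URL queue that drove the old pagination.
    urls = 'http://host/?cat=7{|}http://host/?cat=9'.split('{|}')
    current = urls.pop(0)             # page being scraped now
    next_page = current + '&paged=2'  # as found by the "Successivi" regex
    urls.insert(0, next_page)         # next page jumps the queue
    print('{|}'.join(urls))           # http://host/?cat=7&paged=2{|}http://host/?cat=9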
+def findvideos(item):
+    support.log('findvideos ->', item)
+    patronBlock = '<div class="entry-content">(?P<block>.*)<footer class="entry-footer">'
+    patron = r'<a href="([^"]+)">'
+    matches, data = support.match(item, patron, patronBlock, headers)
+
+    if item.args != 'episodios':
+        item.infoLabels['mediatype'] = 'episode'
+
+    for scrapedurl in matches:
+        if 'is.gd' in scrapedurl:
+            resp = httptools.downloadpage(scrapedurl, follow_redirects=False)
+            data += resp.headers.get("location", "") + '\n'
+
+    return support.server(item, data)
 
 # ================================================================================================================
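The is.gd handling in the new findvideos() requests the short URL without following the redirect and reads the real target from the Location header. A self-contained sketch with http.client, which never follows redirects on its own (the short code is invented; inside the repo, httptools.downloadpage with follow_redirects=False plays this role):

    import http.client

    conn = http.client.HTTPSConnection('is.gd')
    conn.request('HEAD', '/example')    # hypothetical short code
    resp = conn.getresponse()
    # a 301/302 status carries the expanded URL in the Location header
    print(resp.status, resp.getheader('Location'))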