Merge remote-tracking branch 'origin/master'

This commit is contained in:
marco
2019-11-01 13:45:34 +01:00
18 changed files with 677 additions and 1208 deletions

View File

@@ -1,55 +1,55 @@
{
"altadefinizione01": "https://www.altadefinizione01.cc",
"altadefinizione01_club": "https://www.altadefinizione01.cc",
"altadefinizione01_link": "http://altadefinizione01.town",
"altadefinizioneclick": "https://altadefinizione.cloud",
"altadefinizionehd": "https://altadefinizione.wtf",
"animeforce": "https://ww1.animeforce.org",
"animeleggendari": "https://animepertutti.com",
"animespace": "http://www.animespace.tv",
"animestream": "https://www.animeworld.it",
"animesubita": "http://www.animesubita.org",
"animetubeita": "http://www.animetubeita.com",
"animeworld": "https://www.animeworld.tv",
"casacinema": "https://www.casacinema.uno",
"casacinemainfo": "https://www.casacinema.info",
"cb01anime": "https://www.cineblog01.ink",
"cinemalibero": "https://www.cinemalibero.best",
"documentaristreamingda": "https://documentari-streaming-da.com",
"dreamsub": "https://www.dreamsub.stream",
"eurostreaming": "https://eurostreaming.pink",
"fastsubita": "http://fastsubita.com",
"filmgratis": "https://www.filmaltadefinizione.net",
"filmigratis": "https://filmigratis.org",
"filmpertutti": "https://www.filmpertutti.pub",
"filmsenzalimiti": "https://filmsenzalimiti.best",
"filmsenzalimiticc": "https://www.filmsenzalimiti.press",
"filmstreaming01": "https://filmstreaming01.com",
"filmstreamingita": "http://filmstreamingita.live",
"guardafilm": "http://www.guardafilm.top",
"guardarefilm": "https://www.guardarefilm.red",
"guardaserie_stream": "https://guardaserie.co",
"guardaseriecc": "https://guardaserie.site",
"guardaserieclick": "https://www.guardaserie.media",
"guardogratis": "https://guardogratis.net",
"ilgeniodellostreaming": "https://ilgeniodellostreaming.se",
"italiafilm": "https://www.italia-film.pw",
"italiafilmhd": "https://italiafilm.info",
"italiaserie": "https://italiaserie.org",
"itastreaming": "https://itastreaming.film",
"mondolunatico": "http://mondolunatico.org",
"mondolunatico2": "https://mondolunatico.org/stream",
"mondoserietv": "https://mondoserietv.com",
"piratestreaming": "https://www.piratestreaming.media",
"polpotv": "https://polpo.tv",
"seriehd": "https://www.seriehd.zone",
"serietvonline": "https://serietvonline.tech",
"serietvsubita": "http://serietvsubita.xyz",
"serietvu": "https://www.serietvu.club",
"streamingaltadefinizione": "https://www.popcornstream.best",
"streamtime": "https://t.me/s/StreamTime",
"tantifilm": "https://www.tantifilm.eu",
"toonitalia": "https://toonitalia.org",
"vedohd": "https://vedohd.video",
"altadefinizione01": "https://www.altadefinizione01.cc",
"altadefinizione01_club": "https://www.altadefinizione01.cc",
"altadefinizione01_link": "http://altadefinizione01.gift",
"altadefinizioneclick": "https://altadefinizione.cloud",
"altadefinizionehd": "https://altadefinizione.wtf",
"animeforce": "https://ww1.animeforce.org",
"animeleggendari": "https://animepertutti.com",
"animespace": "http://www.animespace.tv",
"animestream": "https://www.animeworld.it",
"animesubita": "http://www.animesubita.org",
"animetubeita": "http://www.animetubeita.com",
"animeworld": "https://www.animeworld.tv",
"casacinema": "https://www.casacinema.uno",
"casacinemainfo": "https://www.casacinema.info",
"cb01anime": "https://www.cineblog01.ink",
"cinemalibero": "https://www.cinemalibero.best",
"documentaristreamingda": "https://documentari-streaming-da.com",
"dreamsub": "https://www.dreamsub.stream",
"eurostreaming": "https://eurostreaming.pink",
"fastsubita": "http://fastsubita.com",
"filmgratis": "https://www.filmaltadefinizione.net",
"filmigratis": "https://filmigratis.org",
"filmpertutti": "https://www.filmpertutti.link",
"filmsenzalimiti": "https://filmsenzalimiti.best",
"filmsenzalimiticc": "https://www.filmsenzalimiti.press",
"filmstreaming01": "https://filmstreaming01.com",
"filmstreamingita": "http://filmstreamingita.live",
"guardafilm": "http://www.guardafilm.top",
"guardarefilm": "https://www.guardarefilm.red",
"guardaserie_stream": "https://guardaserie.co",
"guardaseriecc": "https://guardaserie.site",
"guardaserieclick": "https://www.guardaserie.media",
"guardogratis": "https://guardogratis.net",
"ilgeniodellostreaming": "https://ilgeniodellostreaming.se",
"italiafilm": "https://www.italia-film.pw",
"italiafilmhd": "https://italiafilm.info",
"italiaserie": "https://italiaserie.org",
"itastreaming": "https://itastreaming.film",
"mondolunatico": "http://mondolunatico.org",
"mondolunatico2": "https://mondolunatico.org:443/stream",
"mondoserietv": "https://mondoserietv.com",
"piratestreaming": "https://www.piratestreaming.media",
"polpotv": "https://polpo.tv",
"seriehd": "https://www.seriehd.moda",
"serietvonline": "https://serietvonline.best",
"serietvsubita": "http://serietvsubita.xyz",
"serietvu": "https://www.serietvu.club",
"streamingaltadefinizione": "https://www.popcornstream.best",
"streamtime": "https://t.me/s/StreamTime",
"tantifilm": "https://www.tantifilm.eu",
"toonitalia": "https://toonitalia.org",
"vedohd": "https://vedohd.video",
"vvvvid": "https://www.vvvvid.it"
}
}

View File

@@ -5,10 +5,10 @@
"""
Problemi noti che non superano il test del canale:
-
-
Avvisi:
- Sub-ita è nel titolo, lascia il puntatore sulla locandina
- Sub-ita non è nel titolo, lascia il puntatore sulla locandina
per visualizzare il titolo completo!
Novità:
@@ -17,21 +17,10 @@
"""
# CANCELLARE Ciò CHE NON SERVE per il canale, lascia il codice commentato
# ma fare PULIZIA quando si è finito di testarlo
# Qui gli import
#import re
# per l'uso dei decoratori, per i log, e funzioni per siti particolari
from core import support
# in caso di necessità
from core import scrapertoolsV2, httptools
from core.item import Item
#from lib import unshortenit
##### fine import
host = ""
headers = ""
@@ -45,17 +34,13 @@ def findhost():
host = host[:-1]
findhost()
# server di esempio...
list_servers = ['supervideo', 'streamcherry','rapidvideo', 'streamango', 'openload']
# quality di esempio
list_quality = ['default', 'HD', '3D', '4K', 'DVD', 'SD']
@support.menu
def mainlist(item):
support.log(item)
# Ordine delle voci
# Voce FILM, puoi solo impostare l'url
film = ['',
('Al Cinema', ['/category/in-sala/', 'peliculas', '']),
('Novità', ['/category/nuove-uscite/', 'peliculas', '']),
@@ -69,9 +54,8 @@ def mainlist(item):
@support.scrape
def peliculas(item):
support.log(item)
#dbg # decommentare per attivare web_pdb
#support.dbg() # decommentare per attivare web_pdb
## action = 'episodios'
blacklist = ['']
if item.args != 'search':
patron = r'<div class="col-mt-5 postsh">[^<>]+<div class="poster-media-card">[^<>]+<a href="(?P<url>[^"]+)" title="(?P<title>.+?)[ ]?(?:\[(?P<lang>Sub-ITA)\])?".*?<img(?:.+?)?src="(?P<thumb>[^"]+)"'
@@ -79,16 +63,17 @@ def peliculas(item):
else:
patron = r'<li class="col-md-12 itemlist">.*?<a href="(?P<url>[^"]+)" title="(?P<title>[^"]+)".*?<img src="(?P<thumb>[^"]+)".*?Film dell"anno: (?P<year>\d{4})(?:[\d\-]+)?</p> <p class="text-list">(?P<plot>[^<>]+)</p>'
patronBlock = r'<ul class="search-results-content infinite">(?P<block>.*?)</section>'
patronNext = '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right"'
#debug = True # True per testare le regex sul sito
patronNext = '<a href="([^"]+)"\s+?><i class="glyphicon glyphicon-chevron-right"'
#debug = True
return locals()
@support.scrape
def genres(item):
support.log(item)
#dbg
#support.dbg()
action = 'peliculas'
blacklist = ['']

View File

@@ -7,5 +7,6 @@
"thumbnail": "dreamsub.png",
"banner": "dreamsub.png",
"categories": ["anime", "vos"],
"not_active": ["include_in_newest"],
"settings": []
}

View File

@@ -6,46 +6,49 @@
"""
Problemi noti che non superano il test del canale:
- indicare i problemi
- Nessuno noto!
Avvisi:
- Gli episodi sono divisi per pagine di 20
- In Novità->Anime, cliccare sulla home il bottone "Ultime inserite"
Avvisi per i tester:
1. Gli episodi sono divisi per pagine di 20
2. In Novità->Anime, cliccare sulla home il bottone "Ultime inserite"
Se avete più titoli in KOD, ridimensiona il browser in modo che si vedano i titoli
a gruppi di 3 e ricontrollare, è un problema del sito.
3.Passaggi per Aggiungere in videoteca e/o scaricare Serie:
1. sul titolo -> menu contestuale -> Rinumerazione
Solo dopo questo passaggio appariranno le voci, sul titolo -> menu contestuale ->:
- Aggiungi in videoteca (senza rinumerazione non appare
la voce)
- Scarica Serie e Scarica Stagione ( Se download Abilitato! )
Ulteriori info:
-
4. ### PIù IMPORTANTE!!! ###
#### NON E' DA CONSIDERARE ERRORE NEL TEST QUANTO RIPORTATO DI SEGUITO!!!! ####
1. Il sito permette un filtro tra anime e film, tramite url.
Se nell'url c'è /anime/, sul titolo e proseguendo fino alla pagina del video, saranno
presenti le voci:
- 'Rinumerazione', prima, e dopo: 'Aggiungi in videoteca', 'Scarica Serie' etc...
Tutto il resto è trattato come film e si avranno le voci solite:
AD eccezione per quei "FILM" che hanno 2 o più titoli all'interno, in questo caso:
1. Non apparirà nessuna voce tra "Aggiungi in videoteca" e "Scarica Film" e nemmeno "rinumerazione"
2. Dopo essere entrato nella pagina del Titolo Principale, troverai una lista di titoli dove sarà possibile scaricare
il filmato (chiamato EPISODIO) stessa cosa accedendo alla pagina ultima del video
3. Questi TITOLI NON POSSONO ESSERE AGGIUNTI IN VIDEOTECA
le voci "Scarica FILM" si avranno dopo.
Es:
https://www.dreamsub.stream/movie/5-centimetri-al-secondo -> film ma ha 3 titoli
Il Canale NON è presente nelle novità(globale) -> Anime
-------------------------------------------------------
NOTA per i DEV:
- Dai risultati dei Menu vengono tolti quei titoli
che non hanno la corrispettiva parola nel link, secondo lo schema seguente:
Menu Parole nel link
---------------------------
OAV oav
OVA ova
Speciali movie
Movie movie
Serie Tutti gli altri casi
Es:
https://www.dreamsub.stream/oav/another-the-other - è un OAV
"""
# Qui gli import
import re
import copy
from core import support
from platformcode import config
##from specials.autorenumber import renumber
from specials import autorenumber
# in caso di necessità
from core import scrapertoolsV2, httptools, servertools, tmdb
from core.item import Item
#from lib import unshortenit
##### fine import
__channel__ = "dreamsub"
@@ -53,7 +56,7 @@ host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
# server di esempio...
list_servers = ['verystream', 'streamango', 'openload', 'directo']
list_servers = ['directo', 'verystream', 'streamango', 'openload']
# quality di esempio
list_quality = ['default']
@@ -63,19 +66,15 @@ list_quality = ['default']
def mainlist(item):
support.log(item)
anime = ['/anime',
## ('Novità', ['']),
## ('OAV', ['/search/oav', 'peliculas', 'oav']),
## ('OVA', ['/search/ova', 'peliculas', 'ova']),
('Movie', ['/search/movie', 'peliculas', 'special']),
('Film', ['/search/film', 'peliculas', 'special']),
('Movie', ['/search/movie', 'peliculas', '', 'movie']),
('Film', ['/search/film', 'peliculas', '', 'movie']),
('Categorie', ['/filter?genere=','genres']),
## ('Ultimi Episodi', ['', 'last'])
]
"""
Eventuali Menu per voci non contemplate!
"""
return locals()
@@ -85,53 +84,37 @@ def peliculas(item):
#dbg # decommentare per attivare web_pdb
anime = True
action = 'episodios'
item.contentType = 'tvshow'
if item.args == 'newest':
patronBlock = r'<div class="showRoomGoLeft" sr="ultime"></div>(?P<block>.*?)<div class="showRoomGoRight" sr="ultime">'
patronBlock = r'<div class="showRoomGoLeft" sr="ultime"></div>(?P<block>.*?)<div class="showRoomGoRight" sr="ultime">'
else:
patronBlock = r'<input type="submit" value="Vai!" class="blueButton">(?P<block>.*?)<div class="footer">'
patron = r'<div class="showStreaming"> <b>(?P<title>[^<]+).+?Stato streaming: '\
'(?:[^<]+)<.*?Lingua:[ ](?P<lang1>ITA\/JAP|ITA|JAP)?(?:[ ])?'\
'(?P<lang2>SUB ITA)?<br>.+?href="(?P<url>[^"]+)".+?'\
'background: url\((?P<thumb>[^"]+)\).+?<div class="tvTitle">.+?'\
'<strong>Anno di inizio</strong>: (?P<year>\d+)<br>'
## patron = r'<div class="showStreaming"> <b>(?P<title>[^<]+).+?Stato streaming: '\
## '(?:[^<]+)<.*?Lingua:[ ](?P<lang1>ITA\/JAP|ITA|JAP)?(?:[ ])?'\
## '(?P<lang2>SUB ITA)?<br>.+?href="(?P<url>[^"]+)".+?'\
## 'background: url\((?P<thumb>[^"]+)\).+?<div class="tvTitle">.+?'\
## '<strong>Anno di inizio</strong>: (?P<year>\d+)<br>'
patron = r'<div class="showStreaming"> <b>(?P<title>[^<]+).+?Stato streaming: (?:[^<]+)<.*?Lingua:[ ](?P<lang1>ITA\/JAP|ITA|JAP)?(?:[ ])?(?P<lang2>SUB ITA)?<br>.+?href="(?P<url>[^"]+)".+?background: url\((?P<thumb>[^"]+)\).+?<div class="tvTitle">.+?Episodi[^>]+>.\s?(?P<nep>\d+).+?<strong>Anno di inizio</strong>: (?P<year>\d+)<br>'
patronNext = '<li class="currentPage">[^>]+><li[^<]+<a href="([^"]+)">'
def itemHook(item):
support.log("ITEMHOOK -> ", item)
lang = []
if item.lang1 == 'ITA/JAP' or item.lang1 == 'ITA':
lang.append('ITA')
item = language(item)
if item.lang1 == 'JAP' and item.lang2 == 'SUB ITA' or item.lang2 == 'SUB ITA':
lang.append('Sub-ITA')
support.log("ITEMHOOK LANG-> ", lang)
item.contentLanguage = lang
if len(lang) == 2:
item.title += ' [COLOR 0xFF65B3DA][' + lang[0] + '][/COLOR]'+' [COLOR 0xFF65B3DA][' + lang[1] + '][/COLOR]'
elif len(lang) == 1 and lang[0] != 'ITA':
item.title += ' [COLOR 0xFF65B3DA][' + lang[0] + '][/COLOR]'
# se si riesce a differenziare in qualche modo tramite il link
## if item.args == 'oav':
## if not '/oav/' in url:
## continue
## elif item.args == 'ova':
## if not '/ova/' in url:
## continue
## elif item.args == 'special':
if item.args == 'search' or item.args == 'special':
## if '/movie/' in item.url:
## item.args = 'special'
## if item.args == 'special':
item.action = 'findvideos'
item.contentType = 'movie'
if not '/movie/' in item.url:
pass
if 'anime' in item.url:
item.contentType = 'tvshow'
item.action = 'episodios'
#item.args = 'anime'
else:
if item.nep == '1':
item.contentType = 'movie'
item.action = 'findvideos'
else:
item.contentType = 'episode'
item.args = ''
item.nep = item.nep
item.action = 'findmovie'
return item
#debug = True
@@ -140,36 +123,18 @@ def peliculas(item):
@support.scrape
def episodios(item):
support.log(item)
#dbg
anime = True
## item.contentType = 'episode'
#support.dbg()
action = 'findvideos'
blacklist = ['']
patronBlock = r'<div class="seasonEp">(?P<block>.*?)<div class="footer">'
patron = r'<li><a href="(?P<url>[^"]+)"[^<]+<b>(?:.+?)[ ](?P<episode>\d+)<\/b>[^>]+>(?P<title>[^<]+)<\/i>[ ]\((?P<lang1>ITA)?\s?.+?\s?(?P<lang2>Sub ITA)?.+?\)<\/a>'
pagination = ''
patron = r'<li><a href="(?P<url>[^"]+)"[^<]+<b>(?:.+?)[ ](?P<episode>\d+)<\/b>[^>]+>(?P<title>[^<]+)<\/i>[ ]\(?(?P<lang1>ITA|Sub ITA)?\s?.?\s?(?P<lang2>Sub ITA)?.+?\)?<\/a>'
#UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 18: ordinal not in range(128)
def itemHook(item):
support.log("ITEMHOOK EPISODE LANG1 -> ", item)
lang = []
if item.lang1 == 'ITA':
lang.append('ITA')
if item.lang2 == 'Sub ITA':
lang.append('Sub-ITA')
support.log("ITEMHOOK EPISODE LANG2-> ", lang)
item.contentLanguage = lang
support.log("ITEMHOOK EPISODE LANG3 -> ", item, lang)
if len(lang) ==2:
item.title += ' [COLOR 0xFF65B3DA][' + lang[0] + '][/COLOR]'+' [COLOR 0xFF65B3DA][' + lang[1] + '][/COLOR]'
item.show += ' [COLOR 0xFF65B3DA][' + lang[0] + '][/COLOR]'+' [COLOR 0xFF65B3DA][' + lang[1] + '][/COLOR]'
elif len(lang) == 1 and lang[0] != 'ITA':
item.title += ' [COLOR 0xFF65B3DA][' + lang[0] + '][/COLOR]'
item.show += ' [COLOR 0xFF65B3DA][' + lang[0] + '][/COLOR]'
item = language(item)
return item
pagination = ''
#debug = True
return locals()
@@ -177,6 +142,7 @@ def episodios(item):
def genres(item):
support.log(item)
#dbg
item.contentType = ''
action = 'peliculas'
blacklist = ['tutti']
@@ -187,15 +153,60 @@ def genres(item):
item.contentTitle = item.contentTitle.replace(' ', '+')
item.url = host+'/filter?genere='+item.contentTitle
return item
#debug = True
return locals()
@support.scrape
def findmovie(item):
support.log(item)
patronBlock = r'<div class="seasonEp">(?P<block>.*?)<div class="footer">'
item.contentType = 'episode'
item.nep = 2
patron = r'<li><a href="(?P<url>[^"]+)"[^>]+>.(?P<title2>.+?)-.+?-[ ]<b>(?P<title>.+?)</b>\s+\(?(?P<lang1>ITA)?\s?(?P<lang2>Sub ITA)?.+?\)?'
def itemHook(item):
item = language(item)
return item
#debug = True
return locals()
def language(item):
lang = []
if item.lang1:
if item.lang1.lower() == 'ita/jap' or item.lang1.lower() == 'ita':
lang.append('ITA')
if item.lang1.lower() == 'jap' and item.lang1.lower() == 'sub ita':
lang.append('Sub-ITA')
if item.lang2:
if item.lang2.lower() == 'sub ita':
lang.append('Sub-ITA')
item.contentLanguage = lang
if len(lang) ==2:
item.title += support.typo(lang[0], '_ [] color kod') + support.typo(lang[1], '_ [] color kod')
#item.show += support.typo(lang[0], '_ [] color kod') + support.typo(lang[1], '_ [] color kod')
elif len(lang) == 1:
item.title += support.typo(lang[0], '_ [] color kod')
#item.show += support.typo(lang[0], '_ [] color kod')
return item
def search(item, text):
support.log('search', item)
itemlist = []
text = text.replace(' ', '+')
item.url = host + '/search/' + text
item.contentType = item.contentType
item.args = 'search'
try:
return peliculas(item)
@@ -206,32 +217,11 @@ def search(item, text):
support.log('search log:', line)
return []
def newest(categoria):
support.log('newest ->', categoria)
itemlist = []
item = Item()
item.contentType = 'tvshow'
item.args = 'newest'
try:
item.url = host
item.action = 'peliculas'
itemlist = peliculas(item)
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
log('newest log: ', {0}.format(line))
return []
return itemlist
# da adattare... ( support.server ha vari parametri )
#support.server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True)
def findvideos(item):
support.log()
support.log("ITEM ---->", item)
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -241,9 +231,10 @@ def findvideos(item):
patron = r'href="(.+?)"'
block = scrapertoolsV2.find_single_match(data, patronBlock)
urls = scrapertoolsV2.find_multiple_matches(block, patron)
#support.regexDbg(item, patron, headers, data=data)
for url in urls:
titles = item.infoLabels['title']
lang = ''
if 'sub_ita' in url.lower():
lang = 'Sub-ITA'
@@ -269,25 +260,35 @@ def findvideos(item):
else:
data = httptools.downloadpage(url).data
host_video = scrapertoolsV2.find_single_match(data, r'var thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"')
#host_video = scrapertoolsV2.find_single_match(data, r'var thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"')
host_video = scrapertoolsV2.find_single_match(data, r'let thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"')
link = scrapertoolsV2.find_single_match(data, r'<video src="([^"]+)"')
video_urls = host_video+link
title = support.typo(item.fulltitle,'_ bold') + support.typo(lang,'_ [] color kod')
title_show = support.typo(titles,'_ bold') + support.typo(lang,'_ [] color kod')
itemlist.append(
support.Item(channel=item.channel,
action="play",
contentType=item.contentType,
title=title,
fulltitle=title,
show=title,
title=title_show,
fulltitle=item.fulltitle,
show=item.fulltitle,
url=video_urls,
infoLabels=item.infoLabels,
infoLabels = item.infoLabels,
thumbnail=item.thumbnail,
contentSerieName= item.contentSerieName,
contentTitle=title,
contentSerieName= item.fulltitle,
contentTitle=title_show,
contentLanguage = 'ITA' if lang == [] else lang,
args=item.args,
server='directo',
))
if item.contentType != 'episode' and int(item.nep) < 2 :
# Link Aggiungi alla Libreria
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findservers':
support.videolibrary(itemlist, item)
# link per scaricare
if config.get_setting('downloadenabled'):
support.download(itemlist, item)
return itemlist

View File

@@ -58,10 +58,11 @@ def peliculas(item):
if item.args == 'newest':
#patron = r'<span class="serieTitle" style="font-size:20px">(?P<title>.*?).[^][\s]?<a href="(?P<url>[^"]+)"\s+target="_blank">(?P<episode>\d+x\d+-\d+|\d+x\d+) (?P<title2>.*?)[ ]?(?:|\((?P<lang>SUB ITA)\))?</a>'
patron = r'<span class="serieTitle" style="font-size:20px">(?P<title>.*?).[^–][\s]?<a href="(?P<url>[^"]+)"\s+target="_blank">(?:<episode>\d+x\d+-\d+|\d+x\d+) .*?[ ]?\(?(?P<lang>SUB ITA)?\)?</a>'
pagination = ''
else:
patron = r'<div class="post-thumb">.*?\s<img src="(?P<thumb>[^"]+)".*?><a href="(?P<url>[^"]+)"[^>]+>(?P<title>.+?)\s?(?: Serie Tv)?\s?\(?(?P<year>\d{4})?\)?<\/a><\/h2>'
patronNext='a class="next page-numbers" href="?([^>"]+)">Avanti &raquo;</a>'
#debug = True
return locals()
@@ -72,11 +73,20 @@ def episodios(item):
action = 'findvideos'
item.contentType = 'tvshow'
# Carica la pagina
data = pagina(item.url)
data = re.sub('\n|\t', ' ', data)
patronBlock = r'(?P<block>STAGIONE\s\d+ (?:\()?(?P<lang>ITA|SUB ITA)(?:\))?.*?)</div></div>'
patron = r'(?:\s|\Wn)?(?:<strong>|)?(?P<episode>\d+&#\d+;\d+-\d+|\d+&#\d+;\d+)(?:</strong>|)?(?P<title>.+?)(?:|-.+?-|–.+?–|–|.)?<a (?P<url>.*?)<br />'
data1 = pagina(item.url)
data1 = re.sub('\n|\t', ' ', data1)
data = re.sub(r'>\s+<', '> <', data1)
patronBlock = r'(?P<block>STAGIONE\s\d+ (.+?)?(?:\()?(?P<lang>ITA|SUB ITA)(?:\))?.*?)</div></div>'
#patron = r'(?:\s|\Wn)?(?:<strong>|)?(?P<episode>\d+&#\d+;\d+-\d+|\d+&#\d+;\d+)(?:</strong>|)?(?P<title>.+?)(?:|-.+?-|–.+?–|–|.)?<a (?P<url>.*?)<br />'
patron = r'(?:\s|\Wn)?(?:<strong>|)?(?P<episode>\d+&#\d+;\d+-\d+|\d+&#\d+;\d+)(?:</strong>|)?(?P<title>.+?)(?:–|-.+?-|–.+?–|–|.)?(?:<a (?P<url>.*?))?<br />'
def itemHook(item):
if not item.url:
item.title += ' [B][COLOR red]### NO LINK ###[/COLOR][/B]'
return item
#support.regexDbg(item, patronBlock, headers, data)
#debug = True
return locals()
def pagina(url):
@@ -84,9 +94,6 @@ def pagina(url):
data = httptools.downloadpage(url, headers=headers).data.replace("'", '"')
#support.log("DATA ----###----> ", data)
## if 'class="menu-item menu-item-type-taxonomy menu-item-object-category '\
## 'current-post-ancestor current-menu-parent current-post-parent menu-item-4529"' in data.lower():
## item.args = 'anime'
if 'clicca qui per aprire' in data.lower():
url = scrapertoolsV2.find_single_match(data, '"go_to":"([^"]+)"')
url = url.replace("\\","")

View File

@@ -7,14 +7,6 @@
"thumbnail": "fastsubita.png",
"banner": "fastsubita.png",
"categories": ["tvshow", "vos"],
"settings": [
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
}
]
"not_active": ["include_in_newest_peliculas", "include_in_newest_anime", "include_in_newest_italiano"],
"settings": []
}

View File

@@ -1,201 +1,111 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per fastsubita
# Thanks Icarus crew & Alfa addon & 4l3x87
# Canale per fastsubita.py
# ------------------------------------------------------------
"""
Problemi noti che non superano il test del canale:
- indicare i problemi
Su questo canale, nella categoria 'Ricerca Globale'
non saranno presenti le voci 'Aggiungi alla Videoteca'
e 'Scarica Film'/'Scarica Serie', dunque,
la loro assenza, nel Test, NON dovrà essere segnalata come ERRORE.
Avvisi:
- Eventuali avvisi per i tester
Novità. Indicare in quale/i sezione/i è presente il canale:
- serie
Ulteriori info:
- SOLO SUB-ITA
---------------------------------------------------
Per i DEV:
- nella ricerca, anche globale, esce la voce "successivo"
ma apre la maschera per l'inserimento testo
"""
from core import support, httptools, scrapertoolsV2, tmdb
from core import support, httptools, scrapertoolsV2
from core.item import Item
from core.support import log
from platformcode import config #, logger
from platformcode import config
__channel__ = 'fastsubita'
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
##IDIOMAS = {'Italiano': 'IT'}
##list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'speedvideo', 'wstream', 'flashx', 'vidoza', 'vidtome']
list_quality = ['default']
PERPAGE = 15
@support.menu
def mainlist(item):
tvshow = ['',
('Archivio A-Z ', ['', 'list_az'])
Tvshow = [
('Aggiornamenti', ['', 'peliculas', '', 'tvshow']),
('Per Lettera', ['/elenco-serie-tv/', 'genres', 'genres'])
]
search = ''
return locals()
@support.scrape
def peliculas(item):
support.log(item)
#dbg # decommentare per attivare web_pdb
#support.dbg()
deflang = 'Sub-ITA'
action = 'findvideos'
blacklist = ['']
patron = r'<div class="featured-thumb"> <a href="(?P<url>[^"]+)" title="(?:(?P<title>.+?)[ ]?(?P<episode>\d+&#215;\d+).+?&#8220;(?P<title2>.+?)&#8221;).+?">(?P<lang>Sub-ITA)?'
patronBlock = r'<main id="main" class="site-main" role="main">(?P<block>.*?)<nav class="navigation pagination" role="navigation">'
if item.args == 'genres':
patronBlock = r'<h4 id="mctm1-.">'+item.fulltitle+'</h4>(?P<block>.+?)</div>'
patron = r'[^>]+>[^>]+>.+?href="(?P<url>[^"]+)[^>]>(?P<title>[^<]+)\s<'
action = 'episodios'
elif item.args == 'search':
patronBlock = r'</h1> </header>(?P<block>.*?)</main>'
patron = r'(?:<img src="(?P<thumb>[^"]+)"[^>]+>)?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="(?P<url>[^"]+)"[^>]+>(?:(?P<title>.+?)[ ](?P<episode>[\d&#;\d]+\d+|\d+..\d+)(?: \([a-zA-Z\s]+\) )(?:s\d+e\d+)?[ ]?(?:[&#\d;|.{3}]+)(?P<title2>[^&#\d;|^.{3}]+)(?:|.+?))<'
else:
patron = r'<div class="featured-thumb"> <a href="(?P<url>[^"]+)" title="(?:(?P<title>.+?)[ ]?(?P<episode>\d+&#215;\d+).+?&#8220;(?P<title2>.+?)&#8221;).+?">'
patronBlock = r'<main id="main" class="site-main" role="main">(?P<block>.*?)<nav class="navigation pagination" role="navigation">'
patronNext = '<a class="next page-numbers" href="(.*?)">Successivi'
def itemHook(item):
if item.args == 'newest':
item.show = item.title# + support.typo('Sub-ITA', '_ [] color kod')
return item
## debug = True # True per testare le regex sul sito
#debug = True
return locals()
@support.scrape
def episodios(item):
support.log(item)
#dbg
item.args = 'episodios'
#support.dbg()
deflang = 'Sub-ITA'
action = 'findvideos'
blacklist = ['']
patron = r'<div class="featured-thumb"> <a href="(?P<url>[^"]+)" title="(?:(?P<title>.+?)[ ]?(?P<episode>\d+&#215;\d+).+?&#8220;(?P<title2>.+?)&#8221;).+?">(?P<lang>Sub-ITA)?'
patron = r'<div class="featured-thumb"> <a href="(?P<url>[^"]+)" title="(?:(?P<title>.+?)[ ]?(?P<episode>\d+&#215;\d+|\d+[×.]+\d+).+?&#8220;(?P<title2>.+?)&#8221;).+?">'
patronBlock = r'<main id="main" class="site-main" role="main">(?P<block>.*?)</main>'
patronNext = '<a class="next page-numbers" href="(.*?)">Successivi'
## debug = True
#debug = True
return locals()
@support.scrape
def genres(item):
support.log()
#support.dbg()
action = 'peliculas'
patronBlock = r'<div id="mcTagMapNav">(?P<block>.+?)</div>'
patron = r'<a href="(?P<url>[^"]+)">(?P<title>.+?)</a>'
def itemHook(item):
item.url = host+'/elenco-serie-tv/'
item.contentType = 'tvshow'
return item
#debug = True
return locals()
def list_az(item):
log()
itemlist = []
alphabet = dict()
for i, (scrapedurl, scrapedtitle) in enumerate(serietv()):
letter = scrapedtitle[0].upper()
if letter not in alphabet:
alphabet[letter] = []
alphabet[letter].append(str(scrapedurl) + '||' + str(scrapedtitle))
for letter in sorted(alphabet):
itemlist.append(
Item(channel=item.channel,
action="lista_serie",
url='\n\n'.join(alphabet[letter]),
title=letter,
fulltitle=letter))
return itemlist
def cleantitle(scrapedtitle):
scrapedtitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('', '\'').replace('&#215;', 'x').replace('×', 'x').replace('"', "'")
return scrapedtitle.strip()
def serietv():
log()
itemlist = []
matches = support.match(Item(), r'<option class="level-0" value="([^"]+)">([^<]+)</option>',
r'<select\s*?name="cat"\s*?id="cat"\s*?class="postform"\s*?>(.*?)</select>', headers,
url="%s/" % host)[0]
index = 0
for cat, title in matches:
title = cleantitle(title)
url = '%s?cat=%s' % (host, cat)
## if int(level) > 0:
## itemlist[index - 1][0] += '{|}' + url
## continue
itemlist.append([url, title])
index += 1
return itemlist
def lista_serie(item):
log()
itemlist = []
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
if '||' in item.url:
series = item.url.split('\n\n')
matches = []
for i, serie in enumerate(series):
matches.append(serie.decode('utf-8').split('||'))
series = matches
support.log("SERIE ALF :", series)
else:
series = serietv()
support.log("SERIE ALF 2 :", series)
for i, (scrapedurl, scrapedtitle) in enumerate(series):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append(
Item(channel=item.channel,
action="episodios",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
show=scrapedtitle,
extra=item.extra,
contentType='tvshow',
originalUrl=scrapedurl,
folder=True))
support.checkHost(item, itemlist)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if len(series) >= p * PERPAGE:
next_page = item.url + '{}' + str(p + 1)
support.nextPage(itemlist, item, next_page=next_page)
return itemlist
############## Fondo Pagina
# da adattare al canale
def search(item, text):
support.log('search', item)
itemlist = []
text = text.replace(' ', '+')
item.url = host + '?s=' + text
# bisogna inserire item.contentType per la ricerca globale
# se il canale è solo film, si può omettere, altrimenti bisgona aggiungerlo e discriminare.
try:
item.args = 'search'
item.contentType = 'tvshow'
return peliculas(item)
# Se captura la excepcion, para no interrumpir al buscador global si un canal falla
@@ -206,34 +116,33 @@ def search(item, text):
return []
# da adattare al canale
# inserire newest solo se il sito ha la pagina con le ultime novità/aggiunte
# altrimenti NON inserirlo
def newest(categoria):
support.log('newest ->', categoria)
itemlist = []
item = Item()
try:
item.contentType = 'tvshow'
item.args = 'newest'
item.url = host
item.action = 'peliculas'
itemlist = peliculas(item)
if categoria == 'series':
try:
item.contentType = 'tvshow'
item.args = 'newest'
item.url = host
item.action = 'peliculas'
itemlist = peliculas(item)
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.log('newest log: ', {0}.format(line))
return []
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.log('newest log: ', {0}.format(line))
return []
return itemlist
def findvideos(item):
support.log('findvideos ->', item)
itemlist = []
patronBlock = '<div class="entry-content">(?P<block>.*)<footer class="entry-footer">'
patron = r'<a href="([^"]+)">'
matches, data = support.match(item, patron, patronBlock, headers)
@@ -245,4 +154,24 @@ def findvideos(item):
resp = httptools.downloadpage(scrapedurl, follow_redirects=False)
data += resp.headers.get("location", "") + '\n'
return support.server(item, data)
itemlist += support.server(item, data)
data = httptools.downloadpage(item.url).data
patron = r'>Posted in <a href="https?://fastsubita.com/serietv/([^/]+)/(?:[^"]+)?"'
series = scrapertoolsV2.find_single_match(data, patron)
titles = support.typo(series.upper().replace('-', ' '), 'bold color kod')
goseries = support.typo("Vai alla Serie:", ' bold color kod')
itemlist.append(
Item(channel=item.channel,
title=goseries + titles,
fulltitle=titles,
show=series,
contentType='tvshow',
contentSerieName=series,
url=host+"/serietv/"+series,
action='episodios',
contentTitle=titles,
plot = "Vai alla Serie " + titles + " con tutte le puntate",
))
return itemlist

View File

@@ -3,68 +3,34 @@
"name": "Filmi Gratis",
"active": true,
"adult": false,
"language": ["ita"],
"language": ["ita", "sub-ita"],
"thumbnail": "filmigratis.png",
"banner": "filmigratis.png",
"categories": ["movie","tvshow"],
"categories": ["movie","tvshow"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "2", "5", "10" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -2,293 +2,156 @@
# ------------------------------------------------------------
# Canale per Filmi Gratis
# ------------------------------------------------------------
"""
La voce "Al cinema" si riferisce ai titoli che scorrono nella home page
Problemi:
- Nessuno noto
Novità, il canale, è presente in:
- FILM
"""
import re
from core import scrapertools, servertools, httptools, tmdb, support
from core import servertools, httptools, support
from core.item import Item
from platformcode import logger, config
from specials import autoplay
from platformcode import config
__channel__ = 'filmigratis'
host = config.get_channel_url(__channel__)
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango', 'vidoza', 'okru']
list_quality = ['1080p', '720p', '480p', '360']
checklinks = config.get_setting('checklinks', 'filmigratis')
checklinks_number = config.get_setting('checklinks_number', 'filmigratis')
list_servers = ['verystream', 'openload', 'streamango', 'vidoza', 'okru']
list_quality = ['1080p', '720p', '480p', '360']
headers = [['Referer', host]]
#-----------------------------------------------------------------------------------------------------------------------
@support.menu
def mainlist(item):
film = [
('Al Cinema ', ['', 'carousel']),
('Film alta definizione', ['', 'peliculas']),
('Categorie', ['', 'categorias_film']),
('Al Cinema ', ['', 'peliculas', 'cinema']),
('Categorie', ['', 'genres', 'genres']),
]
tvshow = [
('Categorie', ['', 'categorias_serie'])
tvshow = ['/serie/ALL',
('Generi', ['', 'genres', 'genres'])
]
search = ''
return locals()
@support.scrape
def peliculas(item):
support.log()
if item.args == 'search':
action = ''
patron = r'<div class="cnt">.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>\s+(?P<title>.+?)(?:\[(?P<lang>Sub-ITA|SUB-ITA|SUB)\])?\s?(?:\[?(?P<quality>HD).+\]?)?\s?(?:\(?(?P<year>\d+)?\)?)?\s+<[^>]+>[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)"[^<]+<'
patronBlock = r'<div class="container">(?P<block>.*?)</main>'
elif item.contentType == 'movie':
if not item.args:
# voce menu: Film
patronBlock = r'<h1>Film streaming ita in alta definizione</h1>(?P<block>.*?)<div class="content-sidebar">'
patron = r'<div class="timeline-right">[^>]+>\s<a href="(?P<url>.*?)".*?src="(?P<thumb>.*?)".*?<h3 class="timeline-post-title">(?:(?P<title>.+?)\s\[?(?P<lang>Sub-ITA)?\]?\s?\[?(?P<quality>HD)?\]?\s?\(?(?P<year>\d+)?\)?)<'
patronNext = r'<a class="page-link" href="([^"]+)">>'
elif item.args == 'cinema':
patronBlock = r'<div class="owl-carousel" id="postCarousel">(?P<block>.*?)<section class="main-content">'
patron = r'background-image: url\((?P<thumb>.*?)\).*?<h3.*?>(?:(?P<title>.+?)\s\[?(?P<lang>Sub-ITA)?\]?\s?\[?(?P<quality>HD)?\]?\s?\(?(?P<year>\d+)?\)?)<.+?<a.+?<a href="(?P<url>[^"]+)"[^>]+>'
elif item.args == 'genres':
# ci sono dei titoli dove ' viene sostituito con " da support
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
data = re.sub('\n|\t', ' ', data)
patron = r'<div class="cnt">\s.*?src="([^"]+)".+?title="((?P<title>.+?)(?:[ ]\[(?P<lang>Sub-ITA|SUB-ITA)\])?(?:[ ]\[(?P<quality>.*?)\])?(?:[ ]\((?P<year>\d+)\))?)"\s*[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s+<a href="(?P<url>[^"]+)"'
patronBlock = r'<div class="container">(?P<block>.*?)</main>'
pagination = ''
patronNext = '<a class="page-link" href="([^"]+)">>>'
else:
action = 'episodios'
patron = r'<div class="cnt">\s.*?src="([^"]+)".+?title="((?P<title>.+?)(?:[ ]\[(?P<lang>Sub-ITA|SUB-ITA)\])?(?:[ ]\[(?P<quality>.*?)\])?(?:[ ]\((?P<year>\d+)\))?)"\s*[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s+<a href="(?P<url>[^"]+)"'
## if item.args == 'search':
## patron = r'<div class="cnt">.*?src="([^"]+)".+?[^>]+>[^>]+>[^>]+>\s+((?P<title>.+?)(?:[ ]\[(?P<lang>Sub-ITA|SUB-ITA)\])?(?:[ ]\[(?P<quality>.*?)\])?(?:[ ]\((?P<year>\d+)\))?)\s+<[^>]+>[^>]+>[^>]+>[ ]<a href="(?P<url>[^"]+)"'
patronBlock = r'<div class="container">(?P<block>.*?)</main>'
def itemHook(item):
if item.args == 'search':
if 'series' in item.url:
item.action = 'episodios'
item.contentType = 'tvshow'
else:
item.action = 'findvideos'
item.contentType = 'movie'
return item
#debug = True
return locals()
@support.scrape
def episodios(item):
support.log()
action = 'findvideos'
patronBlock = r'<div class="row">(?P<block>.*?)<section class="main-content">'
patron = r'href="(?P<url>.*?)">(?:.+?)?\s+S(?P<season>\d+)\s\-\sEP\s(?P<episode>\d+)[^<]+<'
return locals()
#-----------------------------------------------------------------------------------------------------------------------
def carousel(item):
logger.info('[filmigratis.py] carousel')
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
blocco = scrapertools.find_single_match(data, r'<div class="owl-carousel" id="postCarousel">(.*?)<section class="main-content">')
patron = r'background-image: url\((.*?)\).*?<h3.*?>(.*?)<.*?<a.*?<a href="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedthumb, scrapedtitle, scrapedurl, in matches:
itemlist.append(
Item(channel=item.channel,
action = "findvideos",
contentType = item.contentType,
title = scrapedtitle,
fulltitle = scrapedtitle,
url = scrapedurl,
thumbnail = scrapedthumb,
args=item.args,
show = scrapedtitle,))
return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def peliculas(item):
logger.info('[filmigratis.py] peliculas')
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
blocco = scrapertools.find_single_match(data, r'<h1>Film streaming ita in alta definizione</h1>(.*?)<div class="content-sidebar">')
patron = r'<div class="timeline-left-wrapper">.*?<a href="(.*?)".*?src="(.*?)".*?<h3.*?>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedurl, scrapedthumb, scrapedtitle, in matches:
itemlist.append(
Item(channel=item.channel,
action = "findvideos",
contentType = item.contentType,
title = scrapedtitle,
fulltitle = scrapedtitle,
url = scrapedurl,
thumbnail = scrapedthumb,
args=item.args,
show = scrapedtitle))
patron = r'class="nextpostslink".*?href="(.*?)"'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[B]" + config.get_localized_string(30992) + "[/B]",
args=item.args,
url=next_page))
return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def categorias_film(item):
logger.info("[filmigratis.py] categorias_film")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
bloque = scrapertools.find_single_match(data, 'CATEGORIES.*?<ul>(.*?)</ul>')
patron = '<a href="(.*?)">(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=__channel__,
action="peliculas_categorias",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
args=item.args,
thumbnail=""))
return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def categorias_serie(item):
logger.info("[filmigratis.py] categorias_serie")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
bloque = scrapertools.find_single_match(data, 'class="material-button submenu-toggle"> SERIE TV.*?<ul>.*?</li>(.*?)</ul>')
patron = '<a href="(.*?)">(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=__channel__,
contentType='tvshow',
action="peliculas_serie",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
args=item.args,
thumbnail=""))
return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def peliculas_categorias(item):
logger.info("[filmigratis.py] peliculas_categorias")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<div class="cnt">.*?src="(.*?)".*?title="([A-Z|0-9].*?)".*?<a href="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumb, scrapedtitle, scrapedurl in matches:
if scrapedtitle == "":
scrapedtitle = scrapertools.find_single_match(data, r'<small>.*?([A-Z|0-9].*?) <')
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = scrapedtitle.replace ("È","È")
scrapedtitle = scrapedtitle.replace("–", "-")
scrapedtitle = scrapedtitle.replace("’", "'")
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType=item.contentType,
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumb,
args=item.args,
show=scrapedtitle))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def peliculas_serie(item):
logger.info("[filmigratis.py] peliculas_serie")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'div class="cnt">[^s]+src="([^"]+).*?small>\s+[^A-Z](.*?)<.*?<a href="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumb, scrapedtitle, scrapedurl in matches:
if scrapedtitle == "":
scrapedtitle = scrapertools.find_single_match(data, r'<small>.*?([A-Z|0-9].*?) <')
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = scrapedtitle.replace ("È","È")
scrapedtitle = scrapedtitle.replace("–", "-")
scrapedtitle = scrapedtitle.replace("’", "'")
scrapedtitle = scrapedtitle.replace(" ", "")
itemlist.append(
Item(channel=item.channel,
action="episodios",
contentType='tvshow',
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumb,
args=item.args,
show=scrapedtitle))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def episodios(item):
logger.info("[filmigratis.py] episodios")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertools.find_single_match(data, r'<div class="row">(.*?)<section class="main-content">')
patron = r'href="(.*?)".*?(S[^<]+) <'
matches = re.compile(patron, re.DOTALL).findall(block)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = scrapedtitle.replace ("S0", "")
scrapedtitle = scrapedtitle.replace(" - EP ", "x")
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType='episode',
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=item.thumb,
args=item.args,
show=item.title))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
support.videolibrary(itemlist, item, 'color kod')
return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def search(item, texto):
logger.info('[filmigratis.py] search')
item.url = host + '/search/?s=' + texto
if item.args == 'serie':
try:
return peliculas_serie(item)
except:
import sys
for line in sys.exc_info():
logger.error('%s' % line)
return []
@support.scrape
def genres(item):
support.log()
if item.contentType == 'movie':
action = 'peliculas'
patron = r'<a href="(?P<url>.*?)">(?P<title>.*?)<'
patronBlock = r'CATEGORIES.*?<ul>(?P<block>.*?)</ul>'
else:
try:
return peliculas_categorias(item)
item.contentType = 'tvshow'
action = 'peliculas'
blacklist = ['Al-Cinema']
patron = r'<a href="(?P<url>.*?)">(?P<title>.*?)<'
patronBlock = r'class="material-button submenu-toggle"> SERIE TV.*?<ul>.*?</li>(?P<block>.*?)</ul>'
except:
import sys
for line in sys.exc_info():
logger.error('%s' % line)
return []
return locals()
#-----------------------------------------------------------------------------------------------------------------------
def search(item, text):
support.log('search', item)
text = text.replace(' ', '+')
item.url = host + '/search/?s=' + text
try:
item.args = 'search'
return peliculas(item)
# Se captura la excepcion, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
log('search log:', line)
return []
def newest(categoria):
support.log('newest ->', categoria)
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host
item.contentType = 'movie'
item.action = 'peliculas'
itemlist = peliculas(item)
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.log({0}.format(line))
return []
return itemlist
def findvideos(item):
logger.info('[filmigratis.py] findvideos')
data = httptools.downloadpage(item.url, headers=headers).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title + '[COLOR green][B] - ' + videoitem.title + '[/B][/COLOR]'
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
videoitem.contentType = item.content
if item.args == "film":
support.videolibrary(itemlist, item, 'color kod')
autoplay.start(itemlist, item)
return itemlist
support.log()
return support.server(item)

View File

@@ -3,7 +3,7 @@
"name": "Filmpertutti",
"active": true,
"adult": false,
"language": ["ita"],
"language": ["ita", "sub-ita"],
"thumbnail": "filmpertutti.png",
"banner": "filmpertutti.png",
"categories": ["tvshow","movie"],

View File

@@ -1,235 +1,184 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per filmpertutti.co
# Canale per filmpertutti.py
# ------------------------------------------------------------
"""
Questi sono commenti per i beta-tester.
Su questo canale, nella categoria 'Ricerca Globale'
non saranno presenti le voci 'Aggiungi alla Videoteca'
e 'Scarica Film'/'Scarica Serie', dunque,
la loro assenza, nel Test, NON dovrà essere segnalata come ERRORE.
Novità (globale). Indicare in quale/i sezione/i è presente il canale:
- film, serie
- I titoli in questa sezione a gruppi di 20
"""
import re
from channelselector import thumb
from core import scrapertoolsV2, servertools, httptools, tmdb, support
from core import scrapertoolsV2, httptools, support
from core.item import Item
from platformcode import config, logger
from specials import autoplay
from platformcode import config
__channel__ = 'filmpertutti'
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
list_servers = ['verystream', 'openload', 'streamango', 'wstream', 'akvideo']
list_servers = ['speedvideo', 'verystream', 'openload', 'streamango', 'wstream', 'akvideo']
list_quality = ['HD', 'SD']
@support.menu
def mainlist(item):
film = ['/category/film/',
('Film per Genere', ['', 'genre'])
]
tvshow = ['/category/serie-tv/',
('in ordine alfabetico', ['/category/serie-tv/', 'az'])
]
film = ['/category/film/',
('Generi', ['/category/film/', 'genres', 'lettersF'])
]
tvshow = ['/category/serie-tv/',
('Aggiornamenti', ['/aggiornamenti-serie-tv/', 'peliculas', 'newest']),
('Per Lettera', ['/category/serie-tv/', 'genres', 'lettersS'])
]
search = ''
return locals()
@support.scrape
def peliculas(item):
support.log()
if item.args != 'newest':
patronBlock = r'<ul class="posts">(?P<block>.*)<\/ul>'
patron = r'<li><a href="(?P<url>[^"]+)" data-thumbnail="(?P<thumb>[^"]+)">.*?<div class="title">(?P<title>.+?)(?:\[(?P<lang>Sub-ITA)\])?(?:[ ]\[?(?P<quality>[HD]+)?\])?(?:[ ]\((?P<year>\d+)\)?)?<\/div>'
patronNext = r'<a href="([^"]+)" >Pagina'
else:
patronBlock = r'<ul class="posts">(?P<block>.*)<div class="clear">'
patron = r'<li>\s?<a href="(?P<url>[^"]+)" data-thumbnail="(?P<thumb>[^"]+)">.*?<div class="title">(?P<title>.+?)(?:\s\[(?P<quality>HD)\])?<\/div>[^>]+>(?:[\dx]+)\s?(?:[ ]\((?P<lang>[a-zA-Z\-]+)\))?.+?</div>'
pagination = ''
if item.args == 'search':
action = 'select'
elif item.contentType == 'tvshow':
action = 'episodios'
elif item.contentType == 'movie':
action ='findvideos'
else:
action = 'select'
def itemHook(item):
item.title = item.title.replace(r'-', ' ')
return item
#debug = True
return locals()
@support.scrape
def episodios(item):
support.log()
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub('\n|\t', ' ', data)
data = re.sub(r'>\s+<', '> <', data)
if 'accordion-item' in data:
#patronBlock = r'<span class="season(?:|-title)">(?P<season>\d+)[^>]+>[^>]+>\s+?[^>]+>[^>]+>.+?(?:STAGIONE|Stagione).+?\s(?P<lang>[a-zA-Z\-]+).+?</span>(?P<block>.*?)<div id="disqus_thread">'
patronBlock = r'<span class="season(?:|-title)">(?P<season>\d+)[^>]+>[^>]+>\s+?[^>]+>[^>]+>.+?(?:STAGIONE|Stagione).+?\s(?P<lang>[a-zA-Z\-]+)</span>(?P<block>.*?)\s*(?:<li class="s_title">|<div id="disqus_thread">)'
patron = r'<img src="(?P<thumb>[^"]+)">.*?<li class="season-no">(?P<episode>.*?)<\/li>(?P<url>.*?javascript:;">(?P<title>[^<]+)<.+?)<\/table>'
else:
patronBlock = r'<div id="info" class="pad">(?P<block>.*?)<div id="disqus_thread">'
patron = r'<strong>(?P<lang>.*?)<\/strong>.*?<p>(?P<season>.*?)<span'
#debug = True
return locals()
def newest(categoria):
logger.info("filmpertutti newest" + categoria)
@support.scrape
def genres(item):
support.log()
itemlist = []
item = Item()
try:
if categoria == "film":
item.url = host + "/category/film/"
item.action = "peliculas"
item.extra = "movie"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
if item.args == 'lettersF':
item.contentType = 'movie'
else:
item.contentType = 'tvshow'
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
action = 'peliculas'
patronBlock = r'<select class="cats">(?P<block>.*?)<\/select>'
patron = r'<option data-src="(?P<url>[^"]+)">(?P<title>.*?)<\/option>'
return itemlist
return locals()
def select(item):
support.log()
data = httptools.downloadpage(item.url, headers=headers).data
patronBlock = scrapertoolsV2.find_single_match(data, r'class="taxonomy category" ><span property="name">(.*?)</span></a><meta property="position" content="2">')
if patronBlock.lower() != 'film':
support.log('select = ### è una serie ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
contentSerieName = item.fulltitle,
url=item.url,
contentType='tvshow'))
else:
support.log('select = ### è un movie ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
contentType='movie'))
def search(item, texto):
logger.info("filmpertutti " + item.url + " search " + texto)
support.log()
item.url = host + "/?s=" + texto
item.contentType = 'episode'
item.args = 'search'
try:
return peliculas(item)
# Continua la ricerca in caso di errore
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
support.log("%s" % line)
return []
def genre(item):
logger.info(item.channel + 'genre')
def newest(categoria):
support.log()
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertoolsV2.find_single_match(data, r'<ul class="table-list">(.*?)<\/ul>')
matches = scrapertoolsV2.find_multiple_matches(block, r'<a href="([^"]+)">.*?<\/span>(.*?)<\/a>')
for url, title in matches:
itemlist.append(
Item(channel=item.channel,
action='peliculas',
title=title,
url=host+url)
)
itemlist = thumb(itemlist)
return itemlist
def az(item):
logger.info(item.channel + 'genre')
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertoolsV2.find_single_match(data, r'<select class="cats">(.*?)<\/select>')
matches = scrapertoolsV2.find_multiple_matches(block, r'<option data-src="([^"]+)">(.*?)<\/option>')
for url, title in matches:
itemlist.append(
Item(channel=item.channel,
action='peliculas',
title=title,
url=url)
)
itemlist = thumb(itemlist)
return itemlist
def peliculas(item):
logger.info(item.channel + 'peliculas')
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertoolsV2.find_single_match(data, r'<ul class="posts">(.*)<\/ul>')
patron = r'<li><a href="([^"]+)" data-thumbnail="([^"]+)">.*?<div class="title">([^<]+)<\/div>'
matches = scrapertoolsV2.find_multiple_matches(block, patron)
for scrapedurl, scrapedthumb, scrapedtitle in matches:
title = re.sub(r'.\(.*?\)|.\[.*?\]', '', scrapedtitle)
quality = scrapertoolsV2.find_single_match(scrapedtitle, r'\[(.*?)\]')
if not quality:
quality = 'SD'
longtitle = title + ' [COLOR blue][' + quality + '][/COLOR]'
if item.contentType == 'tvshow':
action = 'episodios'
item = Item()
try:
if categoria == "peliculas":
item.url = host + "/category/film/"
item.action = "peliculas"
item.extra = "movie"
item.contentType = 'movie'
itemlist = peliculas(item)
else:
action ='findvideos'
item.url = host + "/aggiornamenti-serie-tv/"
item.action = "peliculas"
item.args = "newest"
item.contentType = 'tvshow'
itemlist = peliculas(item)
itemlist.append(
Item(channel=item.channel,
action=action,
contentType=item.contentType,
title=longtitle,
fulltitle=title,
show=title,
quality=quality,
url=scrapedurl,
thumbnail=scrapedthumb
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
## if itemlist[-1].action == "peliculas":
## itemlist.pop()
next_page = scrapertoolsV2.find_single_match(data, '<a href="([^"]+)">Pagina')
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="peliculas",
contentType=item.contentType,
title="[COLOR blue]" + config.get_localized_string(30992) + " >[/COLOR]",
url=next_page,
thumbnails=thumb()))
return itemlist
def episodios(item):
logger.info(item.channel + 'findvideos')
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
if 'accordion-item' in data:
block = scrapertoolsV2.find_single_match(data, 'accordion-item.*?>(.*?)<div id="disqus_thread">')
patron = r'<img src="([^"]+)">.*?<li class="season-no">(.*?)<\/li>(.*?)<\/table>'
matches = scrapertoolsV2.find_multiple_matches(block, patron)
for scrapedthumb, scrapedtitle, scrapedurl in matches:
title = scrapedtitle + ' - ' + item.title
if title[0] == 'x':
title = '1' + title
itemlist.append(
Item(channel=item.channel,
action='findvideos',
contentType='episode',
title=title,
fulltitle=title,
show=title,
quality=item.quality,
url=scrapedurl,
thumbnail=scrapedthumb
))
else:
block = scrapertoolsV2.find_single_match(data, '<div id="info" class="pad">(.*?)<div id="disqus_thread">').replace('</p>','<br />').replace('×','x')
matches = scrapertoolsV2.find_multiple_matches(block, r'<strong>(.*?)<\/strong>.*?<p>(.*?)<span')
for lang, seasons in matches:
lang = re.sub('.*?Stagione[^a-zA-Z]+', '', lang)
# patron = r'([0-9]+x[0-9]+) (.*?)<br'
season = scrapertoolsV2.find_multiple_matches(seasons, r'([0-9]+x[0-9]+) (.*?)<br')
for title, url in season:
title = title + ' - ' + lang
itemlist.append(
Item(channel=item.channel,
title=title,
fulltitle=title,
show=title,
url=url,
contentType='episodie',
action='findvideos'
))
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.log("{0}".format(line))
return []
return itemlist
def findvideos(item):
logger.info(item.channel + 'findvideos')
if item.contentType == 'movie':
data = httptools.downloadpage(item.url, headers=headers).data
return support.server(item)
else:
data = item.url
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title + ' - [COLOR limegreen][[/COLOR]' + videoitem.title + '[COLOR limegreen]][/COLOR]'
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = item.channel
videoitem.contentType = item.contentType
videoitem.quality = item.quality
autoplay.start(itemlist, item)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow][B]'+config.get_localized_string(30161)+'[/B][/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.fulltitle))
return itemlist
return support.server(item, item.url)

View File

@@ -7,5 +7,6 @@
"thumbnail": "https:\/\/mondoserietv.com\/wp-content\/uploads\/2018\/04\/logo.png",
"bannermenu": "https:\/\/mondoserietv.com\/wp-content\/uploads\/2018\/04\/logo.png",
"categories": ["movie","anime","tvshow","documentary"],
"not_active":["include_in_newest_anime","include_in_newest_documentary"],
"settings": []
}

View File

@@ -10,7 +10,7 @@ host = support.config.get_channel_url(__channel__)
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['akstream']
list_servers = ['akstream', 'wstream', 'vidtome', 'backin', 'nowvideo', 'verystream']
list_quality = ['default']
headers = {'Referer': host}
@@ -18,11 +18,13 @@ headers = {'Referer': host}
@support.menu
def mainlist(item):
film =['/lista-film']
film = ['/lista-film',
('Ultimi Film Aggiunti', ['/ultimi-film-aggiunti', 'peliculas' , 'last'])]
tvshow = ['/lista-serie-tv',
('HD {TV}', ['/lista-serie-tv-in-altadefinizione']),
('Anni 50 60 70 80 {TV}',['/lista-serie-tv-anni-60-70-80'])]
('Anni 50 60 70 80 {TV}',['/lista-serie-tv-anni-60-70-80']),
('Serie Italiane',['/lista-serie-tv-italiane'])]
anime = ['/lista-cartoni-animati-e-anime']
@@ -31,11 +33,21 @@ def mainlist(item):
return locals()
@support.scrape
def search(item, text):
support.log(text)
if item.contentType == 'movie' or item.extra == 'movie':
action = 'findvideos'
else:
action = 'episodios'
try:
item.search = text
return peliculas(item)
search = text
data = support.match(item, headers=headers)[1]
if 'lcp_nextlink' in data:
data += support.match(item, url=support.scrapertoolsV2.find_single_match(data, r'href="([^"]+)" title="[^"]+" class="lcp_nextlink"'), headers=headers)[1]
patron = r'<li><a href="(?P<url>[^"]+)" title="(?P<title>.*?)(?:\s(?P<year>\d{4}))?"[^>]*>'
return locals()
# Continua la ricerca in caso di errore
except:
@@ -45,12 +57,41 @@ def search(item, text):
return []
def newest(categoria):
support.log(categoria)
item = support.Item()
try:
if categoria == "series":
item.contentType= 'tvshow'
item.url = host + '/ultimi-episodi-aggiunti'
item.args = "lastep"
if categoria == "peliculas":
item.contentType= 'movie'
item.url = host + '/ultimi-film-aggiunti'
item.args = "last"
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.logger.error("{0}".format(line))
return []
@support.scrape
def peliculas(item):
pagination = ''
search = item.search
patronBlock = r'<div class="entry-content pagess">(?P<block>.*?)</ul>'
patron = r'<li><a href="(?P<url>[^"]+)" title="(?P<title>.*?)(?:\s(?P<year>\d{4}))?"[^>]*>'
patronNext = r'href="([^"]+)" title="[^"]+" class="lcp_nextlink"'
if item.args == 'last':
patronBlock = r'<table>(?P<block>.*?)</table>'
patron = r'<tr><td><a href="(?P<url>[^"]+)">\s*[^>]+>(?P<title>.*?)(?:\s(?P<year>\d{4}))? (?:Streaming|</b>)'
elif item.args == 'lastep':
patronBlock = r'<table>(?P<block>.*?)</table>'
patron = r'<td>\s*<a href="[^>]+>(?P<title>.*?)(?:\s(?P<year>\d{4}))?\s(?:(?P<episode>(?:\d+x\d+|\d+)))\s*(?P<title2>[^<]+)(?P<url>.*?)<tr>'
action = 'findvideos'
else:
patronBlock = r'<div class="entry-content pagess">(?P<block>.*?)</ul>'
patron = r'<li><a href="(?P<url>[^"]+)" title="(?P<title>.*?)(?:\s(?P<year>\d{4}))?"[^>]*>'
if item.contentType == 'tvshow':
action = 'episodios'
anime = True
@@ -60,8 +101,9 @@ def peliculas(item):
@support.scrape
def episodios(item):
anime = True
pagination = 50
patronBlock = r'<table>(?P<block>.*?)</table>'
patron = r'<tr><td><b>(?:\d+)?.*?(?:(?P<episode>(?:\d+x\d+|\d+)))\s*(?P<title>[^<]+)(?P<url>.*?)<tr>'
patron = r'<tr><td><b>(?P<title>(?:\d+)?.*?)\s*(?:(?P<episode>(?:\d+x\d+|\d+)))\s*(?P<title2>[^<]+)(?P<url>.*?)<tr>'
def itemHook(item):
clear = support.re.sub(r'\[[^\]]+\]', '', item.title)
if clear.isdigit():
@@ -70,292 +112,5 @@ def episodios(item):
return locals()
def findvideos(item):
    # Thin wrapper: delegate server-link resolution to the shared support
    # helper, handing it item.url (here holding the scraped data/links block
    # captured by the episode patterns above) as the text to scan.
    return support.server(item, item.url)
# def search(item, texto):
# logger.info("kod.mondoserietv search " + texto)
# item.url = "%s/?s=%s" % (host, texto)
# try:
# if item.extra == "movie":
# return search_peliculas(item)
# if item.extra == "tvshow":
# return search_peliculas_tv(item)
# # Continua la ricerca in caso di errore
# except:
# import sys
# for line in sys.exc_info():
# logger.error("%s" % line)
# return []
# def search_peliculas(item):
# logger.info("kod.mondoserietv search_peliculas")
# itemlist = []
# # Carica la pagina
# data = httptools.downloadpage(item.url, headers=headers).data
# # Estrae i contenuti
# patron = '<div class="boxinfo">\s*<a href="([^"]+)">\s*<span class="tt">(.*?)</span>'
# matches = re.compile(patron, re.DOTALL).findall(data)
# for scrapedurl, scrapedtitle in matches:
# scrapedplot = ""
# scrapedthumbnail = ""
# scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
# itemlist.append(
# Item(channel=item.channel,
# action="findvideos",
# fulltitle=scrapedtitle,
# show=scrapedtitle,
# title=scrapedtitle,
# url=scrapedurl,
# thumbnail=scrapedthumbnail,
# plot=scrapedplot,
# extra=item.extra,
# folder=True))
# tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# return itemlist
# def search_peliculas_tv(item):
# logger.info("kod.mondoserietv search_peliculas_tv")
# itemlist = []
# # Carica la pagina
# data = httptools.downloadpage(item.url, headers=headers).data
# # Estrae i contenuti
# patron = '<div class="boxinfo">\s*<a href="([^"]+)">\s*<span class="tt">(.*?)</span>'
# matches = re.compile(patron, re.DOTALL).findall(data)
# for scrapedurl, scrapedtitle in matches:
# scrapedplot = ""
# scrapedthumbnail = ""
# scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
# itemlist.append(
# Item(channel=item.channel,
# action="episodios",
# fulltitle=scrapedtitle,
# show=scrapedtitle,
# title=scrapedtitle,
# url=scrapedurl,
# thumbnail=scrapedthumbnail,
# plot=scrapedplot,
# extra=item.extra,
# folder=True))
# tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# return itemlist
# def peliculas(item):
# logger.info("kod.mondoserietv film")
# itemlist = []
# p = 1
# if '{}' in item.url:
# item.url, p = item.url.split('{}')
# p = int(p)
# data = httptools.downloadpage(item.url, headers=headers).data
# blocco = scrapertools.find_single_match(data, '<div class="entry-content pagess">(.*?)</ul>')
# patron = r'<a href="(.*?)" title="(.*?)">'
# matches = re.compile(patron, re.DOTALL).findall(blocco)
# for i, (scrapedurl, scrapedtitle) in enumerate(matches):
# if (p - 1) * PERPAGE > i: continue
# if i >= p * PERPAGE: break
# scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
# itemlist.append(Item(channel=item.channel,
# contentType="movie",
# action="findvideos",
# title=scrapedtitle,
# fulltitle=scrapedtitle,
# url=scrapedurl,
# fanart=item.fanart if item.fanart != "" else item.scrapedthumbnail,
# show=item.fulltitle,
# folder=True))
# if len(matches) >= p * PERPAGE:
# scrapedurl = item.url + '{}' + str(p + 1)
# itemlist.append(
# Item(channel=item.channel,
# extra=item.extra,
# action="peliculas",
# title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
# url=scrapedurl,
# thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
# folder=True))
# tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# return itemlist
# def lista_serie(item):
# logger.info("kod.mondoserietv novità")
# itemlist = []
# p = 1
# if '{}' in item.url:
# item.url, p = item.url.split('{}')
# p = int(p)
# data = httptools.downloadpage(item.url, headers=headers).data
# blocco = scrapertools.find_single_match(data, '<div class="entry-content pagess">(.*?)</ul>')
# patron = r'<a href="(.*?)" title="(.*?)">'
# matches = re.compile(patron, re.DOTALL).findall(blocco)
# scrapertools.printMatches(matches)
# for i, (scrapedurl, scrapedtitle) in enumerate(matches):
# if (p - 1) * PERPAGE > i: continue
# if i >= p * PERPAGE: break
# scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
# itemlist.append(Item(channel=item.channel,
# action="episodios",
# title=scrapedtitle,
# fulltitle=scrapedtitle,
# url=scrapedurl,
# fanart=item.fanart if item.fanart != "" else item.scrapedthumbnail,
# show=item.fulltitle,
# folder=True))
# if len(matches) >= p * PERPAGE:
# scrapedurl = item.url + '{}' + str(p + 1)
# itemlist.append(
# Item(channel=item.channel,
# extra=item.extra,
# action="lista_serie",
# title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
# url=scrapedurl,
# thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
# folder=True))
# tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# return itemlist
# def episodios(item):
# logger.info("kod.mondoserietv episodios")
# itemlist = []
# data = httptools.downloadpage(item.url, headers=headers).data
# blocco = scrapertools.find_single_match(data, '<table>(.*?)</table>')
# patron = "<tr><td><b>(.*?)(\d+)((?:x\d+| ))(.*?)<\/b>(.*?<tr>)"
# matches = scrapertoolsV2.find_multiple_matches(blocco, patron)
# for t1, s, e, t2, scrapedurl in matches:
# if "x" not in e:
# e = s
# if e == s:
# s = None
# if s is None:
# s = "1"
# if s.startswith('0'):
# s = s.replace("0", "")
# if e.startswith('x'):
# e = e.replace("x", "")
# scrapedtitle = t1 + s + "x" + e + " " + t2
# itemlist.append(
# Item(channel=item.channel,
# contentType="episode",
# action="findvideos",
# items=s,
# iteme=e,
# fulltitle=scrapedtitle,
# show=scrapedtitle,
# title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
# url=scrapedurl,
# thumbnail=item.scrapedthumbnail,
# plot=item.scrapedplot,
# folder=True))
# if config.get_videolibrary_support() and len(itemlist) != 0:
# itemlist.append(
# Item(channel=item.channel,
# title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
# url=item.url,
# action="add_serie_to_library",
# extra="episodios",
# show=item.show))
# return itemlist
# def findvideos(item):
# logger.info(" findvideos")
# if item.contentType != "episode":
# return findvideos_movie(item)
# itemlist = servertools.find_video_items(data=item.url)
# logger.info(itemlist)
# for videoitem in itemlist:
# videoitem.title = "".join([item.title, '[COLOR green][B]' + videoitem.title + '[/B][/COLOR]'])
# videoitem.fulltitle = item.fulltitle
# videoitem.thumbnail = item.thumbnail
# videoitem.show = item.show
# videoitem.plot = item.plot
# videoitem.channel = item.channel
# videoitem.contentType = item.contentType
# videoitem.language = IDIOMAS['Italiano']
# # Requerido para Filtrar enlaces
# if checklinks:
# itemlist = servertools.check_list_links(itemlist, checklinks_number)
# # Requerido para FilterTools
# # itemlist = filtertools.get_links(itemlist, item, list_language)
# # Requerido para AutoPlay
# autoplay.start(itemlist, item)
# if item.contentType != 'episode':
# if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
# itemlist.append(
# Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
# action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
# return itemlist
# def findvideos_movie(item):
# logger.info(" findvideos_movie")
# # Carica la pagina
# data = httptools.downloadpage(item.url).data
# patron = r"<a href='([^']+)'[^>]*?>[^<]*?<img src='[^']+' style='[^']+' alt='[^']+'>[^<]+?</a>"
# matches = re.compile(patron, re.DOTALL).findall(data)
# for scrapedurl in matches:
# url, c = unshorten(scrapedurl)
# data += url + '\n'
# itemlist = servertools.find_video_items(data=data)
# for videoitem in itemlist:
# videoitem.title = "".join([item.title, '[COLOR green][B]' + videoitem.title + '[/B][/COLOR]'])
# videoitem.fulltitle = item.fulltitle
# videoitem.thumbnail = item.thumbnail
# videoitem.show = item.show
# videoitem.plot = item.plot
# videoitem.channel = item.channel
# videoitem.contentType = item.contentType
# return itemlist
if item.contentType == 'movie': return support.server(item)
else: return support.server(item, item.url)

View File

@@ -228,7 +228,6 @@ def get_default_settings(channel_name):
categories = get_channel_json(channel_name).get('categories', list())
not_active = get_channel_json(channel_name).get('not_active', list())
default_off = get_channel_json(channel_name).get('default_off', list())
logger.info('NON ATTIVI= ' + str(not_active))
# Apply default configurations if they do not exist
for control in default_controls:
@@ -364,6 +363,7 @@ def set_channel_setting(name, value, channel):
file_settings = os.path.join(config.get_data_path(), "settings_channels", channel + "_data.json")
dict_settings = {}
def_settings = get_default_settings(channel)
dict_file = None
@@ -377,10 +377,22 @@ def set_channel_setting(name, value, channel):
dict_settings[name] = value
# delete unused Settings
def_keys = []
del_keys = []
for key in def_settings:
def_keys.append(key['id'])
for key in dict_settings:
if key not in def_keys:
del_keys.append(key)
for key in del_keys:
del dict_settings[key]
# comprobamos si existe dict_file y es un diccionario, sino lo creamos
if dict_file is None or not dict_file:
dict_file = {}
dict_file['settings'] = dict_settings
# Creamos el archivo ../settings/channel_data.json

View File

@@ -217,11 +217,11 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
val = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
scraped[kk] = val
if scraped['season'] != None:
season = scraped['season']
if stagione:
episode = season +'x'+ scraped['episode']
elif item.contentType == 'tvshow' and (scraped['episode'] == '' and season == ''):
if scraped['season']:
episode = scraped['season'] +'x'+ scraped['episode']
elif stagione:
episode = stagione +'x'+ scraped['episode']
elif item.contentType == 'tvshow' and (scraped['episode'] == '' and scraped['season'] == '' and stagione == ''):
item.news = 'season_completed'
episode = ''
else:
@@ -400,12 +400,13 @@ def scrape(func):
if 'itemlistHook' in args:
itemlist = args['itemlistHook'](itemlist)
if patronNext and inspect.stack()[1][3] != 'newest':
nextPage(itemlist, item, data, patronNext, function)
if (pagination and len(matches) <= pag * pagination) or not pagination: # next page with pagination
if patronNext and inspect.stack()[1][3] != 'newest':
nextPage(itemlist, item, data, patronNext, function)
# next page for pagination
if pagination and len(matches) >= pag * pagination:
if pagination and len(matches) > pag * pagination and not search:
if inspect.stack()[1][3] != 'get_newest':
itemlist.append(
Item(channel=item.channel,

View File

@@ -224,6 +224,7 @@ def save_movie(item):
return 0, 0, -1
def filter_list(episodelist, action=None, path=None):
if path: path = path.decode('utf8')
channel_prefs = {}
lang_sel = quality_sel = show_title = channel =''
if action:
@@ -396,13 +397,13 @@ def save_tvshow(item, episodelist):
_id = item.infoLabels['code'][0]
if config.get_setting("original_title_folder", "videolibrary") == 1 and item.infoLabels['originaltitle']:
base_name = item.infoLabels['originaltitle']
base_name = item.infoLabels[u'originaltitle']
elif item.infoLabels['tvshowtitle']:
base_name = item.infoLabels['tvshowtitle']
base_name = item.infoLabels[u'tvshowtitle']
elif item.infoLabels['title']:
base_name = item.infoLabels['title']
base_name = item.infoLabels[u'title']
else:
base_name = item.contentSerieName
base_name = u'%s' % item.contentSerieName
base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").encode("utf8")

View File

@@ -35,7 +35,7 @@ class UnshortenIt(object):
_anonymz_regex = r'anonymz\.com'
_shrink_service_regex = r'shrink-service\.it'
_rapidcrypt_regex = r'rapidcrypt\.net'
_cryptmango_regex = r'cryptmango'
_cryptmango_regex = r'cryptmango|xshield\.net'
_vcrypt_regex = r'vcrypt\.net'
_maxretries = 5
@@ -467,6 +467,7 @@ class UnshortenIt(object):
except Exception as e:
return uri, str(e)
def _unshorten_vcrypt(self, uri):
r = None
import base64, pyaes

View File

@@ -20,7 +20,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
patronvideos = [
r'(https?://(gestyy|rapidteria|sprysphere)\.com/[a-zA-Z0-9]+)',
r'(https?://(?:www\.)?(vcrypt|linkup)\.[^/]+/[^/]+/[a-zA-Z0-9_]+)',
r'(https?://(?:www\.)?(bit)\.ly/[a-zA-Z0-9]+)',
r'(https?://(?:www\.)?(bit)\.ly/[a-zA-Z0-9]+)',
r'(https?://(?:www\.)?(xshield)\.[^/]+/[^/]+/[^/]+/[a-zA-Z0-9_\.]+)'
]
for patron in patronvideos:
@@ -41,6 +42,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
replace_headers=True,
headers={'User-Agent': 'curl/7.59.0'})
data = resp.headers.get("location", "")
elif 'xshield' in url:
from lib import unshortenit
data, status = unshortenit.unshorten(url)
logger.info("Data - Status zcrypt xshield.net: [%s] [%s] " %(data, status))
elif 'vcrypt.net' in url:
from lib import unshortenit
data, status = unshortenit.unshorten(url)
@@ -49,7 +54,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
idata = httptools.downloadpage(url).data
data = scrapertoolsV2.find_single_match(idata, "<iframe[^<>]*src=\\'([^'>]*)\\'[^<>]*>")
#fix by greko inizio
if not data:
if not data:
data = scrapertoolsV2.find_single_match(idata, 'action="(?:[^/]+.*?/[^/]+/([a-zA-Z0-9_]+))">')
from lib import unshortenit
data, status = unshortenit.unshorten(url)