This commit is contained in:
Whiplash
2019-11-01 20:01:27 +01:00
40 changed files with 1217 additions and 1607 deletions

View File

@@ -1,7 +1,7 @@
{
"altadefinizione01": "https://www.altadefinizione01.cc",
"altadefinizione01_club": "https://www.altadefinizione01.cc",
"altadefinizione01_link": "http://altadefinizione01.town",
"altadefinizione01_link": "http://altadefinizione01.gift",
"altadefinizioneclick": "https://altadefinizione.cloud",
"altadefinizionehd": "https://altadefinizione.wtf",
"animeforce": "https://ww1.animeforce.org",
@@ -21,7 +21,7 @@
"fastsubita": "http://fastsubita.com",
"filmgratis": "https://www.filmaltadefinizione.net",
"filmigratis": "https://filmigratis.org",
"filmpertutti": "https://www.filmpertutti.pub",
"filmpertutti": "https://www.filmpertutti.link",
"filmsenzalimiti": "https://filmsenzalimiti.best",
"filmsenzalimiticc": "https://www.filmsenzalimiti.press",
"filmstreaming01": "https://filmstreaming01.com",
@@ -32,18 +32,18 @@
"guardaseriecc": "https://guardaserie.site",
"guardaserieclick": "https://www.guardaserie.media",
"guardogratis": "https://guardogratis.net",
"ilgeniodellostreaming": "https://ilgeniodellostreaming.se",
"ilgeniodellostreaming": "https://igds.red",
"italiafilm": "https://www.italia-film.pw",
"italiafilmhd": "https://italiafilm.info",
"italiaserie": "https://italiaserie.org",
"itastreaming": "https://itastreaming.film",
"mondolunatico": "http://mondolunatico.org",
"mondolunatico2": "https://mondolunatico.org/stream",
"mondolunatico2": "https://mondolunatico.org:443/stream",
"mondoserietv": "https://mondoserietv.com",
"piratestreaming": "https://www.piratestreaming.media",
"polpotv": "https://polpo.tv",
"seriehd": "https://www.seriehd.zone",
"serietvonline": "https://serietvonline.tech",
"seriehd": "https://www.seriehd.moda",
"serietvonline": "https://serietvonline.best",
"serietvsubita": "http://serietvsubita.xyz",
"serietvu": "https://www.serietvu.club",
"streamingaltadefinizione": "https://www.popcornstream.best",
@@ -52,4 +52,4 @@
"toonitalia": "https://toonitalia.org",
"vedohd": "https://vedohd.video",
"vvvvid": "https://www.vvvvid.it"
}
}
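This hunk updates the channel-id → base-URL map that each channel resolves at start-up (see the later config.get_channel_url(__channel__) calls in this commit). A minimal, hypothetical sketch of that lookup, assuming a local copy of the JSON; the file name and helper are placeholders, not KOD's actual loader:

import json

def get_channel_url(channel_id, path='channels.json'):
    # hypothetical helper: KOD's real lookup is platformcode.config.get_channel_url()
    with open(path) as f:
        hosts = json.load(f)   # e.g. {"seriehd": "https://www.seriehd.moda", ...}
    return hosts[channel_id]

# host = get_channel_url('filmpertutti')   # -> "https://www.filmpertutti.link"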

View File

@@ -11,11 +11,9 @@ host = support.config.get_channel_url(__channel__)
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['directo', 'openload']
list_servers = ['directo', 'openload', 'vvvvid']
list_quality = ['default']
checklinks = support.config.get_setting('checklinks', __channel__)
checklinks_number = support.config.get_setting('checklinks_number', __channel__)
headers = [['Referer', host]]
@@ -38,7 +36,7 @@ def newest(categoria):
item.contentType = 'tvshow'
item.url = host
item.args = 'newest'
itemlist = peliculas(item)
return peliculas(item)
# Continue the search if an error occurs
except:
import sys
@@ -65,15 +63,6 @@ def peliculas(item):
if item.args == 'newest':
patron = r'<a href="(?P<url>[^"]+)">\s*<img src="(?P<thumb>[^"]+)" alt="(?P<title>.*?)(?: Sub| sub| SUB|")'
def itemHook(item):
url = support.match(item, '<a href="([^"]+)" title="[^"]+" target="[^"]+" class="btn', headers=headers)[0]
item.url = url[0] if url else ''
delete = support.scrapertoolsV2.find_single_match(item.fulltitle, r'( Episodi.*)')
number = support.scrapertoolsV2.find_single_match(item.title, r'Episodi(?:o)? (?:\d+÷)?(\d+)')
item.title = support.typo(number + ' - ','bold') + item.title.replace(delete,'')
item.fulltitle = item.show = item.title.replace(delete,'')
item.number = number
return item
action = 'findvideos'
elif item.args == 'last':
@@ -83,8 +72,23 @@ def peliculas(item):
pagination = ''
patron = r'<strong><a href="(?P<url>[^"]+)">(?P<title>.*?) [Ss][Uu][Bb]'
else:
pagination = ''
patron = r'<a href="(?P<url>[^"]+)">\s*<strong[^>]+>(?P<title>[^<]+)<'
def itemHook(item):
if 'sub-ita' in item.url:
if item.args != 'newest': item.title = item.title + support.typo('Sub-ITA','_ [] color kod')
item.contentLanguage = 'Sub-ITA'
if item.args == 'newest':
url = support.match(item, '<a href="([^"]+)" title="[^"]+" target="[^"]+" class="btn', headers=headers)[0]
item.url = url[0] if url else ''
delete = support.scrapertoolsV2.find_single_match(item.fulltitle, r'( Episodi.*)')
episode = support.scrapertoolsV2.find_single_match(item.title, r'Episodi(?:o)? (?:\d+÷)?(\d+)')
item.title = support.typo(episode + ' - ','bold') + item.title.replace(delete,'')
item.fulltitle = item.show = item.title.replace(delete,'')
item.episode = episode
return item
return locals()
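Throughout this commit the listing functions only declare patron/patronBlock/action/itemHook and end with return locals() for the @support.scrape decorator to consume. A hedged, self-contained sketch of that idea; the real decorator in core.support also downloads item.url and handles pagination, typo formatting and TMDB lookups, and the Item class and extra html argument below are simplifications for illustration:

import re

class Item(dict):
    # hypothetical stand-in for core.item.Item: attribute-style access to its fields
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__

def scrape(func):
    # sketch: turn each regex match's named groups into a new Item
    def wrapper(item, html):
        ctx = func(item)                         # patron, patronBlock, action, itemHook, ...
        block = html
        if ctx.get('patronBlock'):
            m = re.search(ctx['patronBlock'], html, re.S)
            if m:
                block = m.group('block')
        results = []
        for m in re.finditer(ctx['patron'], block, re.S):
            fields = {k: v for k, v in m.groupdict().items() if v}
            it = Item(channel=item.channel, action=ctx.get('action', 'findvideos'), **fields)
            hook = ctx.get('itemHook')
            results.append(hook(it) if hook else it)
        return results
    return wrapper

@scrape
def demo(item):
    patron = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)</a>'
    action = 'findvideos'
    return locals()

# demo(Item(channel='demo'), '<a href="https://example.org/ep1">Episode 1</a>')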
@@ -104,12 +108,16 @@ def findvideos(item):
itemlist = []
if item.number:
if item.episode:
from lib import unshortenit
url, c = unshortenit.unshorten(item.url)
url = support.match(item, r'<a href="([^"]+)"[^>]*>', patronBlock=r'Episodio %s(.*?)</tr>' % item.number ,url=url)[0]
url = support.match(item, r'<a href="([^"]+)"[^>]*>', patronBlock=r'Episodio %s(.*?)</tr>' % item.episode ,url=url)[0]
item.url = url[0] if url else ''
if 'vvvvid' in item.url:
item.action = 'play'
itemlist.append(item)
if 'http' not in item.url:
if '//' in item.url[:2]:
item.url = 'http:' + item.url
@@ -126,11 +134,9 @@ def findvideos(item):
for video in matches:
itemlist.append(
support.Item(channel=item.channel,
action="play",
title='diretto',
url=video,
server='directo'))
action="play",
title='diretto',
url=video,
server='directo'))
support.server(item, itemlist=itemlist)
return itemlist
return support.server(item, itemlist=itemlist)
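findvideos() above first resolves the shortened episode link with lib.unshortenit and then normalises protocol-relative URLs before handing everything to support.server(). A small sketch of that URL handling; the (url, status) return shape matches how the diff calls unshorten(), while the host fallback is an assumption added for illustration:

def normalize_url(url, host='http://example.org'):
    # make the unshortened link an absolute http(s) URL
    if 'http' not in url:
        if url.startswith('//'):      # protocol-relative link, as handled in the diff
            url = 'http:' + url
        else:                         # site-relative path (assumed fallback)
            url = host + url
    return url

# from lib import unshortenit
# url, status = unshortenit.unshorten(item.url)
# item.url = normalize_url(url)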

View File

@@ -15,7 +15,7 @@ headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/
list_servers = ['verystream','openload','rapidvideo','streamango']
list_quality = ['default']
@support.menu
def mainlist(item):
@@ -24,7 +24,7 @@ def mainlist(item):
('ITA', ['/category/anime-ita/', 'peliculas']),
('SUB-ITA', ['/category/anime-sub-ita/', 'peliculas']),
('Conclusi', ['/category/serie-anime-concluse/', 'peliculas']),
('in Corso', ['/category/serie-anime-in-corso/', 'last_ep']),
('in Corso', ['/category/serie-anime-in-corso/', 'peliculas']),
('Genere', ['', 'genres'])
]
@@ -33,7 +33,7 @@ def mainlist(item):
def search(item, texto):
support.log(texto)
item.url = host + "/?s=" + texto
try:
return peliculas(item)
@@ -76,7 +76,8 @@ def peliculas(item):
@support.scrape
def episodios(item):
data = support.match(item, headers=headers)[1]
if 'Lista Episodi' not in data:
if not any(x in data for x in ['Lista Episodi', 'Movie Parte']):
support.log('NOT IN DATA')
patron = r'(?:iframe src|str)="(?P<url>[^"]+)"'
title = item.title
def fullItemlistHook(itemlist):
@@ -94,11 +95,15 @@ def episodios(item):
url = item.url
anime = True
patronBlock = r'(?:<p style="text-align: left;">|<div class="pagination clearfix">\s*)(?P<block>.*?)</span></a></div>'
patron = r'(?:<a href="(?P<url>[^"]+)"[^>]+>)?<span class="pagelink">(?P<episode>\d+)</span>'
patron = r'(?:<a href="(?P<url>[^"]+)"[^>]+>)?<span class="pagelink">(?P<episode>\d+)'
def itemHook(item):
if not item.url:
item.url = url
item.title = support.typo('Episodio ', 'bold') + item.title
if 'Movie Parte' in data:
item.title = support.typo(item.fulltitle + ' - Part ','bold') + item.title
item.contentType = 'movie'
else:
item.title = support.typo('Episodio ', 'bold') + item.title
return item
return locals()

View File

@@ -17,8 +17,8 @@ list_quality = ['default', '480p', '720p', '1080p']
@support.menu
def mainlist(item):
anime = ['/animelist?load_all=1',
anime = ['/animelist?load_all=1',
('Più Votati',['/toplist','menu', 'top']),
('In Corso',['/animeincorso','peliculas','incorso']),
('Ultimi Episodi',['/fetch_pages.php?request=episodes','peliculas','updated'])]
@@ -32,7 +32,7 @@ def search(item, texto):
item.contentType = 'tvshow'
anime = True
patron = r'href="(?P<url>[^"]+)"[^>]+>[^>]+>(?P<title>[^<|(]+)(?:(?P<lang>\(([^\)]+)\)))?<|\)'
action = 'check'
action = 'check'
return locals()
@@ -45,7 +45,7 @@ def newest(categoria):
item.url = host + '/fetch_pages.php?request=episodes'
item.args = "updated"
return peliculas(item)
# Continue the search if an error occurs
# Continue the search if an error occurs
except:
import sys
for line in sys.exc_info():
@@ -64,27 +64,29 @@ def menu(item):
@support.scrape
def peliculas(item):
anime = True
deflang= 'Sub-ITA'
if item.args == 'updated':
post = "page=" + str(item.page if item.page else 1) if item.page > 1 else None
page, data = support.match(item, r'data-page="(\d+)" title="Next">', post=post, headers=headers)
patron = r'<img alt="[^"]+" src="(?P<thumb>[^"]+)" [^>]+></div></a>\s*<a href="(?P<url>[^"]+)"><div class="testo">(?P<title>[^\(<]+)(?:(?P<lang>\(([^\)]+)\)))?</div></a>\s*<a href="[^"]+"><div class="testo2">[^\d]+(?P<episode>\d+)</div></a>'
if page: nextpage = page
item.contentType='episode'
action = 'findvideos'
elif item.args == 'top':
data = item.url
patron = r'<a href="(?P<url>[^"]+)">[^>]+>(?P<title>[^<\(]+)(?:\((?P<year>[^\)]+)\))?</div></a><div class="numero">(?P<title2>[^<]+)</div>.*?src="(?P<thumb>[^"]+)"'
patron = r'<a href="(?P<url>[^"]+)">[^>]+>(?P<title>[^<\(]+)(?:\((?P<year>[0-9]+)\))?(?:\((?P<lang>[A-Za-z]+)\))?</div></a><div class="numero">(?P<title2>[^<]+)</div>.*?src="(?P<thumb>[^"]+)"'
action = 'check'
else:
pagination = ''
if item.args == 'incorso': patron = r'"slider_title" href="(?P<url>[^"]+)"><img src="(?P<thumb>[^"]+)"[^>]+>(?P<title>[^\(<]+)(?:\((?P<year>\d+)\))?</a>'
else: patron = r'href="(?P<url>[^"]+)"[^>]+>[^>]+>(?P<title>[^<|(]+)(?:(?P<lang>\(([^\)]+)\)))?<|\)'
if item.args == 'incorso': patron = r'"slider_title" href="(?P<url>[^"]+)"><img src="(?P<thumb>[^"]+)"[^>]+>(?P<title>[^\(<]+)(?:\((?P<year>\d+)\))?</a>'
else: patron = r'href="(?P<url>[^"]+)"[^>]+>[^>]+>(?P<title>.+?)(?:\((?P<lang>ITA)\))?(?:(?P<year>\((\d+)\)))?</span>'
action = 'check'
return locals()
def check(item):
movie, data = support.match(item, r'Episodi:</b> (\d*) Movie')
anime_id = support.match(data, r'anime_id=(\d+)')[0][0]
anime_id = support.match(data, r'anime_id=(\d+)')[0][0]
item.url = host + "/loading_anime?anime_id=" + anime_id
if movie:
item.contentType = 'movie'

View File

@@ -5,10 +5,10 @@
"""
Known issues that fail the channel test:
-
-
Notes:
- Sub-ita is in the title; leave the pointer on the poster
- Sub-ita is not in the title; leave the pointer on the poster
to see the full title!
New releases:
@@ -17,21 +17,10 @@
"""
# DELETE WHAT THE CHANNEL DOES NOT NEED, leave the code commented out
# but CLEAN UP once testing is finished
# Imports go here
#import re
# for the decorators, for logging, and helpers for particular sites
from core import support
# if needed
from core import scrapertoolsV2, httptools
from core.item import Item
#from lib import unshortenit
##### end of imports
host = ""
headers = ""
@@ -45,17 +34,13 @@ def findhost():
host = host[:-1]
findhost()
# example servers...
list_servers = ['supervideo', 'streamcherry','rapidvideo', 'streamango', 'openload']
# example qualities
list_quality = ['default', 'HD', '3D', '4K', 'DVD', 'SD']
@support.menu
def mainlist(item):
support.log(item)
# Order of the menu entries
# FILM entry, you can only set the url
film = ['',
('Al Cinema', ['/category/in-sala/', 'peliculas', '']),
('Novità', ['/category/nuove-uscite/', 'peliculas', '']),
@@ -69,9 +54,8 @@ def mainlist(item):
@support.scrape
def peliculas(item):
support.log(item)
#dbg # uncomment to enable web_pdb
#support.dbg() # uncomment to enable web_pdb
## action = 'episodios'
blacklist = ['']
if item.args != 'search':
patron = r'<div class="col-mt-5 postsh">[^<>]+<div class="poster-media-card">[^<>]+<a href="(?P<url>[^"]+)" title="(?P<title>.+?)[ ]?(?:\[(?P<lang>Sub-ITA)\])?".*?<img(?:.+?)?src="(?P<thumb>[^"]+)"'
@@ -79,16 +63,17 @@ def peliculas(item):
else:
patron = r'<li class="col-md-12 itemlist">.*?<a href="(?P<url>[^"]+)" title="(?P<title>[^"]+)".*?<img src="(?P<thumb>[^"]+)".*?Film dell"anno: (?P<year>\d{4})(?:[\d\-]+)?</p> <p class="text-list">(?P<plot>[^<>]+)</p>'
patronBlock = r'<ul class="search-results-content infinite">(?P<block>.*?)</section>'
patronNext = '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right"'
#debug = True # set to True to test the regexes against the site
patronNext = '<a href="([^"]+)"\s+?><i class="glyphicon glyphicon-chevron-right"'
#debug = True
return locals()
@support.scrape
def genres(item):
support.log(item)
#dbg
#support.dbg()
action = 'peliculas'
blacklist = ['']

View File

@@ -6,15 +6,7 @@
"adult": false,
"thumbnail": "https://www.cinemalibero.center/wp-content/themes/Cinemalibero%202.0/images/logo02.png",
"banner": "https://www.cinemalibero.center/wp-content/themes/Cinemalibero%202.0/images/logo02.png",
"categories": ["tvshow", "movie","anime"],
"settings": [
{
"id": "include_in_newest_series",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
}
]
"categories": ["movie","tvshow","anime"],
"not_active": ["include_in_newest"],
"settings": []
}
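Several manifests in this commit drop the per-toggle settings boilerplate in favour of an empty settings list plus a not_active array naming the disabled "include in New releases" switches. A hedged sketch of how a loader could honour that key; the function name and the False default for disabled toggles are assumptions for illustration only:

import json

def channel_setting(manifest_path, setting_id):
    with open(manifest_path) as f:
        manifest = json.load(f)
    if setting_id in manifest.get('not_active', []):
        return False                  # toggle switched off for this channel (assumed default)
    for setting in manifest.get('settings', []):
        if setting.get('id') == setting_id:
            return setting.get('default')
    return None

# channel_setting('cinemalibero.json', 'include_in_newest')   # -> False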

View File

@@ -1,205 +1,198 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for cinemalibero
# Channel for 'cinemaLibero'
# ------------------------------------------------------------
"""
These are notes for the beta testers.
The channel does not allow filtering movies, series and other content in search.
Therefore the following entries are disabled:
- "Add to video library"
- "Download movie/series"
for searches only: in-channel and global.
On this channel, in:
- Search (in the channel) and Global Search
- TV Series and the channel's new releases
- New releases -> TV Series
the following entries will not be present:
- 'Add to Video Library',
- 'Download Series'
- SPORT TITLES ARE NOT PRESENT IN GLOBAL OR CHANNEL NEW RELEASES!!!!
so their absence must NOT be reported as an ERROR during the Test.
DO NOT CHECK THE SPORT SECTION, IT HAS PROBLEMS!!!
it has been removed from the menu, but its titles may still appear in search or among the new releases
It is not an error if it misbehaves!!! DO NOT CONSIDER IT!
New releases. Indicate in which section(s) the channel is present:
- MOVIES
Known issues that fail the channel test:
-
Notes:
-
- Any notes for the testers
Further info:
"""
import re
# for the decorators, for logging, and helpers for particular sites
from core import support
# if findhost() is not used
from core import httptools, support, scrapertoolsV2
from core.item import Item
from platformcode import config
# if needed
from core import scrapertoolsV2, httptools#, servertools
from core.item import Item # for newest
#from lib import unshortenit
list_servers = ['akstream', 'wstream', 'backin', 'verystream', 'openload', 'streamango']
list_quality = ['default']
__channel__ = "cinemalibero"
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
##headers = [
## ['Host', host.split("//")[-1].split("/")[0]],
## ['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'],
## ['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'],
## ['Accept-Language', 'en-US,en;q=0.5'],
## ['Accept-Encoding', 'gzip, deflate'],
## ['Referer', host],
## ['DNT', '1'],
## ['Connection', 'keep-alive'],
## ['Upgrade-Insecure-Requests', '1'],
## ['Cache-Control', 'max-age=0']
## ]
list_servers = ['akstream', 'wstream', 'openload', 'streamango']
list_quality = ['default']
headers = [['Referer', host]]
@support.menu
def mainlist(item):
support.log(item)
support.log()
film = ['/category/film/',
('Generi', ['', 'genres', 'genres']),
('Generi', ['', 'genres'])
]
tvshow = ['/category/serie-tv/',
## ('Novità', ['/aggiornamenti-serie-tv/', 'peliculas', 'update']),
tvshow = ['/category/serie-tv/'
]
anime = ['/category/anime-giapponesi/',
]
Anime = [(support.typo('Anime', 'bullet bold'),['/category/anime-giapponesi/', 'peliculas', 'anime', 'tvshow'])
]
## Sport = [(support.typo('Sport', 'bullet bold'), ['/category/sport/', 'peliculas', '', 'tvshow'])
## ]
## news = [('Novità Serie-Anime', ['/aggiornamenti-serie-tv/', 'peliculas', 'update', 'tvshow'])]
search = ''
return locals()
@support.scrape
#def video(item):
def peliculas(item):
support.log(item)
#support.dbg() # uncomment to enable web_pdb
debug = True
blacklist = ['']
support.log()
#debug = True
if item.args == 'search':
patron = r'href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">.+?class="titolo">(?P<title>[^<]+)<'
patronBlock = r'style="color: #2C3549 !important;" class="fon my-3"><small>.+?</small></h1>(?P<block>.*?)<div class="bg-dark ">'
patronBlock = r'<div class="container">.*?class="col-md-12[^"]*?">(?P<block>.*?)<div class=(?:"container"|"bg-dark ")>'
if item.contentType == 'movie':
patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>\s?(?P<rating>[\d\.]+)?[^>]+>[^>]+>(?P<title>.+?)\(?(?P<year>\d+)?\)?<[^>]+>[^>]+>(?P<quality>[^<]+)?<'
elif item.args == 'anime':
patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>\s?(?P<rating>[\d\.]+)?[^>]+>[^>]+>(?P<title>.+?)\(?(?P<year>\d+)?\)?<[^>]+>[^>]+>(?:.+?[^fFiInNeE]+?\(?(?P<lang>[sSuUbBiItTaA]+)\)?.+?)<'
elif item.args == 'update':
action = 'select'
patron = r'<div class="card-body p-0">\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">\s<div class="titolo">(?P<title>.+?)(?: &#8211; Serie TV)?(?:\([sSuUbBiItTaA\-]+\))?[ ]?(?P<year>\d{4})?</div>[ ]<div class="genere">(?:[\w]+?\.?\s?[\s|S]?[\dx\-S]+?\s\(?(?P<lang>[iItTaA]+|[sSuUbBiItTaA\-]+)\)?\s?(?P<quality>[HD]+)?|.+?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?</div>)'
else:
if item.contentType == 'tvshow':
# to be fixed so it matches the most recently added series
if item.args == 'update':
patron = r'<div class="card-body p-0"> <a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^)]+)\)">[^>]+>(?P<title>.+?)(?:[ ]\((?P<lang>SubITA)\))?(?:[ ](?P<year>\d{4}))?</div> <div class="genere">(?:|Ep.)(?:|.+?)?</div>'
action = 'select'
else:
## #patron = r'href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)<[^>]+>[^>]+>(?:.+?) (?:\()?(?P<lang>ITA|iTA|Sub)(?:\))?'
## #patron = r'<div class="card-body p-0"> <a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)(?: \(.+?\))?(?: \(\d+\)| \d+)?<[^>]+>(?:<div class="genere">.+?(?:\()?(?P<lang>ITA|iTA|Sub)(?:\))?)?'
patron = r'<div class="card-body p-0"> <a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)(?: \(.+?\))?(?: \(\d+\)| \d+)?</div><div class="genere">(?:.?(?P<episode>\d+x\d+-\d+|\d+-\d+|\d+x\d+|\d+)[ ]?(?:\()?(?:(?P<lang>ITA|iTA|Sub ITA|Sub iTA|Sub))?[ ]?(?:(?P<quality>HD))?.+?)</div>'
action = 'episodios'
if 'anime' in item.url:
patron = r'<div class="card-body p-0"> <a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)(?: \(.+?\))?(?: \(\d+\)| \d+)?<[^>]+>(?:<div class="genere">.+?(?:\()?(?P<lang>ITA|iTA|Sub)(?:\))?)?'
action = 'select'
elif item.contentType == 'movie':
action = 'findvideos'
patron = r'href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)(?:\[(?P<lang>Sub-iTA|Sub-ITA|Sub)\])?[ ]\((?P<year>\d+)\)</div>(?:<div class="genere">(?P<quality>[^<]+)<)?'
patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>(?P<title>[^<]+)<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
patronBlock = r'<h1(?: style="color: #2C3549 !important; text-transform: uppercase;"| style="text-transform: uppercase; color: #2C3549 !important;"| style="color: #2C3549 !important; text-transform: uppercase;" style="text-shadow: 1px 1px 1px #FF8C00; color:#FF8C00;"| style="text-shadow: 1px 1px 1px #0f0f0f;" class="darkorange"| style="color:#2C3549 !important;")>.+?</h1>(?P<block>.*?)<div class=(?:"container"|"bg-dark ")>'
def itemHook(item):
if item.lang2:
if len(item.lang2)<3:
item.lang2 = 'ITA'
item.contentLanguage = item.lang2
item.title += support.typo(item.lang2, '_ [] color kod')
patronNext = '<a class="next page-numbers".*?href="([^"]+)">'
if item.contentType == 'movie':
item.action = 'findvideos'
elif item.args == 'anime' or item.args == 'update' or item.args == 'search':
item.action = 'select'
elif item.contentType == 'tvshow':
item.extra = 'serie'
item.action = 'episodios'
else:
item.action = 'select'
return item
patronNext = r'<a class="next page-numbers".*?href="([^"]+)">'
return locals()
@support.scrape
def episodios(item):
support.log(item)
def episodios(item): # this def must always be named episodios
support.log()
#dbg
## if item.args == '':
## patron = r'<a target=.+?href="(?P<url>[^"]+)"[^>]+>(?P<title>Epis.+?(\d+)?)(?:\((?P<lang>Sub ITA)\))?</a><br />'
## patronBlock = r'(?:class="txt_dow">Streaming:(?P<block>.*?)at-below-post)'
if item.data1 and 'stagione' not in item.data1.lower():
# it is a movie
item.contentType = 'tvshow'
#patron = r'(?:href="[ ]?(?P<url>[^"]+)"[^>]+>(?P<title>[^<]+)<|(?P<episode>\d+(?:&#215;|×)?\d+\-\d+|\d+(?:&#215;|×)\d+)[;]?(?:(\4[^<]+)(\2.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br />|</a></p>))'
patron = r'<a target=.+?href="(?P<url>[^"]+)"[^>]+>(?P<title>Epis.+?(?P<episode>\d+)?)(?:\((?P<lang>Sub ITA)\))?</a>(?:<br />)?'
patronBlock = r'(?:class="txt_dow">Streaming:(?P<block>.*?)at-below-post)'
else:
if item.extra == 'serie':
support.log("Serie :", item)
patron = r'(?P<episode>\d+(?:&#215;|×)?\d+\-\d+|\d+(?:&#215;|×)\d+)[;]?[ ]?(?:(?P<title>[^<]+)(?P<url>.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br />|</a></p>)'
## patron = r'<a target=.+?href="(?P<url>[^"]+)"[^>]+>(?P<title>Epis.+?(\d+)?)(?:\((?P<lang>Sub ITA)\))?</a><br />'
patronBlock = r'<p><strong>(?P<block>(?:.+?[Ss]tagione.+?(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?(?:|.+?|</strong>)(/?:</span>)?</p>.*?</p>)'
item.contentType = 'tvshow'
action = 'findvideos'
blacklist = ['']
## pagination = ''
## debug = True
item.contentSerieName = item.fulltitle
elif item.args == 'anime':
support.log("Anime :", item)
blacklist = ['Clipwatching', 'Verystream', 'Easybytez', 'Flix555']
#patron = r'(?:href="[ ]?(?P<url>[^"]+)"[^>]+>(?P<title>[^<]+))<|(?P<episode>\d+(?:&#215;|×)?\d+\-\d+|\d+(?:&#215;|×)\d+)[;]?(?:(\4[^<]+)(\2.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br />|</a></p>)'
#patron = r'<a target=.+?href="(?P<url>[^"]+)"[^>]+>(?P<title>(Epis|).+?(?P<episode>\d+)?)(?:\((?P<lang>Sub ITA)\))?</a>(?:<br />)?'
patron = r'<a target=(?P<url>.+?(?:rel="noopener noreferrer">(?P<title>[^<]+)))</a>.+?(?:</a></p>|</a><br />)'
patronBlock = r'Streaming.+?:(?P<block>.*?)</div>'
#patronBlock = r'(?:<p>)?(?P<block>.*?)(?:</a><br /> |</p><div)'
item.contentType = 'tvshow'
item.contentSerieName = item.fulltitle
else:
support.log('extra = else --- select = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
show=item.fulltitle,
contentType='movie'))
debug = True
return locals()
@support.scrape
def genres(item):
support.log(item)
#dbg
support.log()
action = 'peliculas'
blacklist = ['']
patron = r'<a class="dropdown-item" href="(?P<url>[^"]+)" title="(?P<title>[A-z]+)"'
action='peliculas'
patron_block=r'<div id="bordobar" class="dropdown-menu(?P<block>.*?)</li>'
patron=r'<a class="dropdown-item" href="(?P<url>[^"]+)" title="(?P<title>[A-z]+)"'
return locals()
def select(item):
support.log('select --->', item)
#support.dbg()
support.log()
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertoolsV2.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)<div style="margin-left: 0.5%; color: #FFF;">')
block = scrapertoolsV2.find_single_match(data, r'Streaming\s?[\w]+?:(.*?)<\/div>')
if re.findall('rel="category tag">serie', data, re.IGNORECASE):
support.log('select = ### è una serie ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
contentSerieName = fulltitle,
url=item.url,
args='serie',
contentType='tvshow',
data1 = data
))
extra='serie',
contentType='episode'))
elif re.findall('rel="category tag">anime', data, re.IGNORECASE):
if re.findall('episodio', block, re.IGNORECASE) or re.findall('stagione', data, re.IGNORECASE):
if re.findall('episodio', block, re.IGNORECASE) or re.findall('episodi streaming', block, re.IGNORECASE) or \
re.findall('numero stagioni', data, re.IGNORECASE):
support.log('select = ### è un anime ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
contentSerieName = item.fulltitle,
url=item.url,
args='anime',
contentType='tvshow',
data1 = data
))
contentType='episode'))
else:
support.log('select = ### è un film ###')
support.log('select anime ELSE = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
args = '',
contentType='movie',
#data = data
))
contentType='movie'))
else:
support.log('select = ### è un film ###')
support.log('select ELSE = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
contentType='movie',
#data = data
))
contentType='movie'))
def search(item, texto):
support.log("[cinemalibero.py] " + item.url + " search " + texto)
support.log(item.url,texto)
item.url = host + "/?s=" + texto
item.contentType = 'episode'
item.args = 'search'
try:
item.args = 'search'
item.contentType = 'episode' # keeps the entries out of the context menu
return peliculas(item)
# Continue the search if an error occurs
except:
@@ -214,26 +207,30 @@ def newest(categoria):
item = Item()
try:
if categoria == 'peliculas':
item.args = 'update'
item.url = host+'/aggiornamenti-serie-tv/'
item.contentType = 'tvshow'
item.action = 'peliculas'
itemlist = peliculas(item)
item.url = host+'/category/film/'
item.contentType = 'movie'
## item.action = 'peliculas'
## itemlist = peliculas(item)
## elif categoria == 'series':
## item.contentType = 'tvshow'
## item.args = 'update'
## item.url = host+'/aggiornamenti-serie-tv/'
item.action = 'peliculas'
itemlist = peliculas(item)
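# the trailing item returned by peliculas() is most likely the "next page" entry added by the
# pagination; pop it so it does not show up in global New releases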
if itemlist[-1].action == 'peliculas':
itemlist.pop()
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Continue the search if an error occurs
except:
import sys
for line in sys.exc_info():
log('newest log: ', '{0}'.format(line))
support.log('newest log: ', '{0}'.format(line))
return []
return itemlist
def findvideos(item):
support.log(item)
support.log('findvideos ->', item)
if item.contentType == 'movie':
return support.server(item)
else:

View File

@@ -7,5 +7,6 @@
"thumbnail": "dreamsub.png",
"banner": "dreamsub.png",
"categories": ["anime", "vos"],
"not_active": ["include_in_newest"],
"settings": []
}

View File

@@ -6,46 +6,49 @@
"""
Known issues that fail the channel test:
- list the issues
- None known!
Notes:
- Episodes are split into pages of 20
- In New releases->Anime, click the "Ultime inserite" button on the home page
Notes for the testers:
1. Episodes are split into pages of 20
2. In New releases->Anime, click the "Ultime inserite" button on the home page.
   If you get more titles in KOD, resize the browser so the titles show up
   in groups of 3 and check again; it is a problem of the site.
3. Steps to add a Series to the video library and/or download it:
   1. on the title -> context menu -> Renumbering
   Only after this step will the entries appear, on the title -> context menu ->:
   - Add to video library (without renumbering the entry
     does not appear)
   - Download Series and Download Season (if download is enabled!)
Further info:
-
4. ### MOST IMPORTANT!!! ###
#### WHAT FOLLOWS MUST NOT BE TREATED AS AN ERROR IN THE TEST!!!! ####
1. The site allows filtering anime vs. movies through the url.
   If the url contains /anime/, on the title and all the way to the video page
   you will find the entries:
   - 'Renumbering' first, and afterwards: 'Add to video library', 'Download Series' etc...
   Everything else is treated as a movie and gets the usual entries,
   EXCEPT for those "MOVIES" that contain 2 or more titles; in that case:
   1. No "Add to video library", "Download Movie" or "Renumbering" entry will appear
   2. After entering the page of the Main Title you will find a list of titles from which
      the video (called EPISODE) can be downloaded; the same happens on the final video page
   3. These TITLES CANNOT BE ADDED TO THE VIDEO LIBRARY;
      the "Download Movie" entries appear afterwards.
   Ex:
   https://www.dreamsub.stream/movie/5-centimetri-al-secondo -> a movie, but it has 3 titles
The channel is NOT present in (global) New releases -> Anime
-------------------------------------------------------
NOTE for the DEVs (see the sketch right after this docstring):
- Titles that do not contain the keyword matching the menu entry are removed
  from the menu results, according to the following scheme:
    Menu      Keyword in the link
    ---------------------------
    OAV       oav
    OVA       ova
    Speciali  movie
    Movie     movie
    Serie     every other case
  Ex:
  https://www.dreamsub.stream/oav/another-the-other - it is an OAV
"""
# Imports go here
import re
import copy
from core import support
from platformcode import config
##from specials.autorenumber import renumber
from specials import autorenumber
# if needed
from core import scrapertoolsV2, httptools, servertools, tmdb
from core.item import Item
#from lib import unshortenit
##### end of imports
__channel__ = "dreamsub"
@@ -53,7 +56,7 @@ host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
# example servers...
list_servers = ['verystream', 'streamango', 'openload', 'directo']
list_servers = ['directo', 'verystream', 'streamango', 'openload']
# example qualities
list_quality = ['default']
@@ -63,19 +66,15 @@ list_quality = ['default']
def mainlist(item):
support.log(item)
anime = ['/anime',
## ('Novità', ['']),
## ('OAV', ['/search/oav', 'peliculas', 'oav']),
## ('OVA', ['/search/ova', 'peliculas', 'ova']),
('Movie', ['/search/movie', 'peliculas', 'special']),
('Film', ['/search/film', 'peliculas', 'special']),
('Movie', ['/search/movie', 'peliculas', '', 'movie']),
('Film', ['/search/film', 'peliculas', '', 'movie']),
('Categorie', ['/filter?genere=','genres']),
## ('Ultimi Episodi', ['', 'last'])
]
"""
Any additional menus for entries not covered above!
"""
return locals()
@@ -85,53 +84,37 @@ def peliculas(item):
#dbg # uncomment to enable web_pdb
anime = True
action = 'episodios'
item.contentType = 'tvshow'
if item.args == 'newest':
patronBlock = r'<div class="showRoomGoLeft" sr="ultime"></div>(?P<block>.*?)<div class="showRoomGoRight" sr="ultime">'
patronBlock = r'<div class="showRoomGoLeft" sr="ultime"></div>(?P<block>.*?)<div class="showRoomGoRight" sr="ultime">'
else:
patronBlock = r'<input type="submit" value="Vai!" class="blueButton">(?P<block>.*?)<div class="footer">'
patron = r'<div class="showStreaming"> <b>(?P<title>[^<]+).+?Stato streaming: '\
'(?:[^<]+)<.*?Lingua:[ ](?P<lang1>ITA\/JAP|ITA|JAP)?(?:[ ])?'\
'(?P<lang2>SUB ITA)?<br>.+?href="(?P<url>[^"]+)".+?'\
'background: url\((?P<thumb>[^"]+)\).+?<div class="tvTitle">.+?'\
'<strong>Anno di inizio</strong>: (?P<year>\d+)<br>'
## patron = r'<div class="showStreaming"> <b>(?P<title>[^<]+).+?Stato streaming: '\
## '(?:[^<]+)<.*?Lingua:[ ](?P<lang1>ITA\/JAP|ITA|JAP)?(?:[ ])?'\
## '(?P<lang2>SUB ITA)?<br>.+?href="(?P<url>[^"]+)".+?'\
## 'background: url\((?P<thumb>[^"]+)\).+?<div class="tvTitle">.+?'\
## '<strong>Anno di inizio</strong>: (?P<year>\d+)<br>'
patron = r'<div class="showStreaming"> <b>(?P<title>[^<]+).+?Stato streaming: (?:[^<]+)<.*?Lingua:[ ](?P<lang1>ITA\/JAP|ITA|JAP)?(?:[ ])?(?P<lang2>SUB ITA)?<br>.+?href="(?P<url>[^"]+)".+?background: url\((?P<thumb>[^"]+)\).+?<div class="tvTitle">.+?Episodi[^>]+>.\s?(?P<nep>\d+).+?<strong>Anno di inizio</strong>: (?P<year>\d+)<br>'
patronNext = '<li class="currentPage">[^>]+><li[^<]+<a href="([^"]+)">'
def itemHook(item):
support.log("ITEMHOOK -> ", item)
lang = []
if item.lang1 == 'ITA/JAP' or item.lang1 == 'ITA':
lang.append('ITA')
item = language(item)
if item.lang1 == 'JAP' and item.lang2 == 'SUB ITA' or item.lang2 == 'SUB ITA':
lang.append('Sub-ITA')
support.log("ITEMHOOK LANG-> ", lang)
item.contentLanguage = lang
if len(lang) == 2:
item.title += ' [COLOR 0xFF65B3DA][' + lang[0] + '][/COLOR]'+' [COLOR 0xFF65B3DA][' + lang[1] + '][/COLOR]'
elif len(lang) == 1 and lang[0] != 'ITA':
item.title += ' [COLOR 0xFF65B3DA][' + lang[0] + '][/COLOR]'
# if they can somehow be told apart through the link
## if item.args == 'oav':
## if not '/oav/' in url:
## continue
## elif item.args == 'ova':
## if not '/ova/' in url:
## continue
## elif item.args == 'special':
if item.args == 'search' or item.args == 'special':
## if '/movie/' in item.url:
## item.args = 'special'
## if item.args == 'special':
item.action = 'findvideos'
item.contentType = 'movie'
if not '/movie/' in item.url:
pass
if 'anime' in item.url:
item.contentType = 'tvshow'
item.action = 'episodios'
#item.args = 'anime'
else:
if item.nep == '1':
item.contentType = 'movie'
item.action = 'findvideos'
else:
item.contentType = 'episode'
item.args = ''
item.nep = item.nep
item.action = 'findmovie'
return item
#debug = True
@@ -140,36 +123,18 @@ def peliculas(item):
@support.scrape
def episodios(item):
support.log(item)
#dbg
anime = True
## item.contentType = 'episode'
#support.dbg()
action = 'findvideos'
blacklist = ['']
patronBlock = r'<div class="seasonEp">(?P<block>.*?)<div class="footer">'
patron = r'<li><a href="(?P<url>[^"]+)"[^<]+<b>(?:.+?)[ ](?P<episode>\d+)<\/b>[^>]+>(?P<title>[^<]+)<\/i>[ ]\((?P<lang1>ITA)?\s?.+?\s?(?P<lang2>Sub ITA)?.+?\)<\/a>'
pagination = ''
patron = r'<li><a href="(?P<url>[^"]+)"[^<]+<b>(?:.+?)[ ](?P<episode>\d+)<\/b>[^>]+>(?P<title>[^<]+)<\/i>[ ]\(?(?P<lang1>ITA|Sub ITA)?\s?.?\s?(?P<lang2>Sub ITA)?.+?\)?<\/a>'
#UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 18: ordinal not in range(128)
def itemHook(item):
support.log("ITEMHOOK EPISODE LANG1 -> ", item)
lang = []
if item.lang1 == 'ITA':
lang.append('ITA')
if item.lang2 == 'Sub ITA':
lang.append('Sub-ITA')
support.log("ITEMHOOK EPISODE LANG2-> ", lang)
item.contentLanguage = lang
support.log("ITEMHOOK EPISODE LANG3 -> ", item, lang)
if len(lang) ==2:
item.title += ' [COLOR 0xFF65B3DA][' + lang[0] + '][/COLOR]'+' [COLOR 0xFF65B3DA][' + lang[1] + '][/COLOR]'
item.show += ' [COLOR 0xFF65B3DA][' + lang[0] + '][/COLOR]'+' [COLOR 0xFF65B3DA][' + lang[1] + '][/COLOR]'
elif len(lang) == 1 and lang[0] != 'ITA':
item.title += ' [COLOR 0xFF65B3DA][' + lang[0] + '][/COLOR]'
item.show += ' [COLOR 0xFF65B3DA][' + lang[0] + '][/COLOR]'
item = language(item)
return item
pagination = ''
#debug = True
return locals()
@@ -177,6 +142,7 @@ def episodios(item):
def genres(item):
support.log(item)
#dbg
item.contentType = ''
action = 'peliculas'
blacklist = ['tutti']
@@ -187,15 +153,60 @@ def genres(item):
item.contentTitle = item.contentTitle.replace(' ', '+')
item.url = host+'/filter?genere='+item.contentTitle
return item
#debug = True
return locals()
@support.scrape
def findmovie(item):
support.log(item)
patronBlock = r'<div class="seasonEp">(?P<block>.*?)<div class="footer">'
item.contentType = 'episode'
item.nep = 2
patron = r'<li><a href="(?P<url>[^"]+)"[^>]+>.(?P<title2>.+?)-.+?-[ ]<b>(?P<title>.+?)</b>\s+\(?(?P<lang1>ITA)?\s?(?P<lang2>Sub ITA)?.+?\)?'
def itemHook(item):
item = language(item)
return item
#debug = True
return locals()
def language(item):
lang = []
if item.lang1:
if item.lang1.lower() == 'ita/jap' or item.lang1.lower() == 'ita':
lang.append('ITA')
if item.lang1.lower() == 'jap' and item.lang1.lower() == 'sub ita':
lang.append('Sub-ITA')
if item.lang2:
if item.lang2.lower() == 'sub ita':
lang.append('Sub-ITA')
item.contentLanguage = lang
if len(lang) ==2:
item.title += support.typo(lang[0], '_ [] color kod') + support.typo(lang[1], '_ [] color kod')
#item.show += support.typo(lang[0], '_ [] color kod') + support.typo(lang[1], '_ [] color kod')
elif len(lang) == 1:
item.title += support.typo(lang[0], '_ [] color kod')
#item.show += support.typo(lang[0], '_ [] color kod')
return item
def search(item, text):
support.log('search', item)
itemlist = []
text = text.replace(' ', '+')
item.url = host + '/search/' + text
item.contentType = item.contentType
item.args = 'search'
try:
return peliculas(item)
@@ -206,32 +217,11 @@ def search(item, text):
support.log('search log:', line)
return []
def newest(categoria):
support.log('newest ->', categoria)
itemlist = []
item = Item()
item.contentType = 'tvshow'
item.args = 'newest'
try:
item.url = host
item.action = 'peliculas'
itemlist = peliculas(item)
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Continue the search if an error occurs
except:
import sys
for line in sys.exc_info():
log('newest log: ', '{0}'.format(line))
return []
return itemlist
# to be adapted... (support.server takes several parameters)
#support.server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True)
def findvideos(item):
support.log()
support.log("ITEM ---->", item)
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -241,9 +231,10 @@ def findvideos(item):
patron = r'href="(.+?)"'
block = scrapertoolsV2.find_single_match(data, patronBlock)
urls = scrapertoolsV2.find_multiple_matches(block, patron)
#support.regexDbg(item, patron, headers, data=data)
for url in urls:
titles = item.infoLabels['title']
lang = ''
if 'sub_ita' in url.lower():
lang = 'Sub-ITA'
@@ -269,25 +260,35 @@ def findvideos(item):
else:
data = httptools.downloadpage(url).data
host_video = scrapertoolsV2.find_single_match(data, r'var thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"')
#host_video = scrapertoolsV2.find_single_match(data, r'var thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"')
host_video = scrapertoolsV2.find_single_match(data, r'let thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"')
link = scrapertoolsV2.find_single_match(data, r'<video src="([^"]+)"')
video_urls = host_video+link
title = support.typo(item.fulltitle,'_ bold') + support.typo(lang,'_ [] color kod')
title_show = support.typo(titles,'_ bold') + support.typo(lang,'_ [] color kod')
itemlist.append(
support.Item(channel=item.channel,
action="play",
contentType=item.contentType,
title=title,
fulltitle=title,
show=title,
title=title_show,
fulltitle=item.fulltitle,
show=item.fulltitle,
url=video_urls,
infoLabels=item.infoLabels,
infoLabels = item.infoLabels,
thumbnail=item.thumbnail,
contentSerieName= item.contentSerieName,
contentTitle=title,
contentSerieName= item.fulltitle,
contentTitle=title_show,
contentLanguage = 'ITA' if lang == [] else lang,
args=item.args,
server='directo',
))
if item.contentType != 'episode' and int(item.nep) < 2 :
# "Add to video library" link
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findservers':
support.videolibrary(itemlist, item)
# download link
if config.get_setting('downloadenabled'):
support.download(itemlist, item)
return itemlist

View File

@@ -3,7 +3,7 @@
"name": "Eurostreaming",
"active": true,
"adult": false,
"language": ["ita","vos"],
"language": ["ita","sub-ita"],
"thumbnail": "eurostreaming.png",
"banner": "eurostreaming.png",
"categories": ["tvshow","anime","vos"],

View File

@@ -4,14 +4,17 @@
# by Greko
# ------------------------------------------------------------
"""
Known issues:
- Some anime/cartoon sections do not work; some only have the episode list but no links,
others change their structure
Known issues not to be treated as errors in the test:
- Some anime/cartoon sections do not work:
- some only have the episode list, but no links!
New releases (global):
- series, anime
"""
import re
from core import scrapertoolsV2, httptools, support
from core.item import Item
from platformcode import logger, config
from platformcode import config
# set dynamically by findhost()
host = ""
@@ -32,11 +35,18 @@ list_quality = ['default']
def mainlist(item):
support.log()
tvshow = [
('Archivio ', ['/category/serie-tv-archive/', 'peliculas', '', 'tvshow']),
('Aggiornamenti ', ['/aggiornamento-episodi/', 'peliculas', True, 'tvshow'])
tvshow = [''
]
anime = ['/category/anime-cartoni-animati/']
anime = ['/category/anime-cartoni-animati/'
]
mix = [
(support.typo('Aggiornamenti Serie-Anime', 'bullet bold'), ['/aggiornamento-episodi/', 'peliculas', 'newest']),
(support.typo('Archivio Serie-Anime', 'bullet bold'), ['/category/serie-tv-archive/', 'peliculas'])
]
search = ''
return locals()
@@ -45,25 +55,15 @@ def peliculas(item):
support.log()
action = 'episodios'
if item.args == True:
patron = r'<span class="serieTitle" style="font-size:20px">(?P<title>.*?)'\
'.[^][\s]?<a href="(?P<url>[^"]+)"\s+target="_blank">'\
'(?P<episode>\d+x\d+-\d+|\d+x\d+) (?P<title2>.*?)[ ]?'\
'(?:|\((?P<lang>SUB ITA)\))?</a>'
## # lets episode and title + title2 be shown in new releases
## # if enabled, this causes problems when adding to the video library
## def itemHook(item):
## item.show = item.episode + item.title
## return item
if item.args == 'newest':
#patron = r'<span class="serieTitle" style="font-size:20px">(?P<title>.*?).[^][\s]?<a href="(?P<url>[^"]+)"\s+target="_blank">(?P<episode>\d+x\d+-\d+|\d+x\d+) (?P<title2>.*?)[ ]?(?:|\((?P<lang>SUB ITA)\))?</a>'
patron = r'<span class="serieTitle" style="font-size:20px">(?P<title>.*?).[^–][\s]?<a href="(?P<url>[^"]+)"\s+target="_blank">(?:<episode>\d+x\d+-\d+|\d+x\d+) .*?[ ]?\(?(?P<lang>SUB ITA)?\)?</a>'
pagination = ''
else:
patron = r'<div class="post-thumb">.*?\s<img src="(?P<thumb>[^"]+)".*?>'\
'<a href="(?P<url>[^"]+)".*?>(?P<title>.*?(?:\((?P<year>\d{4})\)'\
'|(\4\d{4}))?)<\/a><\/h2>'
patron = r'<div class="post-thumb">.*?\s<img src="(?P<thumb>[^"]+)".*?><a href="(?P<url>[^"]+)"[^>]+>(?P<title>.+?)\s?(?: Serie Tv)?\s?\(?(?P<year>\d{4})?\)?<\/a><\/h2>'
patronNext='a class="next page-numbers" href="?([^>"]+)">Avanti &raquo;</a>'
## debug = True
#debug = True
return locals()
@support.scrape
@@ -73,32 +73,39 @@ def episodios(item):
action = 'findvideos'
item.contentType = 'tvshow'
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data.replace("'", '"')
data1 = pagina(item.url)
data1 = re.sub('\n|\t', ' ', data1)
data = re.sub(r'>\s+<', '> <', data1)
patronBlock = r'(?P<block>STAGIONE\s\d+ (.+?)?(?:\()?(?P<lang>ITA|SUB ITA)(?:\))?.*?)</div></div>'
#patron = r'(?:\s|\Wn)?(?:<strong>|)?(?P<episode>\d+&#\d+;\d+-\d+|\d+&#\d+;\d+)(?:</strong>|)?(?P<title>.+?)(?:|-.+?-|–.+?–|–|.)?<a (?P<url>.*?)<br />'
patron = r'(?:\s|\Wn)?(?:<strong>|)?(?P<episode>\d+&#\d+;\d+-\d+|\d+&#\d+;\d+)(?:</strong>|)?(?P<title>.+?)(?:–|-.+?-|–.+?–|–|.)?(?:<a (?P<url>.*?))?<br />'
def itemHook(item):
if not item.url:
item.title += ' [B][COLOR red]### NO LINK ###[/COLOR][/B]'
return item
#support.regexDbg(item, patronBlock, headers, data)
#debug = True
return locals()
def pagina(url):
support.log(url)
data = httptools.downloadpage(url, headers=headers).data.replace("'", '"')
#support.log("DATA ----###----> ", data)
if 'clicca qui per aprire' in data.lower():
item.url = scrapertoolsV2.find_single_match(data, '"go_to":"([^"]+)"')
item.url = item.url.replace("\\","")
url = scrapertoolsV2.find_single_match(data, '"go_to":"([^"]+)"')
url = url.replace("\\","")
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data.replace("'", '"')
data = httptools.downloadpage(url, headers=headers).data.replace("'", '"')
elif 'clicca qui</span>' in data.lower():
item.url = scrapertoolsV2.find_single_match(data, '<h2 style="text-align: center;"><a href="([^"]+)">')
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data.replace("'", '"')
data = httptools.downloadpage(url, headers=headers).data.replace("'", '"')
data = re.sub('\n|\t', ' ', data)
patronBlock = r'(?P<block>STAGIONE\s\d+ (?:\()?(?P<lang>ITA|SUB ITA)(?:\))?.*?)</div></div>'
patron = r'(?:\s|\Wn)?(?:<strong>|)?(?P<episode>\d+&#\d+;\d+-\d+|\d+&#\d+;\d+)'\
'(?:</strong>|)?(?P<title>.+?)(?:|-.+?-|–.+?–|–|.)?<a (?P<url>.*?)<br />'
## debug = True
return locals()
# =========== def findvideos =============
def findvideos(item):
support.log('findvideos', item)
return support.server(item, item.url)
return data
# =========== def search =============
def search(item, texto):
@@ -113,29 +120,31 @@ def search(item, texto):
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
support.log(line)
return []
# =========== def new releases in global search =============
def newest(categoria):
support.log()
itemlist = []
item = Item()
item.contentType = 'tvshow'
item.args = True
item.args = 'newest'
try:
item.url = "%s/aggiornamento-episodi/" % host
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
support.log("{0}".format(line))
return []
return itemlist
# =========== def findvideos =============
def findvideos(item):
support.log('findvideos', item)
return support.server(item, item.url)

View File

@@ -7,14 +7,6 @@
"thumbnail": "fastsubita.png",
"banner": "fastsubita.png",
"categories": ["tvshow", "vos"],
"settings": [
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
}
]
"not_active": ["include_in_newest_peliculas", "include_in_newest_anime", "include_in_newest_italiano"],
"settings": []
}

View File

@@ -1,201 +1,111 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for fastsubita
# Thanks Icarus crew & Alfa addon & 4l3x87
# Channel for fastsubita.py
# ------------------------------------------------------------
"""
Known issues that fail the channel test:
- list the issues
On this channel, in the 'Global Search' category,
the 'Add to Video Library' and
'Download Movie'/'Download Series' entries will not be present, so
their absence must NOT be reported as an ERROR during the Test.
Notes:
- Any notes for the testers
New releases. Indicate in which section(s) the channel is present:
- series
Further info:
- SUB-ITA ONLY
---------------------------------------------------
For the DEVs:
- in search, including global search, a "next" entry shows up
but it opens the text-input dialog instead
"""
from core import support, httptools, scrapertoolsV2, tmdb
from core import support, httptools, scrapertoolsV2
from core.item import Item
from core.support import log
from platformcode import config #, logger
from platformcode import config
__channel__ = 'fastsubita'
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
##IDIOMAS = {'Italiano': 'IT'}
##list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'speedvideo', 'wstream', 'flashx', 'vidoza', 'vidtome']
list_quality = ['default']
PERPAGE = 15
@support.menu
def mainlist(item):
tvshow = ['',
('Archivio A-Z ', ['', 'list_az'])
Tvshow = [
('Aggiornamenti', ['', 'peliculas', '', 'tvshow']),
('Per Lettera', ['/elenco-serie-tv/', 'genres', 'genres'])
]
search = ''
return locals()
@support.scrape
def peliculas(item):
support.log(item)
#dbg # uncomment to enable web_pdb
#support.dbg()
deflang = 'Sub-ITA'
action = 'findvideos'
blacklist = ['']
patron = r'<div class="featured-thumb"> <a href="(?P<url>[^"]+)" title="(?:(?P<title>.+?)[ ]?(?P<episode>\d+&#215;\d+).+?&#8220;(?P<title2>.+?)&#8221;).+?">(?P<lang>Sub-ITA)?'
patronBlock = r'<main id="main" class="site-main" role="main">(?P<block>.*?)<nav class="navigation pagination" role="navigation">'
if item.args == 'genres':
patronBlock = r'<h4 id="mctm1-.">'+item.fulltitle+'</h4>(?P<block>.+?)</div>'
patron = r'[^>]+>[^>]+>.+?href="(?P<url>[^"]+)[^>]>(?P<title>[^<]+)\s<'
action = 'episodios'
elif item.args == 'search':
patronBlock = r'</h1> </header>(?P<block>.*?)</main>'
patron = r'(?:<img src="(?P<thumb>[^"]+)"[^>]+>)?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="(?P<url>[^"]+)"[^>]+>(?:(?P<title>.+?)[ ](?P<episode>[\d&#;\d]+\d+|\d+..\d+)(?: \([a-zA-Z\s]+\) )(?:s\d+e\d+)?[ ]?(?:[&#\d;|.{3}]+)(?P<title2>[^&#\d;|^.{3}]+)(?:|.+?))<'
else:
patron = r'<div class="featured-thumb"> <a href="(?P<url>[^"]+)" title="(?:(?P<title>.+?)[ ]?(?P<episode>\d+&#215;\d+).+?&#8220;(?P<title2>.+?)&#8221;).+?">'
patronBlock = r'<main id="main" class="site-main" role="main">(?P<block>.*?)<nav class="navigation pagination" role="navigation">'
patronNext = '<a class="next page-numbers" href="(.*?)">Successivi'
def itemHook(item):
if item.args == 'newest':
item.show = item.title# + support.typo('Sub-ITA', '_ [] color kod')
return item
## debug = True # set to True to test the regexes against the site
#debug = True
return locals()
@support.scrape
def episodios(item):
support.log(item)
#dbg
item.args = 'episodios'
#support.dbg()
deflang = 'Sub-ITA'
action = 'findvideos'
blacklist = ['']
patron = r'<div class="featured-thumb"> <a href="(?P<url>[^"]+)" title="(?:(?P<title>.+?)[ ]?(?P<episode>\d+&#215;\d+).+?&#8220;(?P<title2>.+?)&#8221;).+?">(?P<lang>Sub-ITA)?'
patron = r'<div class="featured-thumb"> <a href="(?P<url>[^"]+)" title="(?:(?P<title>.+?)[ ]?(?P<episode>\d+&#215;\d+|\d+[×.]+\d+).+?&#8220;(?P<title2>.+?)&#8221;).+?">'
patronBlock = r'<main id="main" class="site-main" role="main">(?P<block>.*?)</main>'
patronNext = '<a class="next page-numbers" href="(.*?)">Successivi'
## debug = True
#debug = True
return locals()
@support.scrape
def genres(item):
support.log()
#support.dbg()
action = 'peliculas'
patronBlock = r'<div id="mcTagMapNav">(?P<block>.+?)</div>'
patron = r'<a href="(?P<url>[^"]+)">(?P<title>.+?)</a>'
def itemHook(item):
item.url = host+'/elenco-serie-tv/'
item.contentType = 'tvshow'
return item
#debug = True
return locals()
def list_az(item):
log()
itemlist = []
alphabet = dict()
for i, (scrapedurl, scrapedtitle) in enumerate(serietv()):
letter = scrapedtitle[0].upper()
if letter not in alphabet:
alphabet[letter] = []
alphabet[letter].append(str(scrapedurl) + '||' + str(scrapedtitle))
for letter in sorted(alphabet):
itemlist.append(
Item(channel=item.channel,
action="lista_serie",
url='\n\n'.join(alphabet[letter]),
title=letter,
fulltitle=letter))
return itemlist
def cleantitle(scrapedtitle):
scrapedtitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('’', '\'').replace('&#215;', 'x').replace('×', 'x').replace('"', "'")
return scrapedtitle.strip()
def serietv():
log()
itemlist = []
matches = support.match(Item(), r'<option class="level-0" value="([^"]+)">([^<]+)</option>',
r'<select\s*?name="cat"\s*?id="cat"\s*?class="postform"\s*?>(.*?)</select>', headers,
url="%s/" % host)[0]
index = 0
for cat, title in matches:
title = cleantitle(title)
url = '%s?cat=%s' % (host, cat)
## if int(level) > 0:
## itemlist[index - 1][0] += '{|}' + url
## continue
itemlist.append([url, title])
index += 1
return itemlist
def lista_serie(item):
log()
itemlist = []
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
if '||' in item.url:
series = item.url.split('\n\n')
matches = []
for i, serie in enumerate(series):
matches.append(serie.decode('utf-8').split('||'))
series = matches
support.log("SERIE ALF :", series)
else:
series = serietv()
support.log("SERIE ALF 2 :", series)
for i, (scrapedurl, scrapedtitle) in enumerate(series):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append(
Item(channel=item.channel,
action="episodios",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
show=scrapedtitle,
extra=item.extra,
contentType='tvshow',
originalUrl=scrapedurl,
folder=True))
support.checkHost(item, itemlist)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if len(series) >= p * PERPAGE:
next_page = item.url + '{}' + str(p + 1)
support.nextPage(itemlist, item, next_page=next_page)
return itemlist
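The removed A-Z helpers above page the per-letter series list in groups of PERPAGE and carry the current page number after a '{}' marker appended to the url. A compact sketch of that slicing, kept only to document the scheme:

PERPAGE = 15

def page_slice(entries, url):
    # the page number travels after a '{}' marker in the url, defaulting to 1
    p = 1
    if '{}' in url:
        url, p = url.split('{}')
        p = int(p)
    current = entries[(p - 1) * PERPAGE:p * PERPAGE]
    next_url = url + '{}' + str(p + 1) if len(entries) >= p * PERPAGE else None
    return current, next_url

# page_slice([('url%d' % i, 'title%d' % i) for i in range(40)], 'https://fastsubita.com{}2')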
############## Bottom of the page
# to be adapted to the channel
def search(item, text):
support.log('search', item)
itemlist = []
text = text.replace(' ', '+')
item.url = host + '?s=' + text
# item.contentType must be set for the global search
# if the channel only has movies it can be omitted; otherwise add it and use it to discriminate.
try:
item.args = 'search'
item.contentType = 'tvshow'
return peliculas(item)
# Catch the exception so the global search is not interrupted if a channel fails
@@ -206,34 +116,33 @@ def search(item, text):
return []
# to be adapted to the channel
# add newest only if the site has a page with the latest releases/additions
# otherwise do NOT add it
def newest(categoria):
support.log('newest ->', categoria)
itemlist = []
item = Item()
try:
item.contentType = 'tvshow'
item.args = 'newest'
item.url = host
item.action = 'peliculas'
itemlist = peliculas(item)
if categoria == 'series':
try:
item.contentType = 'tvshow'
item.args = 'newest'
item.url = host
item.action = 'peliculas'
itemlist = peliculas(item)
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Continue the search if an error occurs
except:
import sys
for line in sys.exc_info():
support.log('newest log: ', '{0}'.format(line))
return []
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Continue the search if an error occurs
except:
import sys
for line in sys.exc_info():
support.log('newest log: ', '{0}'.format(line))
return []
return itemlist
def findvideos(item):
support.log('findvideos ->', item)
itemlist = []
patronBlock = '<div class="entry-content">(?P<block>.*)<footer class="entry-footer">'
patron = r'<a href="([^"]+)">'
matches, data = support.match(item, patron, patronBlock, headers)
@@ -245,4 +154,24 @@ def findvideos(item):
resp = httptools.downloadpage(scrapedurl, follow_redirects=False)
data += resp.headers.get("location", "") + '\n'
return support.server(item, data)
itemlist += support.server(item, data)
data = httptools.downloadpage(item.url).data
patron = r'>Posted in <a href="https?://fastsubita.com/serietv/([^/]+)/(?:[^"]+)?"'
series = scrapertoolsV2.find_single_match(data, patron)
titles = support.typo(series.upper().replace('-', ' '), 'bold color kod')
goseries = support.typo("Vai alla Serie:", ' bold color kod')
itemlist.append(
Item(channel=item.channel,
title=goseries + titles,
fulltitle=titles,
show=series,
contentType='tvshow',
contentSerieName=series,
url=host+"/serietv/"+series,
action='episodios',
contentTitle=titles,
plot = "Vai alla Serie " + titles + " con tutte le puntate",
))
return itemlist

View File

@@ -3,68 +3,34 @@
"name": "Filmi Gratis",
"active": true,
"adult": false,
"language": ["ita"],
"language": ["ita", "sub-ita"],
"thumbnail": "filmigratis.png",
"banner": "filmigratis.png",
"categories": ["movie","tvshow"],
"categories": ["movie","tvshow"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "2", "5", "10" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -2,293 +2,156 @@
# ------------------------------------------------------------
# Channel for Filmi Gratis
# ------------------------------------------------------------
"""
The "Al Cinema" entry refers to the titles scrolling on the home page
Issues:
- None known
New releases: the channel is present in:
- MOVIES
"""
import re
from core import scrapertools, servertools, httptools, tmdb, support
from core import servertools, httptools, support
from core.item import Item
from platformcode import logger, config
from specials import autoplay
from platformcode import config
__channel__ = 'filmigratis'
host = config.get_channel_url(__channel__)
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango', 'vidoza', 'okru']
list_quality = ['1080p', '720p', '480p', '360']
checklinks = config.get_setting('checklinks', 'filmigratis')
checklinks_number = config.get_setting('checklinks_number', 'filmigratis')
list_servers = ['verystream', 'openload', 'streamango', 'vidoza', 'okru']
list_quality = ['1080p', '720p', '480p', '360']
headers = [['Referer', host]]
#-----------------------------------------------------------------------------------------------------------------------
@support.menu
def mainlist(item):
film = [
('Al Cinema ', ['', 'carousel']),
('Film alta definizione', ['', 'peliculas']),
('Categorie', ['', 'categorias_film']),
('Al Cinema ', ['', 'peliculas', 'cinema']),
('Categorie', ['', 'genres', 'genres']),
]
tvshow = [
('Categorie', ['', 'categorias_serie'])
tvshow = ['/serie/ALL',
('Generi', ['', 'genres', 'genres'])
]
search = ''
return locals()
@support.scrape
def peliculas(item):
support.log()
if item.args == 'search':
action = ''
patron = r'<div class="cnt">.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>\s+(?P<title>.+?)(?:\[(?P<lang>Sub-ITA|SUB-ITA|SUB)\])?\s?(?:\[?(?P<quality>HD).+\]?)?\s?(?:\(?(?P<year>\d+)?\)?)?\s+<[^>]+>[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)"[^<]+<'
patronBlock = r'<div class="container">(?P<block>.*?)</main>'
elif item.contentType == 'movie':
if not item.args:
# voce menu: Film
patronBlock = r'<h1>Film streaming ita in alta definizione</h1>(?P<block>.*?)<div class="content-sidebar">'
patron = r'<div class="timeline-right">[^>]+>\s<a href="(?P<url>.*?)".*?src="(?P<thumb>.*?)".*?<h3 class="timeline-post-title">(?:(?P<title>.+?)\s\[?(?P<lang>Sub-ITA)?\]?\s?\[?(?P<quality>HD)?\]?\s?\(?(?P<year>\d+)?\)?)<'
patronNext = r'<a class="page-link" href="([^"]+)">>'
elif item.args == 'cinema':
patronBlock = r'<div class="owl-carousel" id="postCarousel">(?P<block>.*?)<section class="main-content">'
patron = r'background-image: url\((?P<thumb>.*?)\).*?<h3.*?>(?:(?P<title>.+?)\s\[?(?P<lang>Sub-ITA)?\]?\s?\[?(?P<quality>HD)?\]?\s?\(?(?P<year>\d+)?\)?)<.+?<a.+?<a href="(?P<url>[^"]+)"[^>]+>'
elif item.args == 'genres':
# ci sono dei titoli dove ' viene sostituito con " da support
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
data = re.sub('\n|\t', ' ', data)
patron = r'<div class="cnt">\s.*?src="([^"]+)".+?title="((?P<title>.+?)(?:[ ]\[(?P<lang>Sub-ITA|SUB-ITA)\])?(?:[ ]\[(?P<quality>.*?)\])?(?:[ ]\((?P<year>\d+)\))?)"\s*[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s+<a href="(?P<url>[^"]+)"'
patronBlock = r'<div class="container">(?P<block>.*?)</main>'
pagination = ''
patronNext = '<a class="page-link" href="([^"]+)">>>'
else:
action = 'episodios'
patron = r'<div class="cnt">\s.*?src="([^"]+)".+?title="((?P<title>.+?)(?:[ ]\[(?P<lang>Sub-ITA|SUB-ITA)\])?(?:[ ]\[(?P<quality>.*?)\])?(?:[ ]\((?P<year>\d+)\))?)"\s*[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s+<a href="(?P<url>[^"]+)"'
## if item.args == 'search':
## patron = r'<div class="cnt">.*?src="([^"]+)".+?[^>]+>[^>]+>[^>]+>\s+((?P<title>.+?)(?:[ ]\[(?P<lang>Sub-ITA|SUB-ITA)\])?(?:[ ]\[(?P<quality>.*?)\])?(?:[ ]\((?P<year>\d+)\))?)\s+<[^>]+>[^>]+>[^>]+>[ ]<a href="(?P<url>[^"]+)"'
patronBlock = r'<div class="container">(?P<block>.*?)</main>'
def itemHook(item):
if item.args == 'search':
if 'series' in item.url:
item.action = 'episodios'
item.contentType = 'tvshow'
else:
item.action = 'findvideos'
item.contentType = 'movie'
return item
#debug = True
return locals()
@support.scrape
def episodios(item):
support.log()
action = 'findvideos'
patronBlock = r'<div class="row">(?P<block>.*?)<section class="main-content">'
patron = r'href="(?P<url>.*?)">(?:.+?)?\s+S(?P<season>\d+)\s\-\sEP\s(?P<episode>\d+)[^<]+<'
return locals()
#-----------------------------------------------------------------------------------------------------------------------
def carousel(item):
logger.info('[filmigratis.py] carousel')
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
blocco = scrapertools.find_single_match(data, r'<div class="owl-carousel" id="postCarousel">(.*?)<section class="main-content">')
patron = r'background-image: url\((.*?)\).*?<h3.*?>(.*?)<.*?<a.*?<a href="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedthumb, scrapedtitle, scrapedurl, in matches:
itemlist.append(
Item(channel=item.channel,
action = "findvideos",
contentType = item.contentType,
title = scrapedtitle,
fulltitle = scrapedtitle,
url = scrapedurl,
thumbnail = scrapedthumb,
args=item.args,
show = scrapedtitle,))
return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def peliculas(item):
logger.info('[filmigratis.py] peliculas')
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
blocco = scrapertools.find_single_match(data, r'<h1>Film streaming ita in alta definizione</h1>(.*?)<div class="content-sidebar">')
patron = r'<div class="timeline-left-wrapper">.*?<a href="(.*?)".*?src="(.*?)".*?<h3.*?>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedurl, scrapedthumb, scrapedtitle, in matches:
itemlist.append(
Item(channel=item.channel,
action = "findvideos",
contentType = item.contentType,
title = scrapedtitle,
fulltitle = scrapedtitle,
url = scrapedurl,
thumbnail = scrapedthumb,
args=item.args,
show = scrapedtitle))
patron = r'class="nextpostslink".*?href="(.*?)"'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[B]" + config.get_localized_string(30992) + "[/B]",
args=item.args,
url=next_page))
return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def categorias_film(item):
logger.info("[filmigratis.py] categorias_film")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
bloque = scrapertools.find_single_match(data, 'CATEGORIES.*?<ul>(.*?)</ul>')
patron = '<a href="(.*?)">(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=__channel__,
action="peliculas_categorias",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
args=item.args,
thumbnail=""))
return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def categorias_serie(item):
logger.info("[filmigratis.py] categorias_serie")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
bloque = scrapertools.find_single_match(data, 'class="material-button submenu-toggle"> SERIE TV.*?<ul>.*?</li>(.*?)</ul>')
patron = '<a href="(.*?)">(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=__channel__,
contentType='tvshow',
action="peliculas_serie",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
args=item.args,
thumbnail=""))
return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def peliculas_categorias(item):
logger.info("[filmigratis.py] peliculas_categorias")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<div class="cnt">.*?src="(.*?)".*?title="([A-Z|0-9].*?)".*?<a href="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumb, scrapedtitle, scrapedurl in matches:
if scrapedtitle == "":
scrapedtitle = scrapertools.find_single_match(data, r'<small>.*?([A-Z|0-9].*?) <')
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = scrapedtitle.replace ("È","È")
scrapedtitle = scrapedtitle.replace("–", "-")
scrapedtitle = scrapedtitle.replace("’", "'")
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType=item.contentType,
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumb,
args=item.args,
show=scrapedtitle))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def peliculas_serie(item):
logger.info("[filmigratis.py] peliculas_serie")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'div class="cnt">[^s]+src="([^"]+).*?small>\s+[^A-Z](.*?)<.*?<a href="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumb, scrapedtitle, scrapedurl in matches:
if scrapedtitle == "":
scrapedtitle = scrapertools.find_single_match(data, r'<small>.*?([A-Z|0-9].*?) <')
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = scrapedtitle.replace ("È","È")
scrapedtitle = scrapedtitle.replace("–", "-")
scrapedtitle = scrapedtitle.replace("’", "'")
scrapedtitle = scrapedtitle.replace(" ", "")
itemlist.append(
Item(channel=item.channel,
action="episodios",
contentType='tvshow',
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumb,
args=item.args,
show=scrapedtitle))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def episodios(item):
logger.info("[filmigratis.py] episodios")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertools.find_single_match(data, r'<div class="row">(.*?)<section class="main-content">')
patron = r'href="(.*?)".*?(S[^<]+) <'
matches = re.compile(patron, re.DOTALL).findall(block)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = scrapedtitle.replace ("S0", "")
scrapedtitle = scrapedtitle.replace(" - EP ", "x")
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType='episode',
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=item.thumb,
args=item.args,
show=item.title))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
support.videolibrary(itemlist, item, 'color kod')
return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def search(item, texto):
logger.info('[filmigratis.py] search')
item.url = host + '/search/?s=' + texto
if item.args == 'serie':
try:
return peliculas_serie(item)
except:
import sys
for line in sys.exc_info():
logger.error('%s' % line)
return []
@support.scrape
def genres(item):
support.log()
if item.contentType == 'movie':
action = 'peliculas'
patron = r'<a href="(?P<url>.*?)">(?P<title>.*?)<'
patronBlock = r'CATEGORIES.*?<ul>(?P<block>.*?)</ul>'
else:
try:
return peliculas_categorias(item)
item.contentType = 'tvshow'
action = 'peliculas'
blacklist = ['Al-Cinema']
patron = r'<a href="(?P<url>.*?)">(?P<title>.*?)<'
patronBlock = r'class="material-button submenu-toggle"> SERIE TV.*?<ul>.*?</li>(?P<block>.*?)</ul>'
except:
import sys
for line in sys.exc_info():
logger.error('%s' % line)
return []
return locals()
#-----------------------------------------------------------------------------------------------------------------------
def search(item, text):
support.log('search', item)
text = text.replace(' ', '+')
item.url = host + '/search/?s=' + text
try:
item.args = 'search'
return peliculas(item)
# Se captura la excepcion, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
support.log('search log:', line)
return []
def newest(categoria):
support.log('newest ->', categoria)
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host
item.contentType = 'movie'
item.action = 'peliculas'
itemlist = peliculas(item)
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.log("{0}".format(line))
return []
return itemlist
def findvideos(item):
logger.info('[filmigratis.py] findvideos')
data = httptools.downloadpage(item.url, headers=headers).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title + '[COLOR green][B] - ' + videoitem.title + '[/B][/COLOR]'
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
videoitem.contentType = item.content
if item.args == "film":
support.videolibrary(itemlist, item, 'color kod')
autoplay.start(itemlist, item)
return itemlist
support.log()
return support.server(item)

View File

@@ -3,7 +3,7 @@
"name": "Filmpertutti",
"active": true,
"adult": false,
"language": ["ita"],
"language": ["ita", "sub-ita"],
"thumbnail": "filmpertutti.png",
"banner": "filmpertutti.png",
"categories": ["tvshow","movie"],

View File

@@ -1,235 +1,184 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per filmpertutti.co
# Canale per filmpertutti.py
# ------------------------------------------------------------
"""
Questi sono commenti per i beta-tester.
Su questo canale, nella categoria 'Ricerca Globale'
non saranno presenti le voci 'Aggiungi alla Videoteca'
e 'Scarica Film'/'Scarica Serie', dunque,
la loro assenza, nel Test, NON dovrà essere segnalata come ERRORE.
Novità (globale). Indicare in quale/i sezione/i è presente il canale:
- film, serie
- I titoli in questa sezione a gruppi di 20
"""
import re
from channelselector import thumb
from core import scrapertoolsV2, servertools, httptools, tmdb, support
from core import scrapertoolsV2, httptools, support
from core.item import Item
from platformcode import config, logger
from specials import autoplay
from platformcode import config
__channel__ = 'filmpertutti'
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
list_servers = ['verystream', 'openload', 'streamango', 'wstream', 'akvideo']
list_servers = ['speedvideo', 'verystream', 'openload', 'streamango', 'wstream', 'akvideo']
list_quality = ['HD', 'SD']
@support.menu
def mainlist(item):
film = ['/category/film/',
('Film per Genere', ['', 'genre'])
]
tvshow = ['/category/serie-tv/',
('in ordine alfabetico', ['/category/serie-tv/', 'az'])
]
film = ['/category/film/',
('Generi', ['/category/film/', 'genres', 'lettersF'])
]
tvshow = ['/category/serie-tv/',
('Aggiornamenti', ['/aggiornamenti-serie-tv/', 'peliculas', 'newest']),
('Per Lettera', ['/category/serie-tv/', 'genres', 'lettersS'])
]
search = ''
return locals()
@support.scrape
def peliculas(item):
support.log()
if item.args != 'newest':
patronBlock = r'<ul class="posts">(?P<block>.*)<\/ul>'
patron = r'<li><a href="(?P<url>[^"]+)" data-thumbnail="(?P<thumb>[^"]+)">.*?<div class="title">(?P<title>.+?)(?:\[(?P<lang>Sub-ITA)\])?(?:[ ]\[?(?P<quality>[HD]+)?\])?(?:[ ]\((?P<year>\d+)\)?)?<\/div>'
patronNext = r'<a href="([^"]+)" >Pagina'
else:
patronBlock = r'<ul class="posts">(?P<block>.*)<div class="clear">'
patron = r'<li>\s?<a href="(?P<url>[^"]+)" data-thumbnail="(?P<thumb>[^"]+)">.*?<div class="title">(?P<title>.+?)(?:\s\[(?P<quality>HD)\])?<\/div>[^>]+>(?:[\dx]+)\s?(?:[ ]\((?P<lang>[a-zA-Z\-]+)\))?.+?</div>'
pagination = ''
if item.args == 'search':
action = 'select'
elif item.contentType == 'tvshow':
action = 'episodios'
elif item.contentType == 'movie':
action ='findvideos'
else:
action = 'select'
def itemHook(item):
item.title = item.title.replace(r'-', ' ')
return item
#debug = True
return locals()
@support.scrape
def episodios(item):
support.log()
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub('\n|\t', ' ', data)
data = re.sub(r'>\s+<', '> <', data)
if 'accordion-item' in data:
#patronBlock = r'<span class="season(?:|-title)">(?P<season>\d+)[^>]+>[^>]+>\s+?[^>]+>[^>]+>.+?(?:STAGIONE|Stagione).+?\s(?P<lang>[a-zA-Z\-]+).+?</span>(?P<block>.*?)<div id="disqus_thread">'
patronBlock = r'<span class="season(?:|-title)">(?P<season>\d+)[^>]+>[^>]+>\s+?[^>]+>[^>]+>.+?(?:STAGIONE|Stagione).+?\s(?P<lang>[a-zA-Z\-]+)</span>(?P<block>.*?)\s*(?:<li class="s_title">|<div id="disqus_thread">)'
patron = r'<img src="(?P<thumb>[^"]+)">.*?<li class="season-no">(?P<episode>.*?)<\/li>(?P<url>.*?javascript:;">(?P<title>[^<]+)<.+?)<\/table>'
else:
patronBlock = r'<div id="info" class="pad">(?P<block>.*?)<div id="disqus_thread">'
patron = r'<strong>(?P<lang>.*?)<\/strong>.*?<p>(?P<season>.*?)<span'
#debug = True
return locals()
def newest(categoria):
logger.info("filmpertutti newest" + categoria)
@support.scrape
def genres(item):
support.log()
itemlist = []
item = Item()
try:
if categoria == "film":
item.url = host + "/category/film/"
item.action = "peliculas"
item.extra = "movie"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
if item.args == 'lettersF':
item.contentType = 'movie'
else:
item.contentType = 'tvshow'
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
action = 'peliculas'
patronBlock = r'<select class="cats">(?P<block>.*?)<\/select>'
patron = r'<option data-src="(?P<url>[^"]+)">(?P<title>.*?)<\/option>'
return itemlist
return locals()
def select(item):
support.log()
data = httptools.downloadpage(item.url, headers=headers).data
patronBlock = scrapertoolsV2.find_single_match(data, r'class="taxonomy category" ><span property="name">(.*?)</span></a><meta property="position" content="2">')
if patronBlock.lower() != 'film':
support.log('select = ### è una serie ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
contentSerieName = item.fulltitle,
url=item.url,
contentType='tvshow'))
else:
support.log('select = ### è un movie ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
contentType='movie'))
def search(item, texto):
logger.info("filmpertutti " + item.url + " search " + texto)
support.log()
item.url = host + "/?s=" + texto
item.contentType = 'episode'
item.args = 'search'
try:
return peliculas(item)
# Continua la ricerca in caso di errore
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
support.log("%s" % line)
return []
def genre(item):
logger.info(item.channel + 'genre')
def newest(categoria):
support.log()
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertoolsV2.find_single_match(data, r'<ul class="table-list">(.*?)<\/ul>')
matches = scrapertoolsV2.find_multiple_matches(block, r'<a href="([^"]+)">.*?<\/span>(.*?)<\/a>')
for url, title in matches:
itemlist.append(
Item(channel=item.channel,
action='peliculas',
title=title,
url=host+url)
)
itemlist = thumb(itemlist)
return itemlist
def az(item):
logger.info(item.channel + 'genre')
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertoolsV2.find_single_match(data, r'<select class="cats">(.*?)<\/select>')
matches = scrapertoolsV2.find_multiple_matches(block, r'<option data-src="([^"]+)">(.*?)<\/option>')
for url, title in matches:
itemlist.append(
Item(channel=item.channel,
action='peliculas',
title=title,
url=url)
)
itemlist = thumb(itemlist)
return itemlist
def peliculas(item):
logger.info(item.channel + 'peliculas')
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertoolsV2.find_single_match(data, r'<ul class="posts">(.*)<\/ul>')
patron = r'<li><a href="([^"]+)" data-thumbnail="([^"]+)">.*?<div class="title">([^<]+)<\/div>'
matches = scrapertoolsV2.find_multiple_matches(block, patron)
for scrapedurl, scrapedthumb, scrapedtitle in matches:
title = re.sub(r'.\(.*?\)|.\[.*?\]', '', scrapedtitle)
quality = scrapertoolsV2.find_single_match(scrapedtitle, r'\[(.*?)\]')
if not quality:
quality = 'SD'
longtitle = title + ' [COLOR blue][' + quality + '][/COLOR]'
if item.contentType == 'tvshow':
action = 'episodios'
item = Item()
try:
if categoria == "peliculas":
item.url = host + "/category/film/"
item.action = "peliculas"
item.extra = "movie"
item.contentType = 'movie'
itemlist = peliculas(item)
else:
action ='findvideos'
item.url = host + "/aggiornamenti-serie-tv/"
item.action = "peliculas"
item.args = "newest"
item.contentType = 'tvshow'
itemlist = peliculas(item)
itemlist.append(
Item(channel=item.channel,
action=action,
contentType=item.contentType,
title=longtitle,
fulltitle=title,
show=title,
quality=quality,
url=scrapedurl,
thumbnail=scrapedthumb
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
## if itemlist[-1].action == "peliculas":
## itemlist.pop()
next_page = scrapertoolsV2.find_single_match(data, '<a href="([^"]+)">Pagina')
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="peliculas",
contentType=item.contentType,
title="[COLOR blue]" + config.get_localized_string(30992) + " >[/COLOR]",
url=next_page,
thumbnails=thumb()))
return itemlist
def episodios(item):
logger.info(item.channel + 'findvideos')
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
if 'accordion-item' in data:
block = scrapertoolsV2.find_single_match(data, 'accordion-item.*?>(.*?)<div id="disqus_thread">')
patron = r'<img src="([^"]+)">.*?<li class="season-no">(.*?)<\/li>(.*?)<\/table>'
matches = scrapertoolsV2.find_multiple_matches(block, patron)
for scrapedthumb, scrapedtitle, scrapedurl in matches:
title = scrapedtitle + ' - ' + item.title
if title[0] == 'x':
title = '1' + title
itemlist.append(
Item(channel=item.channel,
action='findvideos',
contentType='episode',
title=title,
fulltitle=title,
show=title,
quality=item.quality,
url=scrapedurl,
thumbnail=scrapedthumb
))
else:
block = scrapertoolsV2.find_single_match(data, '<div id="info" class="pad">(.*?)<div id="disqus_thread">').replace('</p>','<br />').replace('×','x')
matches = scrapertoolsV2.find_multiple_matches(block, r'<strong>(.*?)<\/strong>.*?<p>(.*?)<span')
for lang, seasons in matches:
lang = re.sub('.*?Stagione[^a-zA-Z]+', '', lang)
# patron = r'([0-9]+x[0-9]+) (.*?)<br'
season = scrapertoolsV2.find_multiple_matches(seasons, r'([0-9]+x[0-9]+) (.*?)<br')
for title, url in season:
title = title + ' - ' + lang
itemlist.append(
Item(channel=item.channel,
title=title,
fulltitle=title,
show=title,
url=url,
contentType='episode',
action='findvideos'
))
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.log("{0}".format(line))
return []
return itemlist
def findvideos(item):
logger.info(item.channel + 'findvideos')
if item.contentType == 'movie':
data = httptools.downloadpage(item.url, headers=headers).data
return support.server(item)
else:
data = item.url
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title + ' - [COLOR limegreen][[/COLOR]' + videoitem.title + '[COLOR limegreen]][/COLOR]'
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = item.channel
videoitem.contentType = item.contentType
videoitem.quality = item.quality
autoplay.start(itemlist, item)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow][B]'+config.get_localized_string(30161)+'[/B][/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.fulltitle))
return itemlist
return support.server(item, item.url)

View File

@@ -7,5 +7,23 @@
"thumbnail": "guardaserieclick.png",
"bannermenu": "guardaserieclick.png",
"categories": ["tvshow", "anime"],
"settings": []
"not_active": ["include_in_newest_peliculas", "include_in_newest_anime"],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -1,24 +1,22 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per Guardaserie.click
# Canale per guardaserieclick
# ------------------------------------------------------------
"""
Problemi noti che non superano il test del canale:
NESSUNO (update 13-9-2019)
Avvisi per il test:
La voce "Serie TV" mostra per ogni pagina 24 titoli
Problemi noti:
- nella pagina categorie appaiono i risultati di tmdb in alcune voci
- Le voci del menu le trovi in "lista serie" del sito, e Generi = Sfoglia
- SE capita che entrando in una voce trovi "nessun elemento" torna indietro e rientra nella voce.
- Tutte le voci, tranne: Anime/Cartoni, mostrano per ogni pagina, al max 25 titoli
Presente in NOVITà:
- Serietv
"""
from core import scrapertoolsV2, httptools, support
from core import support
from core.item import Item
from platformcode import logger, config
from platformcode import config
from core.support import log
__channel__ = 'guardaserieclick'
@@ -30,94 +28,145 @@ list_quality = ['default']
@support.menu
def mainlist(item):
tvshow = ['/lista-serie-tv',
('Ultimi Aggiornamenti', ['/lista-serie-tv', 'peliculas', 'new']),
('Categorie', ['categorie', 'categorie']),
('Serie inedite Sub-ITA', ['/lista-serie-tv', 'peliculas', 'ined']),
('Da non perdere', ['/lista-serie-tv', 'peliculas', ['tv', 'da non perdere']]),
('Classiche', ["/lista-serie-tv", 'peliculas', ['tv', 'classiche']]),
('Anime', ["/category/animazione/", 'tvserie', 'tvshow','anime'])
('Aggiornamenti', ['/lista-serie-tv', 'peliculas', 'update']),
('Generi', ['/categorie', 'genres', 'genres']),
('News Sub-ITA', ['/lista-serie-tv', 'peliculas', 'ined']),
('Da non perdere', ['/lista-serie-tv', 'peliculas', 'nolost']),
('Classiche', ["/lista-serie-tv", 'peliculas', 'classic']),
('Anime/Cartoni', ["/category/animazione/", 'peliculas', 'genres'])
]
return locals()
##@support.scrape
##def peliculas(item):
#### import web_pdb; web_pdb.set_trace()
## log('peliculas ->\n', item)
##
## action = 'episodios'
## block = r'(?P<block>.*?)<div\s+class="btn btn-lg btn-default btn-load-other-series">'
##
## if item.args == 'ined':
## deflang = 'SUB-ITA'
## patronBlock = r'<span\s+class="label label-default label-title-typology">'+block
## patron = r'<a href="(?P<url>[^"]+)".*?>\s<img\s.*?src="(?P<thumb>[^"]+)"\s/>[^>]+>[^>]+>\s[^>]+>\s(?P<year>\d{4})?\s.+?class="strongText">(?P<title>.+?)<'
## pagination = 25
## elif item.args == 'update':
## patronBlock = r'<div\s+class="container-fluid greybg title-serie-lastep title-last-ep fixed-title-wrapper containerBottomBarTitle">'+block
## patron = r'<a(?: rel="[^"]+")? href="(?P<url>[^"]+)"(?: class="[^"]+")?>[ ]<img class="[^"]+"[ ]title="[^"]+"[ ]alt="[^"]+"[ ]src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<episode>\d+.\d+)[ ]\((?P<lang>[a-zA-Z\-]+)[^<]+<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)<'
## elif item.args == 'genres':
## patronBlock = r'<h2 style="color: white !important" class="title-typology">(?P<block>.+?)<div class="container-fluid whitebg" style="">'
## patron = r'<a href="(?P<url>[^"]+)".*?>\s<img\s.*?src="(?P<thumb>[^"]+)"\s/>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)</p>'
## patronNext = r'rel="next" href="([^"]+)">'
## item.contentType = 'tvshow'
## elif item.args == 'nolost':
## patronBlock = r'<h2 class="title-typology styck-top" meta-class="title-serie-danonperd">'+block
## patron = r'<a href="(?P<url>[^"]+)".*?>\s<img\s.*?src="(?P<thumb>[^"]+)"\s/>[^>]+>[^>]+>\s[^>]+>\s(?P<year>\d{4})?\s.+?class="strongText">(?P<title>.+?)<'
## pagination = 25
## elif item.args == 'classic':
## patronBlock = r'<h2 class="title-typology styck-top" meta-class="title-serie-classiche">'+block
## patron = r'<a href="(?P<url>[^"]+)".*?>\s<img\s.*?src="(?P<thumb>[^"]+)"\s/>[^>]+>[^>]+>\s[^>]+>\s(?P<year>\d{4})?\s.+?class="strongText">(?P<title>.+?)<'
## pagination = 25
## else:
## patronBlock = r'<div\s+class="container container-title-serie-new container-scheda" meta-slug="new">'+block
## patron = r'<a href="(?P<url>[^"]+)".*?>\s<img\s.*?src="(?P<thumb>[^"]+)"\s/>[^>]+>[^>]+>\s[^>]+>\s(?P<year>\d{4})?\s.+?class="strongText">(?P<title>.+?)<'
## pagination = 25
##
## debug = True
## return locals()
@support.scrape
def peliculas(item):
## import web_pdb; web_pdb.set_trace()
log('serietv ->\n', item)
if item.args == 'ined':
#data = httptools.downloadpage(item.url).data
log("Sono qui orco")
pagination = 24
action = 'episodios'
patron_block = r'<span\s+class="label label-default label-title-typology">'\
'(?P<lang>[^<]+)</span>'
else:
pagination = 24
action = 'episodios'
patronBlock = r'<div\s+class="container container-title-serie-new container-scheda" '\
'meta-slug="new">(?P<block>.*?)<div\s+class='\
'"btn btn-lg btn-default btn-load-other-series">'
patron = r'<a href="(?P<url>[^"]+)".*?>\s<img\s.*?src="(?P<thumb>[^"]+)"\s/>'\
'[^>]+>[^>]+>\s[^>]+>\s(?P<year>\d{4})?\s.+?class="strongText">(?P<title>.+?)<'
debug = True
return locals()
@support.scrape
def tvserie(item):
log('peliculas ->\n', item)
action = 'episodios'
## listGroups = ['url', 'thumb', 'title']
patron = r'<a\shref="(?P<url>[^"]+)".*?>\s<img\s.*?src="(?P<thumb>[^"]+)" />'\
'[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)</p></div>'
patron_block = r'<div\sclass="col-xs-\d+ col-sm-\d+-\d+">(?P<block>.*?)'\
'<div\sclass="container-fluid whitebg" style="">'
patronNext = r'<link\s.*?rel="next"\shref="([^"]+)"'
blacklist = ['DMCA']
if item.args == 'genres' or item.args == 'search':
patronBlock = r'<h2 style="color: white !important" class="title-typology">(?P<block>.+?)<div class="container-fluid whitebg" style="">'
patron = r'<a href="(?P<url>[^"]+)".*?>\s<img\s.*?src="(?P<thumb>[^"]+)"\s/>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)</p>'
patronNext = r'rel="next" href="([^"]+)">'
item.contentType = 'tvshow'
## elif item.args == 'search':
## patronBlock = r'<h2 style="color:\s?white !important.?" class="title-typology">(?P<block>.*?)<div class="container-fluid whitebg" style="">'
## patron = r'<a href="(?P<url>[^"]+)".*?>\s<img\s.*?src="(?P<thumb>[^"]+)"\s/>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)</p>'
else:
end_block = r'(?P<block>.*?)<div\s+class="btn btn-lg btn-default btn-load-other-series">'
patron = r'<a href="(?P<url>[^"]+)".*?>\s<img\s.*?src="(?P<thumb>[^"]+)"\s/>[^>]+>[^>]+>\s[^>]+>\s(?P<year>\d{4})?\s.+?class="strongText">(?P<title>.+?)<'
pagination = 25
if item.args == 'ined':
deflang = 'SUB-ITA'
patronBlock = r'<span\s+class="label label-default label-title-typology">'+end_block
## patron = r'<a href="(?P<url>[^"]+)".*?>\s<img\s.*?src="(?P<thumb>[^"]+)"\s/>[^>]+>[^>]+>\s[^>]+>\s(?P<year>\d{4})?\s.+?class="strongText">(?P<title>.+?)<'
## pagination = 25
elif item.args == 'update':
patronBlock = r'<div\s+class="container-fluid greybg title-serie-lastep title-last-ep fixed-title-wrapper containerBottomBarTitle">'+end_block
patron = r'<a(?: rel="[^"]+")? href="(?P<url>[^"]+)"(?: class="[^"]+")?>[ ]<img class="[^"]+"[ ]title="[^"]+"[ ]alt="[^"]+"[ ](?:|meta-)?src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?:\d+.\d+)[ ]\((?P<lang>[a-zA-Z\-]+)[^<]+<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)<'
elif item.args == 'nolost':
patronBlock = r'<h2 class="title-typology styck-top" meta-class="title-serie-danonperd">'+end_block
## pagination = 25
elif item.args == 'classic':
patronBlock = r'<h2 class="title-typology styck-top" meta-class="title-serie-classiche">'+end_block
## patron = r'<a href="(?P<url>[^"]+)".*?>\s<img\s.*?src="(?P<thumb>[^"]+)"\s/>[^>]+>[^>]+>\s[^>]+>\s(?P<year>\d{4})?\s.+?class="strongText">(?P<title>.+?)<'
## pagination = 25
## elif item.args == 'anime':
##
else:
patronBlock = r'<div\s+class="container container-title-serie-new container-scheda" meta-slug="new">'+end_block
## patron = r'<a href="(?P<url>[^"]+)".*?>\s<img\s.*?src="(?P<thumb>[^"]+)"\s/>[^>]+>[^>]+>\s[^>]+>\s(?P<year>\d{4})?\s.+?class="strongText">(?P<title>.+?)<'
## pagination = 25
#support.regexDbg(item, patron, headers)
#debug = True
return locals()
@support.scrape
def episodios(item):
log('episodios ->\n')
item.contentType = 'episode'
log()
action = 'findvideos'
## listGroups = ['episode', 'lang', 'title2', 'plot', 'title', 'url']
patron = r'class="number-episodes-on-img"> (?P<episode>\d+.\d+)'\
'(?:|[ ]\((?P<lang>.*?)\))<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>'\
'(?P<title2>.*?)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>'\
'(?P<plot>.*?)<[^>]+></div></div>.<span\s.+?meta-serie="(?P<title>.*?)"'\
'meta-stag=(?P<url>.*?)</span>'
patron = r'<div class="number-episodes-on-img">\s?\d+.\d+\s?(?:\((?P<lang>[a-zA-Z\-]+)\))?</div>.+?(?:<span class="pull-left bottom-year">(?P<title2>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<plot>[^<]+)<[^>]+>[^>]+>[^>]+>\s?)?<span(?: meta-nextep="[^"]+")? class="[^"]+" meta-serie="(?P<title>[^"]+)" meta-stag="(?P<season>\d+)" meta-ep="(?P<episode>\d+)" meta-embed="(?P<url>[^>]+)">'
patronBlock = r'<h2 class="title-typology">Episodi (?P<stagione>\d+).{1,3}Stagione</h2>(?P<block>.*?)<div class="container">'
def itemHook(item):
item.title = item.title.replace(item.fulltitle, '').replace('-','',1)
return item
#debug = True
return locals()
def findvideos(item):
log()
return support.server(item, item.url)
@support.scrape
def categorie(item):
action = 'tvserie'
#listGroups = ['url', 'title']
def genres(item):
log()
action = 'peliculas'
patron = r'<li>\s<a\shref="(?P<url>[^"]+)"[^>]+>(?P<title>[^<]+)</a></li>'
patron_block = r'<ul\sclass="dropdown-menu category">(?P<block>.*?)</ul>'
item.contentType = ''
return locals()
# ================================================================================================================
def search(item, text):
log(text)
item.url = host + "/?s=" + text
item.contentType = 'tvshow'
item.args = 'search'
try:
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
log("%s" % line)
return []
def newest(categoria):
log()
itemlist = []
item = Item()
item.contentType= 'episode'
item.contentType= 'tvshow'
item.args = 'update'
try:
if categoria == "series":
@@ -125,30 +174,19 @@ def newest(categoria):
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
## if itemlist[-1].action == "peliculas":
## itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
log("{0}".format(line))
return []
return itemlist
### ================================================================================================================
### ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
log(texto)
item.url = host + "/?s=" + texto
item.args = 'cerca'
try:
return tvserie(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def findvideos(item):
log('--->', item)
return support.server(item, item.url)

View File

@@ -15,6 +15,6 @@
"default": false,
"enabled": false,
"visible": false
},
}
]
}

View File

@@ -166,7 +166,7 @@ def peliculas(item):
patronNext = '<span class="current">[^<]+<[^>]+><a href="([^"]+)"'
debug = True
## debug = True
return locals()
@@ -238,4 +238,3 @@ def findvideos(item):
data += str(scrapertoolsV2.find_multiple_matches(html, '<meta name="og:url" content="([^"]+)">'))
itemlist = support.server(item, data)
return itemlist

View File

@@ -6,65 +6,7 @@
"language": ["ita"],
"thumbnail": "https:\/\/mondoserietv.com\/wp-content\/uploads\/2018\/04\/logo.png",
"bannermenu": "https:\/\/mondoserietv.com\/wp-content\/uploads\/2018\/04\/logo.png",
"categories": ["movie","anime","tvshow"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "3", "5", "10" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
"categories": ["movie","anime","tvshow","documentary"],
"not_active":["include_in_newest_anime","include_in_newest_documentary"],
"settings": []
}

View File

@@ -1,357 +1,116 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per mondoserietv
#
# ----------------------------------------------------------
import re
from core import scrapertools, servertools, httptools, scrapertoolsV2
from core import tmdb
from core.item import Item
from lib.unshortenit import unshorten
from platformcode import logger, config
from specials import autoplay
from core import support
__channel__ = "mondoserietv"
host = config.get_channel_url(__channel__)
host = support.config.get_channel_url(__channel__)
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['akstream']
list_servers = ['akstream', 'wstream', 'vidtome', 'backin', 'nowvideo', 'verystream']
list_quality = ['default']
checklinks = config.get_setting('checklinks', 'mondoserietv')
checklinks_number = config.get_setting('checklinks_number', 'mondoserietv')
headers = {'Referer': host}
PERPAGE = 14
@support.menu
def mainlist(item):
logger.info("kod.mondoserietvmainlist")
autoplay.init(item.channel, list_servers, list_quality)
film = ['/lista-film',
('Ultimi Film Aggiunti', ['/ultimi-film-aggiunti', 'peliculas' , 'last'])]
itemlist = [Item(channel=item.channel,
action="lista_serie",
title="[COLOR azure]Lista Serie Tv Anni 50 60 70 80[/COLOR]",
url=("%s/lista-serie-tv-anni-60-70-80/" % host),
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="lista_serie",
title="[COLOR azure]Lista Serie Tv Italiane[/COLOR]",
url=("%s/lista-serie-tv-italiane/" % host),
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="lista_serie",
title="[COLOR azure]Lista Cartoni Animati & Anime[/COLOR]",
url=("%s/lista-cartoni-animati-e-anime/" % host),
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="peliculas",
title="[COLOR azure]Lista Film[/COLOR]",
url=("%s/lista-film/" % host),
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR yellow]Cerca Film...[/COLOR]",
action="search",
extra="movie",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),
Item(channel=item.channel,
title="[COLOR yellow]Cerca SerieTV...[/COLOR]",
action="search",
extra="tvshow",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
autoplay.show_option(item.channel, itemlist)
tvshow = ['/lista-serie-tv',
('HD {TV}', ['/lista-serie-tv-in-altadefinizione']),
('Anni 50 60 70 80 {TV}',['/lista-serie-tv-anni-60-70-80']),
('Serie Italiane',['/lista-serie-tv-italiane'])]
return itemlist
anime = ['/lista-cartoni-animati-e-anime']
def search(item, texto):
logger.info("kod.mondoserietv search " + texto)
item.url = "%s/?s=%s" % (host, texto)
docu = [('Documentari bullet bold',['/lista-documentari', 'peliculas', '', 'tvshow']),
('Cerca Documentari... submenu bold', ['/lista-documentari', 'search', '', 'tvshow'])]
return locals()
@support.scrape
def search(item, text):
support.log(text)
if item.contentType == 'movie' or item.extra == 'movie':
action = 'findvideos'
else:
action = 'episodios'
try:
if item.extra == "movie":
return search_peliculas(item)
if item.extra == "tvshow":
return search_peliculas_tv(item)
search = text
data = support.match(item, headers=headers)[1]
if 'lcp_nextlink' in data:
data += support.match(item, url=support.scrapertoolsV2.find_single_match(data, r'href="([^"]+)" title="[^"]+" class="lcp_nextlink"'), headers=headers)[1]
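# the documentari list is paginated via the lcp_nextlink anchor, so the following page is
# fetched and its HTML appended before the patron is applied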
patron = r'<li><a href="(?P<url>[^"]+)" title="(?P<title>.*?)(?:\s(?P<year>\d{4}))?"[^>]*>'
return locals()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
support.logger.error("%s" % line)
return []
def search_peliculas(item):
logger.info("kod.mondoserietv search_peliculas")
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
def newest(categoria):
support.log(categoria)
item = support.Item()
try:
if categoria == "series":
item.contentType= 'tvshow'
item.url = host + '/ultimi-episodi-aggiunti'
item.args = "lastep"
if categoria == "peliculas":
item.contentType= 'movie'
item.url = host + '/ultimi-film-aggiunti'
item.args = "last"
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.logger.error("{0}".format(line))
return []
# Estrae i contenuti
patron = '<div class="boxinfo">\s*<a href="([^"]+)">\s*<span class="tt">(.*?)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
extra=item.extra,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def search_peliculas_tv(item):
logger.info("kod.mondoserietv search_peliculas_tv")
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
# Estrae i contenuti
patron = '<div class="boxinfo">\s*<a href="([^"]+)">\s*<span class="tt">(.*?)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="episodios",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
extra=item.extra,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@support.scrape
def peliculas(item):
logger.info("kod.mondoserietv film")
itemlist = []
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
data = httptools.downloadpage(item.url, headers=headers).data
blocco = scrapertools.find_single_match(data, '<div class="entry-content pagess">(.*?)</ul>')
patron = r'<a href="(.*?)" title="(.*?)">'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(Item(channel=item.channel,
contentType="movie",
action="findvideos",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
fanart=item.fanart if item.fanart != "" else item.scrapedthumbnail,
show=item.fulltitle,
folder=True))
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="peliculas",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def lista_serie(item):
logger.info("kod.mondoserietv novità")
itemlist = []
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
data = httptools.downloadpage(item.url, headers=headers).data
blocco = scrapertools.find_single_match(data, '<div class="entry-content pagess">(.*?)</ul>')
patron = r'<a href="(.*?)" title="(.*?)">'
matches = re.compile(patron, re.DOTALL).findall(blocco)
scrapertools.printMatches(matches)
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(Item(channel=item.channel,
action="episodios",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
fanart=item.fanart if item.fanart != "" else item.scrapedthumbnail,
show=item.fulltitle,
folder=True))
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="lista_serie",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
pagination = ''
patronNext = r'href="([^"]+)" title="[^"]+" class="lcp_nextlink"'
if item.args == 'last':
patronBlock = r'<table>(?P<block>.*?)</table>'
patron = r'<tr><td><a href="(?P<url>[^"]+)">\s*[^>]+>(?P<title>.*?)(?:\s(?P<year>\d{4}))? (?:Streaming|</b>)'
elif item.args == 'lastep':
patronBlock = r'<table>(?P<block>.*?)</table>'
patron = r'<td>\s*<a href="[^>]+>(?P<title>.*?)(?:\s(?P<year>\d{4}))?\s(?:(?P<episode>(?:\d+x\d+|\d+)))\s*(?P<title2>[^<]+)(?P<url>.*?)<tr>'
action = 'findvideos'
else:
patronBlock = r'<div class="entry-content pagess">(?P<block>.*?)</ul>'
patron = r'<li><a href="(?P<url>[^"]+)" title="(?P<title>.*?)(?:\s(?P<year>\d{4}))?"[^>]*>'
if item.contentType == 'tvshow':
action = 'episodios'
anime = True
return locals()
@support.scrape
def episodios(item):
logger.info("kod.mondoserietv episodios")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
blocco = scrapertools.find_single_match(data, '<table>(.*?)</table>')
patron = "<tr><td><b>(.*?)(\d+)((?:x\d+| ))(.*?)<\/b>(.*?<tr>)"
matches = scrapertoolsV2.find_multiple_matches(blocco, patron)
for t1, s, e, t2, scrapedurl in matches:
if "x" not in e:
e = s
if e == s:
s = None
if s is None:
s = "1"
if s.startswith('0'):
s = s.replace("0", "")
if e.startswith('x'):
e = e.replace("x", "")
scrapedtitle = t1 + s + "x" + e + " " + t2
itemlist.append(
Item(channel=item.channel,
contentType="episode",
action="findvideos",
items=s,
iteme=e,
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=item.scrapedthumbnail,
plot=item.scrapedplot,
folder=True))
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
anime = True
pagination = 50
patronBlock = r'<table>(?P<block>.*?)</table>'
patron = r'<tr><td><b>(?P<title>(?:\d+)?.*?)\s*(?:(?P<episode>(?:\d+x\d+|\d+)))\s*(?P<title2>[^<]+)(?P<url>.*?)<tr>'
def itemHook(item):
clear = support.re.sub(r'\[[^\]]+\]', '', item.title)
if clear.isdigit():
item.title = support.typo('Episodio ' + clear, 'bold')
return item
return locals()
def findvideos(item):
logger.info(" findvideos")
if item.contentType != "episode":
return findvideos_movie(item)
itemlist = servertools.find_video_items(data=item.url)
logger.info(itemlist)
for videoitem in itemlist:
videoitem.title = "".join([item.title, '[COLOR green][B]' + videoitem.title + '[/B][/COLOR]'])
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = item.channel
videoitem.contentType = item.contentType
videoitem.language = IDIOMAS['Italiano']
# Requerido para Filtrar enlaces
if checklinks:
itemlist = servertools.check_list_links(itemlist, checklinks_number)
# Requerido para FilterTools
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def findvideos_movie(item):
logger.info(" findvideos_movie")
# Carica la pagina
data = httptools.downloadpage(item.url).data
patron = r"<a href='([^']+)'[^>]*?>[^<]*?<img src='[^']+' style='[^']+' alt='[^']+'>[^<]+?</a>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl in matches:
url, c = unshorten(scrapedurl)
data += url + '\n'
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = "".join([item.title, '[COLOR green][B]' + videoitem.title + '[/B][/COLOR]'])
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = item.channel
videoitem.contentType = item.contentType
return itemlist
if item.contentType == 'movie': return support.server(item)
else: return support.server(item, item.url)

View File

@@ -228,7 +228,6 @@ def get_default_settings(channel_name):
categories = get_channel_json(channel_name).get('categories', list())
not_active = get_channel_json(channel_name).get('not_active', list())
default_off = get_channel_json(channel_name).get('default_off', list())
logger.info('NON ATTIVI= ' + str(not_active))
# Apply default configurations if they do not exist
for control in default_controls:
@@ -364,6 +363,7 @@ def set_channel_setting(name, value, channel):
file_settings = os.path.join(config.get_data_path(), "settings_channels", channel + "_data.json")
dict_settings = {}
def_settings = get_default_settings(channel)
dict_file = None
@@ -377,10 +377,22 @@ def set_channel_setting(name, value, channel):
dict_settings[name] = value
# delete unused Settings
def_keys = []
del_keys = []
for key in def_settings:
def_keys.append(key['id'])
for key in dict_settings:
if key not in def_keys:
del_keys.append(key)
for key in del_keys:
del dict_settings[key]
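# e.g. if a channel's defaults no longer define "filter_languages", a stale "filter_languages"
# entry left over in the saved settings is dropped here before the file is rewritten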
# comprobamos si existe dict_file y es un diccionario, sino lo creamos
if dict_file is None or not dict_file:
dict_file = {}
dict_file['settings'] = dict_settings
# Creamos el archivo ../settings/channel_data.json

View File

@@ -217,11 +217,11 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
val = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
scraped[kk] = val
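# e.g. a relative "/serie/foo" scraped from the page becomes "https://<host>/serie/foo",
# with scheme and host taken from item.url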
if scraped['season'] != None:
season = scraped['season']
if stagione:
episode = season +'x'+ scraped['episode']
elif item.contentType == 'tvshow' and (scraped['episode'] == '' and season == ''):
if scraped['season']:
episode = scraped['season'] +'x'+ scraped['episode']
elif stagione:
episode = stagione +'x'+ scraped['episode']
elif item.contentType == 'tvshow' and (scraped['episode'] == '' and scraped['season'] == '' and stagione == ''):
item.news = 'season_completed'
episode = ''
else:
@@ -400,12 +400,13 @@ def scrape(func):
if 'itemlistHook' in args:
itemlist = args['itemlistHook'](itemlist)
if patronNext and inspect.stack()[1][3] != 'newest':
nextPage(itemlist, item, data, patronNext, function)
if (pagination and len(matches) <= pag * pagination) or not pagination: # next page with pagination
if patronNext and inspect.stack()[1][3] != 'newest':
nextPage(itemlist, item, data, patronNext, function)
# next page for pagination
if pagination and len(matches) >= pag * pagination:
if pagination and len(matches) > pag * pagination and not search:
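# e.g. with pagination=25 and 60 matches, the next-page item is appended while pag is 1 or 2
# (60 > 25, 60 > 50) and skipped once pag reaches 3; it is never appended during a search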
if inspect.stack()[1][3] != 'get_newest':
itemlist.append(
Item(channel=item.channel,
@@ -426,7 +427,7 @@ def scrape(func):
if anime:
if function == 'episodios' or item.action == 'episodios': autorenumber.renumber(itemlist, item, 'bold')
else: autorenumber.renumber(itemlist)
if anime and autorenumber.check(item) == False:
if anime and autorenumber.check(item) == False and not scrapertoolsV2.find_single_match(itemlist[0].title, r'(\d+.\d+)'):
pass
else:
if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
@@ -516,7 +517,7 @@ def dooplay_search_vars(item, blacklist):
if item.contentType == 'list': # ricerca globale
type = '(?P<type>movies|tvshows)'
typeActionDict = {'findvideos': ['movies'], 'episodios': ['tvshows']}
typeContentDict = {'movie': ['movies'], 'episode': ['tvshows']}
typeContentDict = {'movie': ['movies'], 'tvshow': ['tvshows']}
elif item.contentType == 'movie':
type = 'movies'
action = 'findvideos'
@@ -526,12 +527,6 @@ def dooplay_search_vars(item, blacklist):
patron = '<div class="result-item">.*?<img src="(?P<thumb>[^"]+)".*?<span class="' + type + '">(?P<quality>[^<>]+).*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a>.*?<span class="year">(?P<year>[0-9]{4}).*?<div class="contenido"><p>(?P<plot>[^<>]+)'
patronNext = '<a class="arrow_pag" href="([^"]+)"><i id="nextpagination"'
# def fullItemlistHook(itemlist):
# # se è una next page
# if itemlist[-1].title == typo(config.get_localized_string(30992), 'color kod bold'):
# itemlist[-1].action = 'peliculas'
# itemlist[-1].args = 'searchPage'
# return itemlist
return locals()
def swzz_get_url(item):
@@ -916,7 +911,7 @@ def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=Tru
videoitem.server = findS[2]
videoitem.title = findS[0]
item.title = item.contentTitle if config.get_localized_string(30161) in item.title else item.title
videoitem.title = item.fulltitle + (typo(videoitem.title, '_ color kod []') if videoitem.title else "") + (typo(videoitem.quality, '_ color kod []') if videoitem.quality else "")
videoitem.title = item.title + (typo(videoitem.title, '_ color kod []') if videoitem.title else "") + (typo(videoitem.quality, '_ color kod []') if videoitem.quality else "")
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail

View File

@@ -224,6 +224,7 @@ def save_movie(item):
return 0, 0, -1
def filter_list(episodelist, action=None, path=None):
if path: path = path.decode('utf8')
channel_prefs = {}
lang_sel = quality_sel = show_title = channel =''
if action:
@@ -396,13 +397,13 @@ def save_tvshow(item, episodelist):
_id = item.infoLabels['code'][0]
if config.get_setting("original_title_folder", "videolibrary") == 1 and item.infoLabels['originaltitle']:
base_name = item.infoLabels['originaltitle']
base_name = item.infoLabels[u'originaltitle']
elif item.infoLabels['tvshowtitle']:
base_name = item.infoLabels['tvshowtitle']
base_name = item.infoLabels[u'tvshowtitle']
elif item.infoLabels['title']:
base_name = item.infoLabels['title']
base_name = item.infoLabels[u'title']
else:
base_name = item.contentSerieName
base_name = u'%s' % item.contentSerieName
base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").encode("utf8")

View File

@@ -35,7 +35,7 @@ class UnshortenIt(object):
_anonymz_regex = r'anonymz\.com'
_shrink_service_regex = r'shrink-service\.it'
_rapidcrypt_regex = r'rapidcrypt\.net'
_cryptmango_regex = r'cryptmango'
_cryptmango_regex = r'cryptmango|xshield\.net'
_vcrypt_regex = r'vcrypt\.net'
_maxretries = 5
@@ -467,6 +467,7 @@ class UnshortenIt(object):
except Exception as e:
return uri, str(e)
def _unshorten_vcrypt(self, uri):
r = None
import base64, pyaes

View File

@@ -5,14 +5,14 @@ import os
import shutil
from cStringIO import StringIO
from core import httptools, filetools, downloadtools
from core.ziptools import ziptools
from core import httptools, filetools
from platformcode import logger, platformtools
import json
import xbmc
import re
import xbmcaddon
from lib import githash
import urllib
addon = xbmcaddon.Addon('plugin.video.kod')
@@ -21,7 +21,7 @@ _hdr_pat = re.compile("^@@ -(\d+),?(\d+)? \+(\d+),?(\d+)? @@.*")
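# parses unified-diff hunk headers such as "@@ -109,15 +109,12 @@", capturing the old and new
# start line and length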
branch = 'master'
user = 'kodiondemand'
repo = 'addon'
addonDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/'
addonDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))).replace('\\', '/') + '/'
maxPage = 5 # le api restituiscono 30 commit per volta, quindi se si è rimasti troppo indietro c'è bisogno di andare avanti con le pagine
trackingFile = "last_commit.txt"
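# The comment above notes that the API hands back 30 commits per call, which is why maxPage
# caps how far back the updater will look. A minimal illustrative sketch (an assumption, not
# the addon's actual routine; the helper name and parameter are made up) of walking those pages:
def _list_new_commits(last_known_sha):
    new_commits = []
    for page in range(1, maxPage + 1):
        url = 'https://api.github.com/repos/%s/%s/commits?sha=%s&page=%s' % (user, repo, branch, page)
        commits = json.loads(httptools.downloadpage(url).data)
        if not commits:  # no more pages
            break
        for c in commits:
            if c['sha'] == last_known_sha:  # reached the commit recorded in last_commit.txt
                return new_commits
            new_commits.append(c)
    return new_commits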
@@ -109,15 +109,12 @@ def check_addon_init():
alreadyApplied = False
else: # in case there have been problems
logger.info('lo sha non corrisponde, scarico il file')
remove(addonDir + file["filename"])
downloadtools.downloadfile(file['raw_url'], addonDir + file['filename'],
silent=True, continuar=True, resumir=False)
localFile.close()
urllib.urlretrieve(file['raw_url'], os.path.join(addonDir, file['filename']))
else: # it is a NON-text file, so it has to be downloaded
# if it is not already applied
if not (filetools.isfile(addonDir + file['filename']) and getSha(addonDir + file['filename']) == file['sha']):
remove(addonDir + file["filename"])
downloadtools.downloadfile(file['raw_url'], addonDir + file['filename'], silent=True,
continuar=True, resumir=False)
urllib.urlretrieve(file['raw_url'], os.path.join(addonDir, file['filename']))
alreadyApplied = False
elif file['status'] == 'removed':
remove(addonDir+file["filename"])
@@ -237,7 +234,6 @@ def updateFromZip():
remove(localfilename)
removeTree(destpathname + "addon-" + branch)
import urllib
urllib.urlretrieve(remotefilename, localfilename,
lambda nb, bs, fs, url=remotefilename: _pbhook(nb, bs, fs, url, dp))
@@ -260,6 +256,7 @@ def updateFromZip():
# clean everything up
removeTree(addonDir)
xbmc.sleep(1000)
rename(destpathname + "addon-" + branch, addonDir)
@@ -275,34 +272,25 @@ def updateFromZip():
def remove(file):
if os.path.isfile(file):
removed = False
while not removed:
try:
os.remove(file)
removed = True
except:
logger.info('File ' + file + ' NON eliminato')
try:
os.remove(file)
except:
logger.info('File ' + file + ' NON eliminato')
def removeTree(dir):
if os.path.isdir(dir):
removed = False
while not removed:
try:
shutil.rmtree(dir)
removed = True
except:
logger.info('Cartella ' + dir + ' NON eliminato')
try:
shutil.rmtree(dir)
except:
logger.info('Cartella ' + dir + ' NON eliminata')
def rename(dir1, dir2):
renamed = False
while not renamed:
try:
filetools.rename(dir1, dir2)
renamed = True
except:
logger.info('cartella ' + dir1 + ' NON rinominata')
try:
filetools.rename(dir1, dir2)
except:
logger.info('cartella ' + dir1 + ' NON rinominata')
# https://stackoverflow.com/questions/3083235/unzipping-file-results-in-badzipfile-file-is-not-a-zip-file
@@ -331,15 +319,4 @@ def _pbhook(numblocks, blocksize, filesize, url, dp):
dp.update(percent)
except:
percent = 90
dp.update(percent)
def remove(file):
if os.path.isfile(file):
removed = False
while not removed:
try:
os.remove(file)
removed = True
except:
logger.info('File ' + file + ' NON eliminato')
dp.update(percent)

View File

@@ -20,7 +20,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
patronvideos = [
r'(https?://(gestyy|rapidteria|sprysphere)\.com/[a-zA-Z0-9]+)',
r'(https?://(?:www\.)?(vcrypt|linkup)\.[^/]+/[^/]+/[a-zA-Z0-9_]+)',
r'(https?://(?:www\.)?(bit)\.ly/[a-zA-Z0-9]+)',
r'(https?://(?:www\.)?(xshield)\.[^/]+/[^/]+/[^/]+/[a-zA-Z0-9_\.]+)'
]
for patron in patronvideos:
@@ -41,6 +42,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
replace_headers=True,
headers={'User-Agent': 'curl/7.59.0'})
data = resp.headers.get("location", "")
elif 'xshield' in url:
from lib import unshortenit
data, status = unshortenit.unshorten(url)
logger.info("Data - Status zcrypt xshield.net: [%s] [%s] " %(data, status))
elif 'vcrypt.net' in url:
from lib import unshortenit
data, status = unshortenit.unshorten(url)
@@ -49,7 +54,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
idata = httptools.downloadpage(url).data
data = scrapertoolsV2.find_single_match(idata, "<iframe[^<>]*src=\\'([^'>]*)\\'[^<>]*>")
# fix by greko - start
if not data:
data = scrapertoolsV2.find_single_match(idata, 'action="(?:[^/]+.*?/[^/]+/([a-zA-Z0-9_]+))">')
from lib import unshortenit
data, status = unshortenit.unshorten(url)
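As the hunks above show, lib.unshortenit.unshorten(uri) returns a (url, status) pair, where status is an HTTP code or the error string seen in the except branch earlier in this commit. A minimal usage sketch, with an invented short link:
# --- illustrative sketch only: resolving a shortened link ---
from lib import unshortenit

def resolve_short_link(short_url):
    resolved, status = unshortenit.unshorten(short_url)
    if status != 200:   # assuming 200 marks a successful resolution
        return ''       # caller treats an empty string as "no link found"
    return resolved

# resolve_short_link('https://vcrypt.net/fake123')  # invented example
# --- end sketch ---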

servers/hdload.json (new file, 42 lines)
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://hdload\\.space/public/dist/index\\.html\\?id=([a-z0-9]+)",
"url": "https://hdload.space/getHost/\\1"
}
]
},
"free": true,
"id": "hdload",
"name": "hdload",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://mixdrop.co/imgs/mixdrop-logo2.png"
}
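The find_videos block above is presumably applied by the server framework roughly like this: the pattern is matched against the page or link text and the captured id is substituted into the url template (the link below is invented):
# --- illustrative sketch only: applying the pattern/url pair ---
import re

pattern = r'https://hdload\.space/public/dist/index\.html\?id=([a-z0-9]+)'
url_template = r'https://hdload.space/getHost/\1'

sample_link = 'https://hdload.space/public/dist/index.html?id=abc123'  # invented id
print(re.sub(pattern, url_template, sample_link))
# -> https://hdload.space/getHost/abc123
# --- end sketch ---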

servers/hdload.py (new file, 31 lines)
View File

@@ -0,0 +1,31 @@
# -*- coding: utf-8 -*-
from core import httptools, scrapertoolsV2
from platformcode import config, logger
import base64
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, cookies=False).data
if 'Not found id' in data:
return False, config.get_localized_string(70449) % "hdload"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
itemlist = []
logger.info(page_url)
page_url = 'https://hdload.space/getHost/' + scrapertoolsV2.find_single_match(page_url, 'https://hdload\.space/public/dist/index\.html\?id=([a-z0-9]+)')
logger.info(page_url)
data = httptools.downloadpage(page_url, post='').data
logger.info(data)
url = base64.b64decode(data)
itemlist.append([".mp4 [hdload]", url])
return itemlist
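The getHost endpoint above apparently returns the final stream URL base64-encoded; the decode step, shown on an invented response body:
# --- illustrative sketch only: the base64 decode step ---
import base64

fake_response = base64.b64encode(b'https://cdn.example/stream/abc.mp4')  # invented body
print(base64.b64decode(fake_response).decode('utf-8'))
# --- end sketch ---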

servers/mixdrop.json (new file, 42 lines)
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "mixdrop.co/(?:f|e)/([a-z0-9]+)",
"url": "https://mixdrop.co/e/\\1"
}
]
},
"free": true,
"id": "mixdrop",
"name": "MixDrop",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://mixdrop.co/imgs/mixdrop-logo2.png"
}

servers/mixdrop.py (new file, 46 lines)
View File

@@ -0,0 +1,46 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertoolsV2
from platformcode import config, logger
from lib import jsunpack
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, cookies=False).data
if 'WE ARE SORRY' in data:
return False, config.get_localized_string(70449) % "MixDrop"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
itemlist = []
# streaming url
data = httptools.downloadpage(page_url).data
jsCode = scrapertoolsV2.find_single_match(data, '<script>\r\nMDCore\.ref = "[a-z0-9]+";\r\n(.*?)\r\n</script>')
jsUnpacked = jsunpack.unpack(jsCode)
url = "https://" + scrapertoolsV2.find_single_match(jsUnpacked, 'MDCore\.vsrc="//([^"]+)')
itemlist.append([".mp4 [MixDrop]", url])
# download url
# import urllib
# try:
# import json
# except:
# import simplejson as json
# page_url = page_url.replace('/e/', '/f/') + '?download'
# data = httptools.downloadpage(page_url).data
# csrf = scrapertoolsV2.find_single_match(data, '<meta name="csrf" content="([^"]+)">')
# postData = {'csrf': csrf, 'a': 'genticket'}
# resp = httptools.downloadpage(page_url, post=urllib.urlencode(postData)).data
# resp = json.loads(resp)
# if resp['type'] == 'ok':
# itemlist.append([".mp4 [MixDrop]", 'https:' + resp['url']])
return itemlist
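Once the packed player script is unpacked, the host is read out of MDCore.vsrc; the final extraction step, shown on an invented unpacked snippet:
# --- illustrative sketch only: extracting the host from the unpacked JS ---
import re

unpacked_js = 'MDCore.ref="xyz";MDCore.vsrc="//s-cdn.example/v/abc123.mp4";'  # invented
host_path = re.search(r'MDCore\.vsrc="//([^"]+)', unpacked_js).group(1)
print('https://' + host_path)  # -> https://s-cdn.example/v/abc123.mp4
# --- end sketch ---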

View File

@@ -1,5 +1,5 @@
{
"active": true,
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [

View File

@@ -1,5 +1,5 @@
{
"active": true,
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [

View File

@@ -1,5 +1,5 @@
{
"active": true,
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [

View File

@@ -1,5 +1,5 @@
{
"active": true,
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [

View File

@@ -1,5 +1,5 @@
{
"active": true,
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [

View File

@@ -22,6 +22,11 @@ def test_video_exists(page_url):
data = httptools.downloadpage(page_url).data
if "Not Found" in data or "File was deleted" in data:
return False, "[VVVVID] The file does not exist or has been deleted"
else:
page_url = page_url.replace("/show/","/#!show/")
show_id = re.findall("#!show/([0-9]+)/", page_url)[0]
name = re.findall(show_id + "/(.+?)/", page_url)
if not name: return False, "[VVVVID] The file does not exist or has been deleted"
return True, ""
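A quick walk-through of the extra check added above, on an invented show URL:
# --- illustrative sketch only: how the show id and name are parsed ---
import re

page_url = 'https://www.vvvvid.it/show/123/some-show/456/789/episode-1'  # invented URL
page_url = page_url.replace('/show/', '/#!show/')

show_id = re.findall('#!show/([0-9]+)/', page_url)[0]  # -> '123'
name = re.findall(show_id + '/(.+?)/', page_url)       # -> ['some-show']
print(show_id, bool(name))   # an empty name list means "file does not exist"
# --- end sketch ---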

View File

@@ -434,7 +434,7 @@ def get_title(item):
if item.quality:
title += support.typo(item.quality, '_ [] color kod')
season_ = support.typo(config.get_localized_string(70736), '_ [] color white bold') if (type(item.args) != bool and 'season_completed' in item.news) else ''
season_ = support.typo(config.get_localized_string(70736), '_ [] color white bold') if (type(item.args) != bool and 'season_completed' in item.news and not item.episode) else ''
if season_:
title += season_
return title