Fix several channels: AnimeWorld, Filmpertutti, serieTVU and dreamsub.

Fix the clipwatching server and add resolution labels in Wstream
marco
2019-11-14 09:16:10 +01:00
parent d8a61c0b20
commit 4e80db95f3
11 changed files with 148 additions and 428 deletions

View File

@@ -1,267 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for 'cinemalibero'
"""
Questi sono commenti per i beta-tester.
Su questo canale, nelle categorie:
-'Ricerca Globale'
- Novità, voce interna al canale
- Nella lista anime
non saranno presenti le voci:
- 'Aggiungi alla Videoteca'
- 'Scarica Film'/'Scarica Serie',
Inoltre nella lista Anime non è presente la voce rinumerazione!
dunque, la loro assenza, nel Test, NON dovrà essere segnalata come ERRORE.
Novità ( globale ), presenti solo i film:
- film ( 20 titoli ) della pagina https://www.cinemalibero.best/category/film/
Avvisi:
- Eventuali avvisi per i tester
Ulteriori info:
"""
import re
# decorators, logging, and helpers for particular sites
from core import support
# if findhost() is not used
from platformcode import config
# if needed
from core import scrapertoolsV2, httptools#, servertools
from core.item import Item # for newest
#from lib import unshortenit
# the __channel__ variable, if needed
# delete it if unused
__channel__ = "cinemalibero"
# delete if findhost() is used
host = config.get_channel_url('cinemalibero')
headers = [['Referer', host]]
list_servers = ['akstream', 'wstream', 'openload', 'streamango']
list_quality = ['default']
### end of variables
#### Start of the main defs ###
@support.menu
def mainlist(item):
support.log(item)
film = ['/category/film/',
('Generi', ['', 'genres', 'genres']),
]
# SERIES entry; only the url can be set
tvshow = ['/category/serie-tv/',
('Novità', ['/aggiornamenti-serie-tv/', 'peliculas', 'update'])
]
# ANIME entry; only the url can be set
Anime = [('Anime', ['/category/anime-giapponesi/', 'peliculas', 'anime', 'tvshow']), # url for the Anime entry, ideally the page listing anime titles
## #Menu entry, ['url','action','args',contentType]
## ('Novità', ['', '', '']),
## ('In Corso',['', '', '', '']),
## ('Ultimi Episodi',['', '', '', '']),
## ('Ultime Serie',['', '', '', ''])
]
search = ''
return locals()
@support.scrape
def peliculas(item):
support.log(item)
#support.dbg() # uncomment to enable web_pdb
if item.args == 'search':
patron = r'href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">.+?class="titolo">(?P<title>[^<]+)<'
patronBlock = r'style="color: #2C3549 !important;" class="fon my-3"><small>.+?</small></h1>(?P<block>.*?)<div class="bg-dark ">'
action = 'select'
else:
if item.contentType == 'tvshow':
if item.args == 'update':
patron = r'<div class="card-body p-0">\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">\s<div class="titolo">(?P<title>.+?)(?: &#8211; Serie TV)?(?:\([sSuUbBiItTaA\-]+\))?[ ]?(?P<year>\d{4})?</div>[ ]<div class="genere">(?:[\w]+?\.?\s?[\s|S]?[\dx\-S]+?\s\(?(?P<lang>[iItTaA]+|[sSuUbBiItTaA\-]+)\)?\s?(?P<quality>[HD]+)?|.+?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?</div>)'
action = 'select'
def itemHook(item):
if item.lang2:
if len(item.lang2) <3:
item.lang2 = 'ITA'
item.contentLanguage = item.lang2
item.title += support.typo(item.lang2, '_ [] color kod')
return item
elif item.args == 'anime':# or 'anime' in item.url:
patron = r'<div class="card-body p-0"> <a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)(?:\([sSuUbBiItTaA\-]+\))?\s?(?:(?P<year>\d{4}|\(\d{4}\)|)?)?<[^>]+>(?:<div class="genere">.+?(?:\()?(?P<lang>ITA|iTA|Sub)(?:\))?)?'
action = 'select'
else:
patron = r'<div class="card-body p-0">\s?<a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)</div>(?:<div class="genere">(?:[ |\w]+?(?:[\dx\-]+)?[ ](?:\()?(?P<lang>[sSuUbB]+|[iItTaA]+)(?:\))?\s?(?P<quality>[\w]+)?\s?|[\s|S]?[\dx\-]+\s[|]?\s?(?:[\w]+)?\s?\(?(\4[sSuUbB]+)?\)?)?.+?</div>)?'
action = 'episodios'
elif item.contentType == 'movie':
action = 'findvideos'
patron = r'href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)(?:\[(?P<lang>Sub-iTA|Sub-ITA|Sub)\])?[ ]\((?P<year>\d+)\)</div>(?:<div class="genere">(?P<quality>[^<]+)<)?'
patronBlock = r'<h1(?: style="color: #2C3549 !important; text-transform: uppercase;"| style="text-transform: uppercase; color: #2C3549 !important;"| style="color: #2C3549 !important; text-transform: uppercase;" style="text-shadow: 1px 1px 1px #FF8C00; color:#FF8C00;"| style="text-shadow: 1px 1px 1px #0f0f0f;" class="darkorange"| style="color:#2C3549 !important;")>.+?</h1>(?P<block>.*?)<div class=(?:"container"|"bg-dark ")>'
patronNext = '<a class="next page-numbers".*?href="([^"]+)">'
## debug = True # True to test the regexes against the site
return locals()
@support.scrape
def episodios(item):
support.log(item)
#support.dbg()
data = item.data1
if item.args == 'anime':
item.contentType = 'tvshow'
blacklist = ['Clipwatching', 'Verystream', 'Easybytez']
patron = r'(?:href="[ ]?(?P<url>[^"]+)"[^>]+>(?P<title>[^<]+)<|(?P<episode>\d+(?:&#215;|×)?\d+\-\d+|\d+(?:&#215;|×)\d+)[;]?(?:(\4[^<]+)(\2.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br />|</a></p>))'
#patron = r'<a target=.+?href="(?P<url>[^"]+)"[^>]+>(?P<title>(Epis|).+?(?P<episode>\d+)?)(?:\((?P<lang>Sub ITA)\))?</a>(?:<br />)?'
patronBlock = r'(?:class="txt_dow">Streaming:(?P<block>.*?)at-below-post)'
else:
patron = r'(?P<episode>\d+(?:&#215;|×)?\d+\-\d+|\d+(?:&#215;|×)\d+)[;]?[ ]?(?:(?P<title>[^<]+)(?P<url>.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br />|</a></p>)'
## patron = r'<a target=.+?href="(?P<url>[^"]+)"[^>]+>(?P<title>Epis.+?(\d+)?)(?:\((?P<lang>Sub ITA)\))?</a><br />'
patronBlock = r'<p><strong>(?P<block>(?:.+?[Ss]tagione.+?(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?(?:|.+?|</strong>)(?:</span>)?</p>.*?</p>)'
item.contentType = 'tvshow'
action = 'findvideos'
debug = True
return locals()
@support.scrape
def genres(item):
support.log(item)
#support.dbg()
action = 'peliculas'
#blacklist = ['']
patron = r'<a class="dropdown-item" href="(?P<url>[^"]+)" title="(?P<title>[A-z]+)"'
return locals()
############## End of the mandatory ordering
## Additional defs
def select(item):
support.log('select --->', item)
#support.dbg()
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertoolsV2.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)<div style="margin-left: 0.5%; color: #FFF;">')
if re.findall('rel="category tag">serie', data, re.IGNORECASE):
support.log('select = ### è una serie ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
args='serie',
contentType='tvshow',
data1 = data
))
elif re.findall('rel="category tag">anime', data, re.IGNORECASE):
if re.findall('episodio', block, re.IGNORECASE) or re.findall('stagione', data, re.IGNORECASE) or re.findall('numero stagioni', data, re.IGNORECASE):
support.log('select = ### è un anime ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
args='anime',
contentType='tvshow',
data1 = data
))
else:
support.log('select = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
args = '',
contentType='movie',
#data = data
))
else:
support.log('select = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
contentType='movie',
#data = data
))
############## Page footer
# adapt to the channel
def search(item, text):
support.log('search', item)
itemlist = []
text = text.replace(' ', '+')
item.url = host + "/?s=" + text
try:
item.args = 'search'
item.contentType = 'episode' # keeps the entries out of the context menu
return peliculas(item)
# Catch the exception so the global search is not interrupted when a channel fails
except:
import sys
for line in sys.exc_info():
support.log('search log:', line)
return []
# adapt to the channel
# include newest only if the site has a latest news/additions page
# otherwise do NOT include it
def newest(categoria):
support.log('newest ->', categoria)
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host+'/category/film/'
item.contentType = 'movie'
## item.action = 'peliculas'
## itemlist = peliculas(item)
elif categoria == 'series':
item.contentType = 'tvshow'
item.args = 'update'
item.url = host+'/aggiornamenti-serie-tv/'
item.action = 'peliculas'
itemlist = peliculas(item)
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Keep searching if an error occurs
except:
import sys
for line in sys.exc_info():
support.log('newest log: ', '{0}'.format(line))
return []
return itemlist
#support.server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True)
def findvideos(item):
support.log('findvideos ->', item)
#return support.server(item, headers=headers)
support.log(item)
if item.contentType == 'movie':
return support.server(item)
else:
return support.server(item, data= item.url)

View File

@@ -95,6 +95,7 @@ def peliculas(item):
anime=True
if item.args == 'updated':
item.contentType='episode'
patron=r'<div class="inner">\s*<a href="(?P<url>[^"]+)" class[^>]+>\s*<img src="(?P<thumb>[^"]+)" alt?="(?P<title>[^\("]+)(?:\((?P<lang>[^\)]+)\))?"[^>]+>[^>]+>\s*(?:<div class="[^"]+">(?P<type>[^<]+)</div>)?[^>]+>[^>]+>\s*<div class="ep">[^\d]+(?P<episode>\d+)[^<]*</div>'
action='findvideos'
else:
@@ -143,8 +144,9 @@ def findvideos(item):
itemlist = []
matches, data = support.match(item, r'class="tab.*?data-name="([0-9]+)">', headers=headers)
videoData = ''
for serverid in matches:
if not item.number: item.number = support.scrapertoolsV2.find_single_match(item.title,r'(\d+) -')
block = support.scrapertoolsV2.find_multiple_matches(data,'data-id="' + serverid + '">(.*?)<div class="server')
ID = support.scrapertoolsV2.find_single_match(str(block),r'<a data-id="([^"]+)" data-base="' + (item.number if item.number else '1') + '"')
support.log('ID= ',serverid)
@@ -154,29 +156,32 @@ def findvideos(item):
for url in matches:
videoData += '\n' + url
else:
dataJson = support.httptools.downloadpage('%s/ajax/episode/info?id=%s&server=%s&ts=%s' % (host, ID, serverid, int(time.time())), headers=[['x-requested-with', 'XMLHttpRequest']]).data
json = jsontools.load(dataJson)
support.log(json)
if 'keepsetsu' in json['grabber']:
matches = support.match(item, r'<iframe\s*src="([^"]+)"', url=json['grabber'])[0]
for url in matches:
videoData += '\n' + url
else:
videoData += '\n' + json['grabber']
try:
dataJson = support.httptools.downloadpage('%s/ajax/episode/info?id=%s&server=%s&ts=%s' % (host, ID, serverid, int(time.time())), headers=[['x-requested-with', 'XMLHttpRequest']]).data
json = jsontools.load(dataJson)
support.log(json)
if 'keepsetsu' in json['grabber']:
matches = support.match(item, r'<iframe\s*src="([^"]+)"', url=json['grabber'])[0]
for url in matches:
videoData += '\n' + url
else:
videoData += '\n' + json['grabber']
if serverid == '28':
itemlist.append(
support.Item(
channel=item.channel,
action="play",
title='diretto',
quality='',
url=json['grabber'],
server='directo',
fulltitle=item.fulltitle,
show=item.show,
contentType=item.contentType,
folder=False))
if serverid == '28':
itemlist.append(
support.Item(
channel=item.channel,
action="play",
title='diretto',
quality='',
url=json['grabber'],
server='directo',
fulltitle=item.fulltitle,
show=item.show,
contentType=item.contentType,
folder=False))
except:
pass
return support.server(item, videoData, itemlist)

View File

@@ -261,7 +261,7 @@ def findvideos(item):
data = httptools.downloadpage(url).data
#host_video = scrapertoolsV2.find_single_match(data, r'var thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"')
host_video = scrapertoolsV2.find_single_match(data, r'let thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"')
host_video = scrapertoolsV2.find_single_match(data, r'(?:let|var) thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"')
link = scrapertoolsV2.find_single_match(data, r'<video src="([^"]+)"')
video_urls = host_video+link
@@ -274,7 +274,7 @@ def findvideos(item):
title=title_show,
fulltitle=item.fulltitle,
show=item.fulltitle,
url=video_urls,
url=link if 'http' in link else video_urls,
infoLabels = item.infoLabels,
thumbnail=item.thumbnail,
contentSerieName= item.fulltitle,

View File

@@ -32,13 +32,13 @@ list_quality = ['HD', 'SD']
def mainlist(item):
film = ['/category/film/',
('Generi', ['/category/film/', 'genres', 'lettersF'])
]
tvshow = ['/category/serie-tv/',
('Aggiornamenti', ['/aggiornamenti-serie-tv/', 'peliculas', 'newest']),
('Per Lettera', ['/category/serie-tv/', 'genres', 'lettersS'])
]
search = ''
return locals()
@@ -56,7 +56,7 @@ def peliculas(item):
patronBlock = r'<ul class="posts">(?P<block>.*)<div class="clear">'
patron = r'<li>\s?<a href="(?P<url>[^"]+)" data-thumbnail="(?P<thumb>[^"]+)">.*?<div class="title">(?P<title>.+?)(?:\s\[(?P<quality>HD)\])?<\/div>[^>]+>(?:[\dx]+)\s?(?:[ ]\((?P<lang>[a-zA-Z\-]+)\))?.+?</div>'
pagination = ''
if item.args == 'search':
action = 'select'
elif item.contentType == 'tvshow':
@@ -69,33 +69,34 @@ def peliculas(item):
def itemHook(item):
item.title = item.title.replace(r'-', ' ')
return item
#debug = True
return locals()
@support.scrape
def episodios(item):
support.log()
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub('\n|\t', ' ', data)
data = re.sub(r'>\s+<', '> <', data)
support.log('SERIES DATA= ',data)
if 'accordion-item' in data:
#patronBlock = r'<span class="season(?:|-title)">(?P<season>\d+)[^>]+>[^>]+>\s+?[^>]+>[^>]+>.+?(?:STAGIONE|Stagione).+?\s(?P<lang>[a-zA-Z\-]+).+?</span>(?P<block>.*?)<div id="disqus_thread">'
patronBlock = r'<span class="season(?:|-title)">(?P<season>\d+)[^>]+>[^>]+>\s+?[^>]+>[^>]+>.+?(?:STAGIONE|Stagione).+?\s(?P<lang>[a-zA-Z\-]+)</span>(?P<block>.*?)\s*(?:<li class="s_title">|<div id="disqus_thread">)'
patronBlock = r'<span class="season[^>]*>(?P<season>\d+)[^>]+>[^>]+>[^>]+>[^>]+>\D*(?:STAGIONE|Stagione)[ -]+(?P<lang>[a-zA-Z\- ]+)[^<]*</span>(?P<block>.*?)<div id="(?:season|disqus)'
patron = r'<img src="(?P<thumb>[^"]+)">.*?<li class="season-no">(?P<episode>.*?)<\/li>(?P<url>.*?javascript:;">(?P<title>[^<]+)<.+?)<\/table>'
else:
patronBlock = r'<div id="info" class="pad">(?P<block>.*?)<div id="disqus_thread">'
patron = r'<strong>(?P<lang>.*?)<\/strong>.*?<p>(?P<season>.*?)<span'
#debug = True
# patronBlock = r'<div id="info" class="pad">(?P<block>.*?)<div id="disqus_thread">'
# deflang='Sub-ITA'
patronBlock = r'(?:STAGIONE|Stagione)(?:<[^>]+>)?\s*(?:(?P<lang>[A-Za-z- ]+))?(?P<block>.*?)(?:&nbsp;|<strong>|<div class="addtoany)'
patron = r'(?:/>|p>)\s*(?P<season>\d+)(?:&#215;|×|x)(?P<episode>\d+)[^<]+(?P<url>.*?)(?:<br|</p)'
def itemHook(item):
item.title = item.title.replace('&#215;','x')
if not item.contentLanguage:
item.contentLanguage = 'ITA'
return item
return locals()
@support.scrape
def genres(item):
support.log()
itemlist = []
if item.args == 'lettersF':
item.contentType = 'movie'
@@ -104,31 +105,24 @@ def genres(item):
action = 'peliculas'
patronBlock = r'<select class="cats">(?P<block>.*?)<\/select>'
patron = r'<option data-src="(?P<url>[^"]+)">(?P<title>.*?)<\/option>'
patronMenu = r'<option data-src="(?P<url>[^"]+)">(?P<title>.*?)<\/option>'
return locals()
def select(item):
support.log()
data = httptools.downloadpage(item.url, headers=headers).data
patronBlock = scrapertoolsV2.find_single_match(data, r'class="taxonomy category" ><span property="name">(.*?)</span></a><meta property="position" content="2">')
if patronBlock.lower() != 'film':
support.log('select = ### è una serie ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
contentSerieName = item.fulltitle,
url=item.url,
contentType='tvshow'))
item.contentType='tvshow'
return episodios(item)
else:
support.log('select = ### è un movie ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
contentType='movie'))
item.contentType='movie'
return findvideos(item)
def search(item, texto):
@@ -164,9 +158,6 @@ def newest(categoria):
item.contentType = 'tvshow'
itemlist = peliculas(item)
## if itemlist[-1].action == "peliculas":
## itemlist.pop()
# Keep searching if an error occurs
except:
import sys

View File

@@ -23,10 +23,9 @@ list_quality = ['default']
@support.menu
def mainlist(item):
log()
tvshow = ['/category/serie-tv',
('Aggiornamenti Serie', ['/ultimi-episodi/', 'peliculas', 'update']),
('Ultimi episodi', ['/ultimi-episodi/', 'peliculas', 'update']),
('Generi', ['', 'genres', 'genres'])
]
@@ -35,47 +34,41 @@ def mainlist(item):
@support.scrape
def peliculas(item):
log()
patronBlock = r'<div class="wrap">\s+<h.>.*?</h.>(?P<block>.*?)<footer>'
if item.args != 'update':
action = 'episodios'
patron = r'<div class="item">\s*<a href="(?P<url>[^"]+)" data-original="(?P<thumb>[^"]+)" class="lazy inner">[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)<'
else:
action = 'findvideos'
patron = r'<div class="item">\s+?<a href="(?P<url>[^"]+)"\s+?data-original="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>.+?)<[^>]+>\((?P<episode>[\dx\-]+)\s+?(?P<lang>Sub-Ita|[iITtAa]+)\)<'
pagination = 25
patronNext = r'<li><a href="([^"]+)"\s+?>Pagina successiva'
#support.regexDbg(item, patron, headers)
#debug = True
return locals()
@support.scrape
def episodios(item):
log()
seasons, data = support.match(item, r'<option value="(\d+)"[^>]*>\D+(\d+)')
patronBlock = r'</select><div style="clear:both"></div></h2>(?P<block>.*?)<div id="trailer" class="tab">'
patron = r'(?:<div class="list (?:active)?" data-id="(?P<season>\d+)">[^>]+>)?\s+<a data-id="(?P<episode>\d+)(?:[ ](?P<lang>[SuUbBiItTaA\-]+))?"(?P<url>[^>]+)>[^>]+>[^>]+>(?P<title>.+?)(?:\sSub-ITA)?<'
#support.regexDbg(item, patronBlock, headers)
#debug = True
return locals()
patron = r'(?:<div class="list (?:active)?" data-id="(?P<season>\d+)">[^>]+>)?\s*<a data-id="(?P<episode>\d+)(?:[ ](?P<lang>[SuUbBiItTaA\-]+))?"(?P<url>[^>]+)>[^>]+>[^>]+>(?P<title>.+?)(?:\sSub-ITA)?<'
def itemHook(item):
for value, season in seasons:
log(value)
log(season)
item.title = item.title.replace(value+'x',season+'x')
return item
return locals()
@support.scrape
def genres(item):
log()
blacklist = ["Home Page", "Calendario Aggiornamenti"]
action = 'peliculas'
patronBlock = r'<h2>Sfoglia</h2>\s*<ul>(?P<block>.*?)</ul>\s*</section>'
patron = r'<li><a href="(?P<url>[^"]+)">(?P<title>[^<]+)</a></li>'
#debug = True
patronMenu = r'<li><a href="(?P<url>[^"]+)">(?P<title>[^<]+)</a></li>'
return locals()
@@ -92,6 +85,7 @@ def search(item, text):
log("%s" % line)
return []
def newest(categoria):
log(categoria)
itemlist = []
@@ -117,7 +111,7 @@ def newest(categoria):
def findvideos(item):
log()
if item.args != 'update':
return support.server(item, data=item.url)
else:
itemlist = []
item.infoLabels['mediatype'] = 'episode'
@@ -125,26 +119,25 @@ def findvideos(item):
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub('\n|\t', ' ', data)
data = re.sub(r'>\s+<', '> <', data)
## support.log("DATA - HTML:\n", data)
url_video = scrapertoolsV2.find_single_match(data, r'<div class="item"> <a data-id="[^"]+" data-href="([^"]+)" data-original="[^"]+"[^>]+> <div> <div class="title">Episodio \d+', -1)
url_serie = scrapertoolsV2.find_single_match(data, r'<link rel="canonical" href="([^"]+)"\s?/>')
goseries = support.typo("Vai alla Serie:", ' bold')
goseries = support.typo(">> Vai alla Serie:", ' bold')
series = support.typo(item.contentSerieName, ' bold color kod')
itemlist = support.server(item, data=url_video)
itemlist.append(
Item(channel=item.channel,
title=goseries + series,
fulltitle=item.fulltitle,
show=item.show,
contentType='tvshow',
contentSerieName=item.contentSerieName,
url=url_serie,
action='episodios',
contentTitle=item.contentSerieName,
plot = goseries + series + "con tutte le puntate",
))
#support.regexDbg(item, patronBlock, headers)
return itemlist
title=goseries + series,
fulltitle=item.fulltitle,
show=item.show,
contentType='tvshow',
contentSerieName=item.contentSerieName,
url=url_serie,
action='episodios',
contentTitle=item.contentSerieName,
plot = goseries + series + "con tutte le puntate",
thumbnail = support.thumb(thumb='channels_tvshow.png')
))
return itemlist

View File

@@ -269,7 +269,7 @@ def get_thumb(thumb_name, view="thumb_", auto=False):
else:
icon_pack_name = config.get_setting('icon_set', default="default")
media_path = os.path.join("https://raw.githubusercontent.com/kodiondemand/media/master/themes/", icon_pack_name)
media_path = os.path.join("https://raw.githubusercontent.com/kodiondemand/media/master/themes", icon_pack_name)
if config.get_setting('enable_custom_theme') and config.get_setting('custom_theme') and os.path.isfile(config.get_setting('custom_theme') + view + thumb_name):
media_path = config.get_setting('custom_theme')
@@ -344,7 +344,7 @@ def auto_filter(auto_lang=False):
# return lang, lang_list
def thumb(itemlist=[], genre=False):
def thumb(itemlist=[], genre=False, thumb=''):
if itemlist:
import re
@@ -428,5 +428,7 @@ def thumb(itemlist=[], genre=False):
item.title = re.sub(r'\s*\{[^\}]+\}','',item.title)
return itemlist
elif thumb:
return get_thumb(thumb)
else:
return get_thumb('next.png')
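With the new thumb= keyword, a caller can resolve a single named icon through the active icon pack instead of decorating a whole itemlist. A minimal usage sketch, mirroring the call sites added elsewhere in this commit:

from core import support

# Resolve one themed icon by file name; with no itemlist and no name,
# thumb() still falls back to get_thumb('next.png').
icon_url = support.thumb(thumb='downloads.png')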

View File

@@ -781,32 +781,34 @@ def download(itemlist, item, typography='', function_level=1, function=''):
if itemlist and item.contentChannel != 'videolibrary':
itemlist.append(
Item(channel='downloads',
from_channel=item.channel,
title=title,
fulltitle=item.fulltitle,
show=item.fulltitle,
contentType=item.contentType,
contentSerieName=contentSerieName,
url=item.url,
action='save_download',
from_action=from_action,
contentTitle=contentTitle,
path=item.path
from_channel=item.channel,
title=title,
fulltitle=item.fulltitle,
show=item.fulltitle,
contentType=item.contentType,
contentSerieName=contentSerieName,
url=item.url,
action='save_download',
from_action=from_action,
contentTitle=contentTitle,
path=item.path,
thumbnail=thumb(thumb='downloads.png')
))
if from_action == 'episodios':
itemlist.append(
Item(channel='downloads',
from_channel=item.channel,
title=typo(config.get_localized_string(60357),typography),
fulltitle=item.fulltitle,
show=item.fulltitle,
contentType=item.contentType,
contentSerieName=contentSerieName,
url=item.url,
action='save_download',
from_action=from_action,
contentTitle=contentTitle,
download='season'
from_channel=item.channel,
title=typo(config.get_localized_string(60357),typography),
fulltitle=item.fulltitle,
show=item.fulltitle,
contentType=item.contentType,
contentSerieName=contentSerieName,
url=item.url,
action='save_download',
from_action=from_action,
contentTitle=contentTitle,
download='season',
thumbnail=thumb(thumb='downloads.png')
))
return itemlist

View File

@@ -4,18 +4,14 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "clipwatching.com/(e.*?.html)",
"url": "http://clipwatching.com/\\1"
},
{
"pattern": "clipwatching.com/(\\w+)",
"pattern": "clipwatching.com/(?:embed-)?([a-zA-Z0-9]+).html",
"url": "http://clipwatching.com/embed-\\1.html"
}
]
},
"free": true,
"id": "clipwatching",
"name": "clipwatching",
"name": "ClipWatching",
"settings": [
{
"default": false,
@@ -42,5 +38,5 @@
"visible": false
}
],
"thumbnail": "https://s17.postimg.cc/e6kcan0vj/clipwatching1.png"
"thumbnail": "server_clipwatching.png"
}
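The old catch-all pattern clipwatching.com/(\w+) also matched the bare token "embed" inside an already-embedded URL, which is why the two rules were merged into one. A standalone sketch of how the consolidated pattern normalizes both URL forms (the video code x7k2q9 is invented; in the addon the rewriting is driven by the find_videos patterns, not by this snippet):

import re

pattern = r"clipwatching.com/(?:embed-)?([a-zA-Z0-9]+).html"
for url in ("http://clipwatching.com/x7k2q9.html",
            "http://clipwatching.com/embed-x7k2q9.html"):
    code = re.search(pattern, url).group(1)
    # both forms yield the same code, so both map to the same embed URL
    print("http://clipwatching.com/embed-%s.html" % code)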

View File

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from lib import jsunpack
@@ -17,12 +18,16 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
packed = scrapertools.find_single_match(data, "text/javascript'>(.*?)\s*</script>")
unpacked = jsunpack.unpack(packed)
data = re.sub('\t|\n','',data)
logger.info('CLIP DATA= ' + data)
packed = scrapertools.find_single_match(data, r"text/javascript'>(.*?)\s*</script>")
try: unpacked = jsunpack.unpack(packed)
except: unpacked = data
video_urls = []
videos = scrapertools.find_multiple_matches(unpacked, 'file:"([^"]+).*?label:"([^"]+)')
for video, label in videos:
videos = scrapertools.find_multiple_matches(unpacked, r'(?:file|src):\s*"([^"]+).*?type:\s*"video/([^"]+)".*?label:\s*"([^"]+)')
for video, Type, label in videos:
logger.info(Type)
logger.info(label)
if ".jpg" not in video:
video_urls.append([label + " [clipwatching]", video])
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
video_urls.append(['%s [%sp] [ClipWatching]' % (Type, label), video])
return video_urls
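The widened regex above now captures the file url, the container type and the resolution label in a single pass, which is what feeds the new '%s [%sp] [ClipWatching]' titles. A minimal sketch on an invented jwplayer-style string (real unpacked data varies per page):

import re

# Hypothetical output of jsunpack.unpack() for a ClipWatching page
unpacked = 'sources: [{file:"https://example.com/v.mp4",type:"video/mp4",label:"720"}]'
videos = re.findall(r'(?:file|src):\s*"([^"]+).*?type:\s*"video/([^"]+)".*?label:\s*"([^"]+)', unpacked)
for video, Type, label in videos:
    print('%s [%sp] [ClipWatching]' % (Type, label))  # -> mp4 [720p] [ClipWatching]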

View File

@@ -1,9 +1,9 @@
{
"id": "wstream",
"name": "wstream",
"name": "Wstream",
"active": true,
"free": true,
"thumbnail": "http:\/\/media.tvalacarta.info\/servers\/server_wstream.png",
"thumbnail": "server_wstream.png",
"find_videos": {
"patterns": [
{
@@ -23,7 +23,7 @@
"url": "https://wstream.video/video.php?file_code=\\1"
},
{
"pattern": "wstream\\.video/(?!api/|stream/)([a-z0-9A-Z]+)",
"pattern": "wstream\\.video/(?!api/|stream/|embed-)([a-z0-9A-Z]+)",
"url": "https://wstream.video/video.php?file_code=\\1"
}
],
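Adding embed- to the negative lookahead keeps this catch-all rule from swallowing embed URLs, which are presumably handled by a more specific pattern earlier in the list (not shown in this hunk). A quick check with invented codes:

import re

pattern = r"wstream\.video/(?!api/|stream/|embed-)([a-z0-9A-Z]+)"
print(re.search(pattern, "https://wstream.video/abc123").group(1))    # 'abc123'
print(re.search(pattern, "https://wstream.video/embed-abc123.html"))  # None: left to the embed rule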

View File

@@ -2,11 +2,10 @@
# Kodi on Demand - Kodi Addon - Kodi Addon
# by DrZ3r0 - Fix Alhaziel
import re
import urllib
import re, json, urllib
from core import httptools, scrapertools
from platformcode import logger
from platformcode import logger, config
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0']]
@@ -14,13 +13,13 @@ def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Not Found" in data or "File was deleted" in data:
return False, "[wstream.py] Il File Non esiste"
return False, config.get_localized_string(70449) % 'Wstream'
return True, ""
# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("[wstream.py] url=" + page_url)
logger.info("[Wstream] url=" + page_url)
video_urls = []
if '/streaming.php' in page_url:
@@ -28,33 +27,27 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
page_url = 'https://wstream.video/video.php?file_code=' + code
code = page_url.split('=')[-1]
post = urllib.urlencode({
'videox': code
})
post = urllib.urlencode({'videox': code})
data = httptools.downloadpage(page_url, headers=headers, post=post, follow_redirects=True).data.replace('https','http')
logger.info("[wstream.py] data=" + data)
vid = scrapertools.find_multiple_matches(data, 'download_video.*?>.*?<.*?<td>([^\,,\s]+)')
data = httptools.downloadpage(page_url, headers=headers, post=post, follow_redirects=True).data
headers.append(['Referer', page_url])
post_data = scrapertools.find_single_match(data,"</div>\s*<script type='text/javascript'>(eval.function.p,a,c,k,e,.*?)\s*</script>")
_headers = urllib.urlencode(dict(headers))
post_data = scrapertools.find_single_match(data, r"</div>\s*<script type='text/javascript'>(eval.function.p,a,c,k,e,.*?)\s*</script>")
if post_data != "":
from lib import jsunpack
data = jsunpack.unpack(post_data)
logger.info("[wstream.py] data=" + data)
block = scrapertools.find_single_match(data, 'sources:\s*\[[^\]]+\]')
data = block
media_urls = scrapertools.find_multiple_matches(data, '(http.*?\.mp4)')
_headers = urllib.urlencode(dict(headers))
i = 0
data = scrapertools.find_single_match(data, r'sources:\s*(\[[^\]]+\])')
data = re.sub('([A-z]+):(?!/)','"\\1":',data)
keys = json.loads(data)
for media_url in media_urls:
video_urls.append([vid[i] if vid else 'video' + " mp4 [wstream] ", media_url + '|' + _headers])
i = i + 1
for key in keys:
video_urls.append(['%s [%sp]' % (key['type'].replace('video/',''), key['label']), key['src'].replace('https','http') + '|' + _headers])
else:
media_urls = scrapertools.find_multiple_matches(data, '(http.*?\.mp4)')
for video_url in video_urls:
logger.info("[wstream.py] %s - %s" % (video_url[0], video_url[1]))
logger.info(video_urls)
for media_url in media_urls:
video_urls.append(['video' + " mp4 [wstream] ", media_url + '|' + _headers])
return video_urls
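The heart of the new branch is turning the unpacked JavaScript sources array into valid JSON before json.loads; the (?!/) lookahead keeps the colon of http:// from being quoted along with the bare keys. A self-contained sketch with an invented block:

import re, json

# Hypothetical unpacked block, shaped like the jsunpack output handled above
raw = 'sources: [{src:"http://cdn.example/v.mp4",type:"video/mp4",label:"480"}]'
block = re.search(r'sources:\s*(\[[^\]]+\])', raw).group(1)
quoted = re.sub(r'([A-z]+):(?!/)', r'"\1":', block)  # quote the bare keys
for source in json.loads(quoted):
    print('%s [%sp]' % (source['type'].replace('video/', ''), source['label']))  # mp4 [480p]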