Merge remote-tracking branch 'origin/master'

marco committed 2020-01-17 19:13:01 +01:00
12 changed files with 159 additions and 93 deletions


@@ -30,6 +30,7 @@
"netfreex": "https://www.netfreex.online",
"piratestreaming": "https://www.piratestreaming.gratis",
"polpotv": "https://polpo.tv",
"pufimovies": "https://pufimovies.com",
"seriehd": "https://www.seriehd.watch",
"serietvonline": "https://serietvonline.monster",
"serietvsubita": "http://serietvsubita.xyz",

channels/pufimovies.json Normal file

@@ -0,0 +1,11 @@
{
"id": "pufimovies",
"name": "PufiMovies",
"active": true,
"adult": false,
"language": ["ita", "sub-ita"],
"thumbnail": "pufimovies.png",
"banner": "pufimovies.png",
"categories": ["movie","tvshow"],
"settings": []
}

channels/pufimovies.py Normal file

@@ -0,0 +1,111 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for PufiMovies
# ------------------------------------------------------------
from core import support
host = support.config.get_channel_url()
list_servers = ['mixdrop', 'wstream', 'vupplayer', 'supervideo', 'cloudvideo', 'gounlimited']
list_quality = ['default','1080p', '720p', '480p', '360p']
headers = [['Referer', host]]
@support.menu
def mainlist(item):
film = [
('Generi', ['', 'menu', 'Film']),
('Più Visti', ['','peliculas', 'most'])
]
tvshow = ['',
('Generi', ['', 'menu', 'Serie Tv']),
('Ultimi Episodi', ['','peliculas', 'last'])
]
search = ''
return locals()
@support.scrape
def menu(item):
action = 'peliculas'
patronBlock = item.args + r' Categorie</a>\s*<ul(?P<block>.*?)</ul>'
patronMenu = r'<a href="(?P<url>[^"]+)"[^>]+>(?P<title>[^>]+)<'
return locals()
def search(item, text):
support.log('search', item)
text = text.replace(' ', '+')
item.url = host + '/search/keyword/' + text
try:
item.args = 'search'
return peliculas(item)
# Continue the search in case of error
except:
import sys
for line in sys.exc_info():
support.log('search log:', line)
return []
def newest(categoria):
support.log(categoria)
itemlist = []
item = support.Item()
item.url = host
item.action = 'peliculas'
try:
if categoria == 'peliculas':
item.contentType = 'movie'
itemlist = peliculas(item)
else:
item.args = 'last'
item.contentType = 'tvshow'
itemlist = peliculas(item)
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Continue the search in case of error
except:
import sys
for line in sys.exc_info():
support.log('{0}'.format(line))
return []
return itemlist
@support.scrape
def peliculas(item):
if item.contentType == 'tvshow' and not item.args:
action = 'episodios'
patron = r'<div class="movie-box">\s*<a href="(?P<url>[^"]+)">[^>]+>[^>]+>\D+Streaming\s(?P<lang>[^"]+)[^>]+>[^>]+>[^>]+>(?P<quality>[^<]+)[^>]+>[^>]+>[^>]+>\s*<img src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>(?P<rating>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)[^>]+>[^>]+>[^>]+>\s*(?P<year>\d+)'
elif item.contentType == 'movie' and not item.args:
patron = r'<div class="existing_item col-6 col-lg-3 col-sm-4 col-xl-4">\s*<div class="movie-box">\s*<a href="(?P<url>(?:http(?:s)://[^/]+)?/(?P<type>[^/]+)/[^"]+)">[^>]+>[^>]+>\D+Streaming\s*(?P<lang>[^"]+)">[^>]+>[^>]+>(?P<quality>[^<]+)<[^>]+>[^>]+>[^>]+>\s*<img src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>(?P<rating>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)[^>]+>[^>]+>[^>]+>\s*(?:(?P<year>\d+))?[^>]+>[^>]+>[^>]+>[^>]+>(?P<plot>[^<]*)<'
elif item.args == 'last':
patron = r'<div class="episode-box">[^>]+>[^>]+>[^>]+>\D+Streaming\s(?P<lang>[^"]+)">[^>]+>[^>]+>(?P<quality>[^<]+)<[^>]+>[^>]+>[^>]+>[^^>]+>[^>]+>\s*<img src="(?P<thumb>[^"]+)"[^[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>[^>]+>(?P<title>[^<]+)<[^>]+>[^>]+>[^>]+>\D*(?P<season>\d+)[^>]+>\D*(?P<episode>\d+)'
elif item.args == 'most':
patron = r'div class="sm-113 item">\s*<a href="(?P<url>[^"]+)">[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s<img src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*(?P<title>[^<]+)'
else:
patron = r'<div class="movie-box">\s*<a href="(?P<url>(?:http(?:s)://[^/]+)?/(?P<type>[^/]+)/[^"]+)">[^>]+>[^>]+>\D+Streaming\s*(?P<lang>[^"]+)">[^>]+>[^>]+>(?P<quality>[^<]+)<[^>]+>[^>]+>[^>]+>\s*<img src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>(?P<rating>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)[^>]+>[^>]+>[^>]+>\s*(?:(?P<year>\d+))?[^>]+>[^>]+>[^>]+>[^>]+>(?P<plot>[^<]*)<'
typeActionDict = {'findvideos':['movie'], 'episodios':['tvshow']}
typeContentDict = {'movie':['movie'], 'tvshow':['tvshow']}
patronNext = r'<a href="([^"]+)"[^>]+>&raquo;'
return locals()
@support.scrape
def episodios(item):
patron = r'<div class="episode-box">[^>]+>[^>]+>[^>]+>\D+Streaming\s(?P<lang>[^"]+)">[^>]+>[^>]+>(?P<quality>[^<]+)<[^>]+>[^>]+>[^>]+>\s*<img src="(?P<thumb>[^"]+)"[^[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>[^>]+>(?P<title>[^<]+)<[^>]+>[^>]+>[^>]+>\D*(?P<season>\d+)[^>]+>\D*(?P<episode>\d+)'
return locals()
def findvideos(item):
support.log()
# match = support.match(item, patron='wstream', debug=True)
return support.server(item)
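The channel functions above follow the addon's declarative convention: each scraper builds its configuration (patron, action, patronNext, and so on) in local variables and returns locals() for the @support.scrape decorator to consume. The decorator itself is not part of this diff; the sketch below only illustrates the locals()-as-config mechanism, with a hypothetical minimal decorator and item type.

    import re

    def scrape(func):
        # hypothetical stand-in for support.scrape: read the function's
        # returned locals() as a declarative scraper configuration
        def wrapper(item):
            cfg = func(item)
            results = []
            for m in re.finditer(cfg['patron'], item.data):
                entry = m.groupdict()
                entry['action'] = cfg.get('action', 'findvideos')
                results.append(entry)
            return results
        return wrapper

    class Item(object):
        def __init__(self, data):
            self.data = data

    @scrape
    def peliculas(item):
        action = 'findvideos'
        patron = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)</a>'
        return locals()

    print(peliculas(Item('<a href="https://example.com/m1">Movie 1</a>')))
    # [{'url': 'https://example.com/m1', 'title': 'Movie 1', 'action': 'findvideos'}]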


@@ -219,7 +219,7 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
if scraped['season']:
stagione = scraped['season']
- episode = scraped['season'] +'x'+ scraped['episode']
+ episode = scraped['season'] +'x'+ scraped['episode'].zfill(2)
elif item.season:
episode = item.season +'x'+ scraped['episode']
elif item.contentType == 'tvshow' and (scraped['episode'] == '' and scraped['season'] == '' and stagione == ''):
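A minimal sketch of what the .zfill(2) change above buys (values hypothetical): the episode number is left-padded to two digits, so episode labels compare correctly as strings.

    season, episode = '1', '2'
    old_label = season + 'x' + episode           # '1x2'
    new_label = season + 'x' + episode.zfill(2)  # '1x02'
    assert '1x02' < '1x10' < '1x11'              # padded labels sort as expected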
@@ -295,7 +295,7 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
quality=quality,
url=scraped["url"],
infoLabels=infolabels,
- thumbnail=item.thumbnail if function == 'episodios' else scraped["thumb"],
+ thumbnail=item.thumbnail if function == 'episodios' and not scraped["thumb"] else scraped["thumb"],
args=item.args,
contentSerieName= scraped['title'] if item.contentType or CT != 'movie' and function != 'episodios' else item.fulltitle if function == 'episodios' else '',
contentTitle= scraped['title'] if item.contentType or CT == 'movie' else '',
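The reworked conditional now falls back to the parent item's artwork only when the episode scrape yielded no thumb of its own, rather than always preferring it for episode listings. A small sketch with hypothetical values:

    function, item_thumbnail = 'episodios', 'show.png'
    for scraped_thumb in ('episode.png', ''):
        thumbnail = item_thumbnail if function == 'episodios' and not scraped_thumb else scraped_thumb
        print(thumbnail)  # 'episode.png' first, then the 'show.png' fallback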


@@ -726,8 +726,8 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
mediaurl, view, mpd = get_video_seleccionado(item, seleccion, video_urls)
if mediaurl == "":
return
- # no certificate verification
- mediaurl = mediaurl.replace('https://', 'http://')
+ # # no certificate verification
+ # mediaurl = mediaurl.replace('https://', 'http://')
# retrieve the video information.
if not item.contentThumbnail:


@@ -9,8 +9,10 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
- data = httptools.downloadpage(page_url)
- if data.code == 404:
+ html = httptools.downloadpage(page_url)
+ global data
+ data = html.data
+ if html.code == 404:
return False, config.get_localized_string(70292) % "CloudVideo"
return True, ""
@@ -18,7 +20,8 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
- data = httptools.downloadpage(page_url).data
+ global data
+ # data = httptools.downloadpage(page_url).data
# enc_data = scrapertools.find_single_match(data, "text/javascript">(.+?)</script>")
# dec_data = jsunpack.unpack(enc_data)
sources = scrapertools.find_single_match(data, "<source(.*?)</source")
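Both hunks above apply the caching idea this commit rolls out across several connectors: test_video_exists() downloads the page once and stores it in a module-level data variable, so get_video_url() can parse the cached page instead of fetching it again. A self-contained sketch of the pattern, with fetch() and the markup as hypothetical stand-ins for httptools.downloadpage and the real page:

    data = ''

    def fetch(url):
        # stand-in for httptools.downloadpage(url).data
        return '<source src="https://example.com/v.mp4" type="video/mp4"></source>'

    def test_video_exists(page_url):
        global data
        data = fetch(page_url)  # download once, cache at module level
        return 'File Not Found' not in data

    def get_video_url(page_url):
        # parse the page cached by test_video_exists; no second request
        start = data.find('src="') + len('src="')
        return [data[start:data.find('"', start)]]

    if test_video_exists('https://cloudvideo.example/embed-abc'):
        print(get_video_url('https://cloudvideo.example/embed-abc'))

The trade-off is hidden module-level state: get_video_url() silently depends on test_video_exists() having run first.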


@@ -9,7 +9,7 @@ import ast
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url, cookies=False).data
if 'File Not Found' in data:
return False, config.get_localized_string(70449) % "SuperVideo"
@@ -20,27 +20,35 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
- data = httptools.downloadpage(page_url).data
+ logger.info('SUPER DATA= '+data)
+ # data = httptools.downloadpage(page_url).data
+ global data
code_data = scrapertools.find_single_match(data, "<script type='text/javascript'>(eval.*)")
if code_data:
code = jsunpack.unpack(code_data)
+ # corrections
+ if 'file' in code and not '"file"' in code: code = code.replace('file', '"file"')
+ if 'label' in code and not '"label"' in code: code = code.replace('label', '"label"')
+ match = scrapertools.find_single_match(code, r'sources:(\[[^]]+\])')
+ lSrc = ast.literal_eval(match)
- lQuality = ['360p', '720p', '1080p', '4k'][:len(lSrc)-1]
- lQuality.reverse()
+ # lQuality = ['360p', '720p', '1080p', '4k'][:len(lSrc)-1]
+ # lQuality.reverse()
+ for source in lSrc:
+     quality = source['label'] if source.has_key('label') else 'auto'
+     video_urls.append(['.' + source['file'].split('.')[-1] + ' [' + quality + '] [SuperVideo]', source['file']])
- for n, source in enumerate(lSrc):
-     quality = 'auto' if n==0 else lQuality[n-1]
-     video_urls.append(['.' + source.split('.')[-1] + '(' + quality + ') [SuperVideo]', source])
else:
+ logger.info('ELSE!')
matches = scrapertools.find_multiple_matches(data, r'src:\s*"([^"]+)",\s*type:\s*"[^"]+"(?:\s*, res:\s(\d+))?')
for url, quality in matches:
if url.split('.')[-1] != 'm3u8':
- video_urls.append([url.split('.')[-1] + ' [' + quality + ']', url])
+ video_urls.append([url.split('.')[-1] + ' [' + quality + '] [SuperVideo]', url])
else:
video_urls.append([url.split('.')[-1], url])
- video_urls.sort(key=lambda x: x[0].split()[-1])
+ video_urls.sort(key=lambda x: x[0].split()[-2])
return video_urls
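The new path takes the jsunpack-decoded player setup, quotes the bare jwplayer keys so the sources array becomes valid Python literal syntax, and parses it with ast.literal_eval; the sort key also moves from split()[-1] to split()[-2] because every title now ends with '[SuperVideo]', leaving the quality token second-to-last. A standalone sketch of the key-quoting step (the input string is hypothetical; the real one comes from jsunpack.unpack):

    import ast

    code = 'sources:[{file:"https://example.com/v.mp4",label:"720p"}]'
    # quote the bare keys so the JS array parses as a Python literal
    if 'file' in code and not '"file"' in code:
        code = code.replace('file', '"file"')
    if 'label' in code and not '"label"' in code:
        code = code.replace('label', '"label"')
    sources = ast.literal_eval(code[code.index('['):code.rindex(']') + 1])
    for source in sources:
        quality = source.get('label', 'auto')  # Python 3 spelling of has_key
        print('.' + source['file'].split('.')[-1] + ' [' + quality + '] [SuperVideo]')
    # .mp4 [720p] [SuperVideo]

Note that the blanket replace would also mangle a URL that itself contains the substring 'file'; the connector's correction shares that caveat.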


@@ -8,6 +8,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
+ global data
data = httptools.downloadpage(page_url).data
if "Page not found" in data or "File was deleted" in data:
return False, "[vidoza] El archivo no existe o ha sido borrado"
@@ -19,7 +20,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
- data = httptools.downloadpage(page_url).data
+ global data
video_urls = []
s = scrapertools.find_single_match(data, r'sourcesCode\s*:\s*(\[\{.*?\}\])')
@@ -30,8 +31,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
if 'src' in enlace or 'file' in enlace:
url = enlace['src'] if 'src' in enlace else enlace['file']
tit = ''
- if 'label' in enlace: tit += '[%s]' % enlace['label']
- if 'res' in enlace: tit += '[%s]' % enlace['res']
+ if 'label' in enlace: tit += ' [%s]' % enlace['label']
+ if 'res' in enlace: tit += ' [%s]' % enlace['res']
if tit == '' and 'type' in enlace: tit = enlace['type']
if tit == '': tit = '.mp4'


@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://vup.to/embed-[A-z0-9]+.html)",
"url": "\\1"
}
]
},
"free": true,
"id": "vup",
"name": "VUP",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "server_vupplayer.png"
}


@@ -1,28 +0,0 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Connector vup By Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "no longer exists" in data or "to copyright issues" in data:
return False, "[vup] El video ha sido borrado"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
bloque = scrapertools.find_single_match(data, 'sources:.*?\]')
video_urls = []
videourl = scrapertools.find_multiple_matches(bloque, '"(http[^"]+)')
for video in videourl:
video_urls.append([".MP4 [vup]", video])
video_urls = video_urls[::-1]
return video_urls


@@ -7,6 +7,8 @@ from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
+ global data
+ data = data.data
if data.code == 404:
return False, config.get_localized_string(70449)
return True, ""
@@ -15,8 +17,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
- data = httptools.downloadpage(page_url).data
- logger.info('VUP DATA= '+ data)
+ global data
patron = r'sources:\s*\[\{src:\s*"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:


@@ -7,7 +7,7 @@
"find_videos": {
"patterns": [
{
"pattern": "wstream\\.video.*?(?<!api)(?:=|/)(?:embed-)?(?<!streaming\\.php\\?id=)([a-z0-9A-Z]+)(?:[^/_.a-z0-9A-Z]|$)",
"pattern": "wstream\\.video(?!<).*?(?<!api)(?:=|/)(?:embed-)?(?<!streaming\\.php\\?id=)([a-z0-9A-Z]+)(?:[^/_.a-z0-9A-Z]|$)",
"url": "https://wstream.video/video.php?file_code=\\1"
},
{