Added tapmovie channel (experimental) and related servers (hxfile, playtube, fixes to others)
@@ -15,6 +15,7 @@
     "cinemalibero": "https://cinemalibero.blog",
     "cinetecadibologna": "http://cinestore.cinetecadibologna.it",
     "discoveryplus": "https://www.discoveryplus.com",
+    "tapmovie": "https://it.tapmovie.net",
     "dreamsub": "https://dreamsub.stream",
     "dsda": "https://www.dsda.press",
     "eurostreaming": "https://eurostreaming.click",
channels/tapmovie.json (new file, 11 lines)
@@ -0,0 +1,11 @@
+{
+    "id": "tapmovie",
+    "name": "Tap Movie",
+    "language": ["ita", "sub-ita"],
+    "active": true,
+    "thumbnail": "",
+    "banner": "",
+    "categories": ["movie", "tvshow", "anime"],
+    "not_active": ["include_in_newest"],
+    "settings": []
+}
channels/tapmovie.py (new file, 78 lines)
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# ------------------------------------------------------------
+# Channel for 'tapmovie'
+
+from core import support, httptools
+from core.item import Item
+
+host = support.config.get_channel_url()
+api_url = '/api/v2/'
+
+
+@support.menu
+def mainlist(item):
+    film = ['/browse/movie']
+    tvshow = ['/browse/tvshow']
+    search = ''
+
+    # [Menu entry, ['url', 'action', 'args', contentType]]
+    # top = [('Generi', ['', 'genres', '', 'undefined'])]
+
+    return locals()
+
+
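+# peliculas: build the list of latest movies or TV shows from the API 'home' payload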
+def peliculas(item):
+    itemlist = []
+    movie = item.contentType == 'movie'
+    key = 'latest_updated' if movie else 'latest_tvshows'
+    action = 'findvideos' if movie else 'episodios'
+    json = httptools.downloadpage(host + api_url + 'home', post={}).json[key]
+    for i in json:
+        itemlist.append(item.clone(id=i.get('id'), title=i.get('title'), contentTitle=i.get('title'), contentSerieName='' if movie else i.get('title'),
+                                   contentPlot=i.get('description'), thumbnail=i.get('poster'),
+                                   fanart=i.get('backdrop'), year=i.get('year'), action=action,
+                                   url='{}/{}/{}-{}'.format(host, item.contentType, i.get('id'), support.scrapertools.slugify(i.get('title')))))
+    support.tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+    return itemlist
+
+
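+# episodios: walk each season returned by the 'tvshow' endpoint, then fetch its episode list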
+def episodios(item):
+    support.info(item)
+    itemlist = []
+
+    for season in httptools.downloadpage(host + api_url + 'tvshow', post={'tvshow_id': item.id}).json.get('season', []):
+        season_id = season['season_number']
+        for episode in httptools.downloadpage(host + api_url + 'episodes', post={'tvshow_id': item.id, 'season_id': season_id}).json.get('episodes', []):
+            itemlist.append(item.clone(action="findvideos", contentSeason=season_id, contentEpisodeNumber=episode['episode_number'], id=item.id,
+                                       title=str(season_id) + 'x' + str(episode['episode_number']), contentType='episode'))
+    support.videolibrary(itemlist, item)
+    support.download(itemlist, item)
+
+    return itemlist
+
+
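+# search: the 'search' endpoint mixes films and TV shows; route each result to the right action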
+def search(item, text):
+    support.info('search', item)
+    itemlist = []
+    for result in httptools.downloadpage(host + api_url + 'search', post={'search': text}).json.get('results', []):
+        contentType = 'movie' if result['type'] == 'FILM' else 'tvshow'
+        itemlist.append(item.clone(id=result.get('id'), title=result.get('title'), contentTitle=result.get('title'),
+                                   contentSerieName='' if contentType == 'movie' else result.get('title'),
+                                   contentPlot=result.get('description'), thumbnail=result.get('poster'),
+                                   fanart=result.get('backdrop'), year=result.get('year'), action='episodios' if contentType == 'tvshow' else 'findvideos',
+                                   url='{}/{}/{}-{}'.format(host, contentType, result.get('id'), support.scrapertools.slugify(result.get('title'))),
+                                   contentType=contentType))
+    support.tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+    return itemlist
+
+
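+# findvideos: movies resolve via the 'movie' endpoint, episodes via 'episode/links'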
+def findvideos(item):
+    itemlist = []
+    if not item.contentSeason:  # movie
+        json = httptools.downloadpage(host + api_url + 'movie', post={'movie_id': item.id}).json
+    else:
+        json = httptools.downloadpage(host + api_url + 'episode/links', post={'tvshow_id': item.id, 'season_id': item.contentSeason, 'episode_id': item.contentEpisodeNumber}).json
+
+    for i in json.get('links', []) + json.get('special', []):
+        itemlist.append(Item(url=i.get('link')))
+    return support.server(item, itemlist=itemlist)
core/support.py
@@ -1145,7 +1145,10 @@ def nextPage(itemlist, item, data='', patron='', function_or_level=1, next_page=
     if next_page != "":
         if resub: next_page = re.sub(resub[0], resub[1], next_page)
         if 'http' not in next_page:
-            next_page = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') + (next_page if next_page.startswith('/') else '/' + next_page)
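+            # path-like targets are re-rooted on the site domain; bare page names are resolved against the current directory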
+            if '/' in next_page:
+                next_page = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') + (next_page if next_page.startswith('/') else '/' + next_page)
+            else:
+                next_page = '/'.join(item.url.split('/')[:-1]) + '/' + next_page
         next_page = next_page.replace('&amp;', '&')
         logger.debug('NEXT= ', next_page)
     itemlist.append(
@@ -1368,15 +1371,27 @@ def addQualityTag(item, itemlist, data, patron):
     info('nessun tag qualità trovato')

 def get_jwplayer_mediaurl(data, srvName, onlyHttp=False, dataIsBlock=False):
+    from core import jsontools

     video_urls = []
-    block = scrapertools.find_single_match(data, r'sources:\s*\[([^\]]+)\]') if not dataIsBlock else data
+    block = scrapertools.find_single_match(data, r'sources:\s*([^\]]+\])') if not dataIsBlock else data
     if block:
-        if 'file:' in block:
-            sources = scrapertools.find_multiple_matches(block, r'file:\s*"([^"]+)"(?:,label:\s*"([^"]+)")?')
-        elif 'src:' in block:
-            sources = scrapertools.find_multiple_matches(block, r'src:\s*"([^"]+)",\s*type:\s*"[^"]+"(?:,[^,]+,\s*label:\s*"([^"]+)")?')
-        else:
-            sources = [(block.replace('"', ''), '')]
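+        # try the sources block as JSON first; fall back to regex scraping when it does not parse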
+        json = jsontools.load(block)
+        if json:
+            sources = []
+            for s in json:
+                if 'file' in s.keys():
+                    src = s['file']
+                else:
+                    src = s['src']
+                sources.append((src, s.get('label')))
+        else:
+            if 'file:' in block:
+                sources = scrapertools.find_multiple_matches(block, r'file:\s*"([^"]+)"(?:,label:\s*"([^"]+)")?')
+            elif 'src:' in block:
+                sources = scrapertools.find_multiple_matches(block, r'src:\s*"([^"]+)",\s*type:\s*"[^"]+"(?:,[^,]+,\s*label:\s*"([^"]+)")?')
+            else:
+                sources = [(block.replace('"', ''), '')]
         for url, quality in sources:
             quality = 'auto' if not quality else quality
             if url.split('.')[-1] != 'mpd':
servers/fembed.py
@@ -10,11 +10,14 @@ def test_video_exists(page_url):
     global data

     # page_url = re.sub('://[^/]+/', '://feurl.com/', page_url)
-    data = httptools.downloadpage(page_url).data
+    page = httptools.downloadpage(page_url)
+    data = page.data
     if "Sorry 404 not found" in data or "This video is unavailable" in data or "Sorry this video is unavailable:" in data:
         return False, config.get_localized_string(70449) % "fembed"
-    page_url = page_url.replace("/f/","/v/")
-    page_url = page_url.replace("/v/","/api/source/")
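+    # build the API endpoint from the final URL after any redirect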
+    page_url = page.url
+    page_url = page_url.replace("/f/", "/v/")
+    page_url = page_url.replace("/v/", "/api/source/")
     data = httptools.downloadpage(page_url, post={}).json
     logger.debug(data)
     if "Video not found or" in data or "We are encoding this video" in data:
@@ -26,9 +29,9 @@ def get_video_url(page_url, user="", password="", video_password=""):
     logger.debug("(page_url='%s')" % page_url)
     video_urls = []
     for file in data['data']:
-        media_url = file['file']
-        label = file['label']
-        extension = file['type']
-        video_urls.append([ extension + ' ' + label + ' [Fembed]', media_url])
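+        # one entry per quality/format returned by the API; sorted by resolution below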
+        media_url = file['file']
+        label = file['label']
+        extension = file['type']
+        video_urls.append([extension + ' ' + label + ' [Fembed]', media_url])
     video_urls.sort(key=lambda x: int(x[0].split()[1].replace('p','')))
     return video_urls
servers/hxfile.json (new file, 41 lines)
@@ -0,0 +1,41 @@
+{
+    "active": true,
+    "find_videos": {
+        "ignore_urls": [],
+        "patterns": [
+            {
+                "pattern": "https?://hxfile.co/(?!api)(?:embed-)?([A-z0-9]+)",
+                "url": "https://hxfile.co/embed-\\1.html"
+            }
+        ]
+    },
+    "free": true,
+    "id": "hxfile",
+    "name": "HxFile",
+    "settings": [
+        {
+            "default": false,
+            "enabled": true,
+            "id": "black_list",
+            "label": "@70708",
+            "type": "bool",
+            "visible": true
+        },
+        {
+            "default": 0,
+            "enabled": true,
+            "id": "favorites_servers_list",
+            "label": "@60655",
+            "lvalues": [
+                "No",
+                "1",
+                "2",
+                "3",
+                "4",
+                "5"
+            ],
+            "type": "list",
+            "visible": false
+        }
+    ]
+}
servers/hxfile.py (new file, 25 lines)
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+
+from core import httptools, scrapertools, servertools, support
+from platformcode import logger, config
+from lib import jsunpack
+
+
+def test_video_exists(page_url):
+    logger.debug("(page_url='%s')" % page_url)
+    global data
+    data = httptools.downloadpage(page_url).data
+    if "Can't create video code" in data:
+        return False, config.get_localized_string(70292) % 'HxFile'
+    return True, ""
+
+
+def get_video_url(page_url, premium=False, user="", password="", video_password=""):
+    logger.debug("url=" + page_url)
+    global data
+    video_urls = []
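+    # the jwplayer setup is hidden in p,a,c,k,e,d JS: unpack it, then reuse the shared parser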
+    packed = scrapertools.find_single_match(data, r'(eval\s?\(function\(p,a,c,k,e,d\).*?\n)')
+    data = jsunpack.unpack(packed)
+    video_urls.extend(support.get_jwplayer_mediaurl(data, 'HxFile'))
+
+    return video_urls
servers/playtube.json (new file, 43 lines)
@@ -0,0 +1,43 @@
+{
+    "active": true,
+    "find_videos": {
+        "ignore_urls": [],
+        "patterns": [
+            {
+                "pattern": "playtube.ws/(?:embed-|)(\\w+)",
+                "url": "https://playtube.ws/embed-\\1.html"
+            }
+
+        ]
+    },
+    "free": true,
+    "id": "playtube",
+    "name": "playtube",
+    "settings": [
+        {
+            "default": false,
+            "enabled": true,
+            "id": "black_list",
+            "label": "@60654",
+            "type": "bool",
+            "visible": true
+        },
+        {
+            "default": 0,
+            "enabled": true,
+            "id": "favorites_servers_list",
+            "label": "@60655",
+            "lvalues": [
+                "No",
+                "1",
+                "2",
+                "3",
+                "4",
+                "5"
+            ],
+            "type": "list",
+            "visible": false
+        }
+    ],
+    "thumbnail": "https://i.postimg.cc/8CVV6DnF/playtube.png"
+}
servers/playtube.py (new file, 29 lines)
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------
+# playtube connector, by Alfa development Group
+# --------------------------------------------------------
+import re
+import codecs
+from core import httptools
+from core import scrapertools
+from lib import jsunpack
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+    logger.info("(page_url='%s')" % page_url)
+    global data
+    data = httptools.downloadpage(page_url)
+    if data.code == 404 or "File is no longer available" in data.data:
+        return False, "[playtube] El archivo no existe o ha sido borrado"
+    return True, ""
+
+
+def get_video_url(page_url, premium=False, user="", password="", video_password=""):
+    logger.info("url=" + page_url)
+    video_urls = []
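+    # unpack the p,a,c,k,e,d block and extract the m3u8 URL; the referer header is needed for playback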
+    pack = scrapertools.find_single_match(data.data, 'p,a,c,k,e,d.*?</script>')
+    unpacked = jsunpack.unpack(pack)
+    url = scrapertools.find_single_match(unpacked, 'file:"([^"]+)') + "|referer=%s" % page_url
+    video_urls.append(['m3u8 [playtube]', url])
+    return video_urls
servers/vidmoly.json
@@ -4,7 +4,7 @@
         "ignore_urls": [],
         "patterns": [
             {
-                "pattern": "https?://vidmoly.net/(?:embed-)?(\\w+)\\.html",
+                "pattern": "https?://vidmoly.(?:net|to)/(?:embed-)?(\\w+)\\.html",
                 "url": "https://vidmoly.net/embed-\\1.html"
             },
             {
servers/youdbox.json
@@ -4,8 +4,8 @@
         "ignore_urls": [],
         "patterns": [
             {
-                "pattern": "(https://youdbox.com/embed-[A-z0-9-]+.html)",
-                "url": "\\1"
+                "pattern": "https://youdbox.(?:com|net)/embed-([A-z0-9-]+.html)",
+                "url": "https://youdbox.net/embed-\\1"
            }
        ]
    },
servers/youdbox.py
@@ -1,14 +1,21 @@
 # -*- coding: utf-8 -*-
 # import re
 from core import httptools
 from core import scrapertools
 from platformcode import logger
+import codecs


 def get_video_url(page_url, video_password):
-    logger.debug("(page_url='%s')" % page_url)
+    logger.info("(page_url='%s')" % page_url)
     video_urls = []
     data = httptools.downloadpage(page_url).data
-    url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
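+    # the real <source> tag is split into reversed, hex-encoded chunks; rebuild and decode it first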
+    list = scrapertools.find_single_match(data, 'var [a-zA-Z0-9]+ = ([^\]]+)').replace('[', '').replace('"', '').replace('\\x', '').replace(',', ' ')
+    list = list.split()[::-1]
+    url = ""
+    for elem in list:
+        decoded = codecs.decode(elem, "hex")
+        url += decoded.decode("utf8")
+    url = scrapertools.find_single_match(url, '<source src="([^"]+)"')
     video_urls.append(["[youdbox]", url])
     return video_urls