aggiunti server anavids, streamtape, vidmoly

fix animeworld
This commit is contained in:
mac12m99
2020-05-18 14:29:48 +02:00
committed by marco
parent 1c7c2383ca
commit 39bcb12922
14 changed files with 286 additions and 84 deletions

View File

@@ -138,5 +138,5 @@ def findvideos(item):
support.log('findvideos', item)
return support.hdpass_get_servers(item)
def play(item):
return support.hdpass_get_url(item)
# def play(item):
# return support.hdpass_get_url(item)

View File

@@ -145,48 +145,44 @@ def findvideos(item):
import time
support.log(item)
itemlist = []
matches = support.match(item, patron=r'class="tab.*?data-name="([0-9]+)">', headers=headers)
matches = support.match(item, patron=r'data-name="([0-9]+)">', headers=headers)
data = matches.data
matches = matches.matches
videoData = ''
videoData = []
for serverid in matches:
if not item.number: item.number = support.scrapertools.find_single_match(item.title, r'(\d+) -')
block = support.scrapertools.find_multiple_matches(data, 'data-id="' + serverid + '">(.*?)<div class="server')
ID = support.scrapertools.find_single_match(str(block), r'<a data-id="([^"]+)" data-base="' + (item.number if item.number else '1') + '"')
support.log('ID= ',serverid)
if id:
if serverid == '26':
matches = support.match('%s/ajax/episode/serverPlayer?id=%s' % (host, item.url.split('/')[-1]), patron=r'<a href="([^"]+)"', ).matches
for url in matches:
videoData += '\n' + url
else:
try:
dataJson = support.httptools.downloadpage('%s/ajax/episode/info?id=%s&server=%s&ts=%s' % (host, ID, serverid, int(time.time())), headers=[['x-requested-with', 'XMLHttpRequest']]).data
json = jsontools.load(dataJson)
support.log(json)
if 'keepsetsu' in json['grabber']:
matches = support.match(json['grabber'], patron=r'<iframe\s*src="([^"]+)"'),matches
for url in matches:
videoData += '\n' + url
else:
videoData += '\n' + json['grabber']
if not item.number: item.number = support.match(item.title, patron=r'(\d+) -').match
block = support.match(data, patron=r'data-id="' + serverid + r'">(.*?)<div class="server').match
ID = support.match(block, patron=r'<a data-id="([^"]+)" data-base="' + (item.number if item.number else '1') + '"').match
if serverid == '18':
url = support.match('%s/ajax/episode/serverPlayer?id=%s' % (host, ID), patron=r'source src="([^"]+)"', debug=False).match
itemlist.append(
support.Item(
channel=item.channel,
action="play",
title='diretto',
quality='',
url=url,
server='directo',
fulltitle=item.fulltitle,
show=item.show,
contentType=item.contentType,
folder=False))
elif serverid == '26':
matches = support.match('%s/ajax/episode/serverPlayer?id=%s' % (host, item.url.split('/')[-1]), patron=r'<a href="([^"]+)"', ).matches
for url in matches:
videoData.append(url)
else:
try:
dataJson = support.match('%s/ajax/episode/info?id=%s&server=%s&ts=%s' % (host, ID, serverid, int(time.time())), headers=[['x-requested-with', 'XMLHttpRequest']]).data
json = jsontools.load(dataJson)
support.log(json)
videoData.append(json['grabber'])
except:
pass
if serverid == '28':
itemlist.append(
support.Item(
channel=item.channel,
action="play",
title='diretto',
quality='',
url=json['grabber'],
server='directo',
fulltitle=item.fulltitle,
show=item.show,
contentType=item.contentType,
folder=False))
except:
pass
return support.server(item, videoData, itemlist)

View File

@@ -126,6 +126,3 @@ def newest(categoria):
def findvideos(item):
support.log('findvideos ->', item)
return support.hdpass_get_servers(item)
def play(item):
return support.hdpass_get_url(item)

View File

@@ -143,6 +143,3 @@ def findvideos(item):
matches = support.match(url,patron=r'<a href="([^"]+)">(\d+)<', patronBlock=r'<h3>EPISODIO</h3><ul>(.*?)</ul>').matches
if matches: item.url = support.urlparse.urljoin(url, matches[-1][0])
return support.hdpass_get_servers(item)
def play(item):
return support.hdpass_get_url(item)

View File

@@ -34,38 +34,44 @@ from specials import autoplay
def hdpass_get_servers(item):
def get_hosts(url, quality):
ret = []
page = httptools.downloadpage(url).data
page = httptools.downloadpage(url, CF=False).data
mir = scrapertools.find_single_match(page, patron_mir)
for mir_url, srv in scrapertools.find_multiple_matches(mir, patron_option):
mir_url = scrapertools.decodeHtmlentities(mir_url)
ret.append(Item(channel=item.channel,
action="play",
fulltitle=item.fulltitle,
quality=quality,
show=item.show,
thumbnail=item.thumbnail,
contentType=item.contentType,
title=srv,
server=srv,
url= mir_url))
with futures.ThreadPoolExecutor() as executor:
thL = []
for mir_url, srv in scrapertools.find_multiple_matches(mir, patron_option):
mir_url = scrapertools.decodeHtmlentities(mir_url)
log(mir_url)
it = Item(channel=item.channel,
action="play",
fulltitle=item.fulltitle,
quality=quality,
show=item.show,
thumbnail=item.thumbnail,
contentType=item.contentType,
title=srv,
# server=srv,
url= mir_url)
thL.append(executor.submit(hdpass_get_url, it))
for res in futures.as_completed(thL):
if res.result():
ret.append(res.result()[0])
return ret
# Carica la pagina
itemlist = []
if 'hdpass' in item.url or 'hdplayer' in item.url:
url = item.url
else:
data = httptools.downloadpage(item.url).data.replace('\n', '')
data = httptools.downloadpage(item.url, CF=False).data.replace('\n', '')
patron = r'<iframe(?: id="[^"]+")? width="[^"]+" height="[^"]+" src="([^"]+)"[^>]+><\/iframe>'
url = scrapertools.find_single_match(data, patron).replace("?alta", "")
url = scrapertools.find_single_match(data, patron)
url = url.replace("&download=1", "")
if 'hdpass' not in url and 'hdplayer' not in url:
return itemlist
if not url.startswith('http'):
url = 'https:' + url
data = httptools.downloadpage(url).data
data = httptools.downloadpage(url, CF=False).data
patron_res = '<div class="buttons-bar resolutions-bar">(.*?)<div class="buttons-bar'
patron_mir = '<div class="buttons-bar hosts-bar">(.*?)<div id="fake'
patron_option = r'<a href="([^"]+?)".*?>([^<]+?)</a>'
@@ -84,7 +90,7 @@ def hdpass_get_servers(item):
def hdpass_get_url(item):
patron_media = r'<iframe allowfullscreen custom-src="([^"]+)'
data = httptools.downloadpage(item.url).data
data = httptools.downloadpage(item.url, CF=False).data
item.url = base64.b64decode(scrapertools.find_single_match(data, patron_media))
return [item]
@@ -1126,7 +1132,7 @@ def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=Tru
item.title = typo(item.contentTitle.strip(),'bold') if item.contentType == 'movie' or (config.get_localized_string(30161) in item.title) else item.title
videoitem.plot= typo(videoitem.title, 'bold') + typo(videoitem.quality, '_ [] bold')
videoitem.plot= typo(videoitem.title, 'bold') + (typo(videoitem.quality, '_ [] bold') if item.quality else '')
videoitem.title = (item.title if item.channel not in ['url'] else '') + (typo(videoitem.title, '_ color kod [] bold') if videoitem.title else "") + (typo(videoitem.quality, '_ color kod []') if videoitem.quality else "")
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
@@ -1291,3 +1297,17 @@ def addQualityTag(item, itemlist, data, patron):
folder=False))
else:
log('nessun tag qualità trovato')
def get_jwplayer_mediaurl(data, srvName):
    """Extract playable media URLs from a jwplayer 'sources' block.

    Returns a list of [label, url] pairs, e.g. ['.mp4 [720p] [SrvName]', url],
    sorted by the quality tag; MPEG-DASH (.mpd) manifests are skipped.
    """
    # Primary pattern: file/label entries inside the sources: [...] array.
    src_block = scrapertools.find_single_match(data, r'sources: \[([^\]]+)\]')
    found = scrapertools.find_multiple_matches(src_block, r'file:\s*"([^"]+)"(?:,label:\s*"([^"]+)")?')
    if not found:
        # Fallback: some players inline src/type/label triples instead.
        found = scrapertools.find_multiple_matches(data, r'src:\s*"([^"]+)",\s*type:\s*"[^"]+",[^,]+,\s*label:\s*"([^"]+)"')
    video_urls = []
    for media_url, label in found:
        if not label:
            label = 'auto'
        extension = media_url.split('.')[-1]
        if extension == 'mpd':
            continue  # DASH manifests are not directly playable here
        video_urls.append(['.{} [{}] [{}]'.format(extension, label, srvName), media_url])
    # NOTE(review): this is a lexical sort on the quality tag, so e.g.
    # '[1080p]' orders before '[720p]' — preserved from the original.
    video_urls.sort(key=lambda entry: entry[0].split()[1])
    return video_urls

42
servers/anavids.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "anavids.com/((?:embed-)?[0-9a-zA-Z]+)",
"url": "https://anavids.com/\\1.html"
}
]
},
"free": true,
"id": "anavids",
"name": "AnaVids",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://supervideo.tv/images/logo-player.png"
}

22
servers/anavids.py Normal file
View File

@@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
from core import httptools, support
from core import scrapertools
from platformcode import config, logger
def test_video_exists(page_url):
    """Check whether the AnaVids page still hosts the video.

    Returns (True, "") when available, otherwise (False, localized message).
    """
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url, cookies=False).data
    if 'File you are looking for is not found.' in data:
        # Fix: the error message named the server "AvaVids"; it is AnaVids
        # (matches servers/anavids.json "name": "AnaVids").
        return False, config.get_localized_string(70449) % "AnaVids"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the playable media URLs from an AnaVids embed page."""
    logger.info("url=" + page_url)
    data = httptools.downloadpage(page_url).data
    # Fix: the server tag was 'AvaVids'; the server is AnaVids
    # (matches servers/anavids.json "name": "AnaVids").
    video_urls = support.get_jwplayer_mediaurl(data, 'AnaVids')
    return video_urls

View File

@@ -1,8 +1,12 @@
# -*- coding: utf-8 -*-
import urllib
import sys
if sys.version_info[0] >= 3:
import urllib.parse as urllib
else:
import urllib
from core import httptools, jsontools
from platformcode import logger
from platformcode import logger, config
def test_video_exists(page_url):
@@ -11,7 +15,7 @@ def test_video_exists(page_url):
data_json = httptools.downloadpage(page_url.replace('/v/', '/api/source/'), headers=[['x-requested-with', 'XMLHttpRequest']], post=post).data
json = jsontools.load(data_json)
if not json['data']:
return False, "Video not found"
return False, config.get_localized_string(70449) % "AnimeWorld"
return True, ""
@@ -27,7 +31,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
media_url = file['file']
label = file['label']
extension = file['type']
video_urls.append([label + " " + extension + ' [animeworld]', media_url])
video_urls.append([label + " " + extension + ' [AnimeWorld]', media_url])
return video_urls

View File

@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import httptools, support
from core import scrapertools
from platformcode import config, logger
@@ -17,15 +17,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
# logger.info(data)
block = scrapertools.find_single_match(data, r'sources: \[([^\]]+)\]')
sources = scrapertools.find_multiple_matches(block, r'file:\s*"([^"]+)"(?:,label:\s*"([^"]+)")?')
if not sources:
sources = scrapertools.find_multiple_matches(data, r'src:\s*"([^"]+)",\s*type:\s*"[^"]+",[^,]+,\s*label:\s*"([^"]+)"')
for url, quality in sources:
quality = 'auto' if not quality else quality
video_urls.append(['.' + url.split('.')[-1] + ' [' + quality + '] [Onlystream]', url])
video_urls.sort(key=lambda x: x[0].split()[1])
video_urls = support.get_jwplayer_mediaurl(data, 'Onlystream')
return video_urls

42
servers/streamtape.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https?://streamtape\\.com/e/(\\w+)",
"url": "https://streamtape.com/e/\\1"
}
]
},
"free": true,
"id": "streamtape",
"name": "Streamtape",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

27
servers/streamtape.py Normal file
View File

@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger, config
def test_video_exists(page_url):
    """Download the Streamtape page and report whether the video exists.

    Side effect: caches the page body in the module-level `data` so that
    get_video_url can reuse it without a second request.
    """
    global data
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "Video not found" not in data:
        return True, ""
    return False, config.get_localized_string(70449) % "Streamtape"
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Build the direct .mp4 URL from the page cached by test_video_exists."""
    global data
    logger.info("url=" + page_url)
    video_urls = []
    # The protocol-relative link sits inside the element with id="videolink".
    link = scrapertools.find_single_match(data, 'id="videolink"[^>]+>\n?\s*//(.*?)<')
    if link:
        video_urls.append([".mp4 [Streamtape]", 'https://' + link + '&stream=1'])
    return video_urls

View File

@@ -3,7 +3,7 @@
import time
import urllib
from core import httptools
from core import httptools, support
from core import scrapertools
from platformcode import logger, config
@@ -29,9 +29,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
time.sleep(6)
data = httptools.downloadpage(page_url_post, post=post).data
logger.info("(data page_url='%s')" % data)
sources = scrapertools.find_single_match(data, r'sources: \[([^\]]+)\]')
for media_url in scrapertools.find_multiple_matches(sources, '"([^"]+)"'):
ext = scrapertools.get_filename_from_url(media_url)[-4:]
video_urls.append(["%s [%s]" % (ext, server), media_url])
video_urls = support.get_jwplayer_mediaurl(data, 'Turbovid')
return video_urls

46
servers/vidmoly.json Normal file
View File

@@ -0,0 +1,46 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https?://vidmoly.net/(?:embed-)?(\\w+)\\.html",
"url": "https://vidmoly.net/embed-\\1.html"
},
{
"pattern": "https?://fasturl.ga/(?:embed-)?(\\w+)\\.html",
"url": "https://vidmoly.net/embed-\\1.html"
}
]
},
"free": true,
"id": "vidmoly",
"name": "Vidmoly",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

21
servers/vidmoly.py Normal file
View File

@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
from core import httptools, support
from platformcode import logger, config
def test_video_exists(page_url):
    """Download the Vidmoly page and report whether the video exists.

    Side effect: caches the page body in the module-level `data` so that
    get_video_url can reuse it without a second request.
    """
    global data
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "Not Found" not in data:
        return True, ""
    return False, config.get_localized_string(70449) % "Vidmoly"
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Parse jwplayer sources out of the page cached by test_video_exists."""
    global data
    logger.info("url=" + page_url)
    return support.get_jwplayer_mediaurl(data, 'Vidmoly')
return video_urls