diff --git a/channels/eurostreaming_actor.py b/channels/eurostreaming_actor.py index cdb4efa5..d7caf826 100644 --- a/channels/eurostreaming_actor.py +++ b/channels/eurostreaming_actor.py @@ -48,7 +48,7 @@ def episodios(item): data = support.match(support.match(data, patron=r'

').match, headers=headers).data patronBlock = r'tab-content(?P<block>.*?)serie-player' - patron = r'data-link="(?P<url>http.*?)".*?data.num..(?P<season>\d+)x(?P<episode>\d+)" data-title="(?P<title>[^"]+)' + patron = r'data.num..(?P<season>\d+)x(?P<episode>\d+)" data-title="(?P<title>[^"]+).*?data-link="(?P<url>http.*?)</li>' return locals() diff --git a/core/support.py b/core/support.py index 5ea884dc..e72fbb63 100755 --- a/core/support.py +++ b/core/support.py @@ -1487,7 +1487,7 @@ def get_jwplayer_mediaurl(data, srvName, onlyHttp=False, dataIsBlock=False, hls=False): for url, quality in sources: quality = 'auto' if not quality else quality if url.split('.')[-1] != 'mpd': - _type = url.split('.')[-1].split('?')[0] + _type = url.split('?')[0].split('.')[-1] if _type == 'm3u8' and hls: _type = 'hls' video_urls.append([_type + ' [' + quality + '] [' + srvName + ']', url.replace(' ', '%20') if not onlyHttp else url.replace('https://', 'http://')]) diff --git a/servers/dropload.json b/servers/dropload.json new file mode 100644 index 00000000..509d2545 --- /dev/null +++ b/servers/dropload.json @@ -0,0 +1,29 @@ +{ + "active": true, + "find_videos": { + "patterns": [ + { + "pattern": "https?://dropload.io/(\\w{12})", + "url": "https://dropload.io/\\1" + }, + { + "pattern": "https?://dropload.io/embed-(\\w{12}).html", + "url": "https://dropload.io/\\1" + } + ] + }, + "free": true, + "id": "dropload", + "name": "Dropload", + "settings": [ + { + "default": false, + "enabled": true, + "id": "black_list", + "label": "$ADDON[plugin.video.kod 70708]", + "type": "bool", + "visible": true + } + ], + "cloudflare": true +} diff --git a/servers/dropload.py b/servers/dropload.py new file mode 100644 index 00000000..ae32b0e1 --- /dev/null +++ b/servers/dropload.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +try: + import urllib.parse as urllib +except ImportError: + import urllib + +from core import httptools, support +from core import scrapertools +from platformcode import logger, config + + +def test_video_exists(page_url): 
+ logger.debug("(page_url='%s')" % page_url) + global data + data = httptools.downloadpage(page_url).data + + if "File Not Found" in data: + return False, config.get_localized_string(70449) % "Dropload" + return True, "" + + +def get_video_url(page_url, premium=False, user="", password="", video_password=""): + logger.debug(" url=" + page_url) + video_urls = [] + + global data + vres = scrapertools.find_multiple_matches(data, 'nowrap[^>]+>([^,]+)') + if not vres: vres = scrapertools.find_multiple_matches(data, '<td>(\d+x\d+)') + + data_pack = scrapertools.find_single_match(data, "</div>\n\s*<script[^>]+>(eval.function.p,a,c,k,e,.*?)\s*</script>") + if data_pack != "": + from lib import jsunpack + data = jsunpack.unpack(data_pack) + + _headers = urllib.urlencode(httptools.default_headers) + video_urls = support.get_jwplayer_mediaurl(data, 'dropload') + + return sorted(video_urls, key=lambda x: int(x[0].split('x')[0])) if vres else video_urls