diff --git a/servers/akvideo.py b/servers/akvideo.py
index 998a6940..3e95b6ca 100644
--- a/servers/akvideo.py
+++ b/servers/akvideo.py
@@ -5,7 +5,7 @@ import urllib, re
from core import httptools
from core import scrapertools
-from platformcode import logger, config
+from platformcode import logger, config, platformtools
from core.support import dbg
@@ -40,7 +40,15 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
# dbg()
global data
- logger.info('PAGE DATA' + data)
+ # logger.info('PAGE DATA' + data)
+ # sitekey = scrapertools.find_single_match(data, 'data-sitekey="([^"]+)')
+ sitekey = '6LeNU5IUAAAAAPNs_w-s8Rc-X2C2SPE3UW8lkkjW'
+ # from core import support
+ # support.dbg()
+ captcha = platformtools.show_recaptcha(sitekey, page_url) if sitekey else ''
+
+ if captcha:
+ data = httptools.downloadpage(page_url, post={'g-recaptcha-response': captcha}).data
vres = scrapertools.find_multiple_matches(data, 'nowrap[^>]+>([^,]+)')
if not vres: vres = scrapertools.find_multiple_matches(data, '(\d+x\d+)')
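Note: the akvideo change gates the page fetch behind a reCAPTCHA token posted back as form data. A minimal sketch of that flow, assuming a caller-supplied `solve_captcha(sitekey, page_url)` in place of `platformtools.show_recaptcha` (the function name and fallback behaviour here are ours; the fallback sitekey is the value hardcoded in the patch):

```python
import re
import urllib.parse
import urllib.request

FALLBACK_SITEKEY = "6LeNU5IUAAAAAPNs_w-s8Rc-X2C2SPE3UW8lkkjW"  # hardcoded in the patch

def fetch_with_recaptcha(page_url, html, solve_captcha):
    # Prefer the sitekey embedded in the page, as the commented-out line does.
    match = re.search(r'data-sitekey="([^"]+)', html)
    sitekey = match.group(1) if match else FALLBACK_SITEKEY
    token = solve_captcha(sitekey, page_url) if sitekey else ""
    if not token:
        return html  # no token obtained: keep working with the original page
    post = urllib.parse.urlencode({"g-recaptcha-response": token}).encode()
    with urllib.request.urlopen(page_url, data=post) as resp:
        return resp.read().decode("utf-8", "replace")
```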
diff --git a/servers/anonfile.py b/servers/anonfile.py
index 212c9ae1..911c2b73 100644
--- a/servers/anonfile.py
+++ b/servers/anonfile.py
@@ -20,15 +20,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
- patron = 'id="download-quality-(\w+).*?href="([^"]+)"'
+ patron = 'download-url.*?href="([^"]+)"'
match = scrapertools.find_multiple_matches(data, patron)
- for calidad, media_url in match:
- title = "%s [anonfile]" % (calidad)
- video_urls.append([title, media_url, int(calidad.replace("p", ""))])
-
- video_urls.sort(key=lambda x: x[2])
- for video_url in video_urls:
- video_url[2] = 0
- logger.info("%s - %s" % (video_url[0], video_url[1]))
-
+ for media_url in match:
+ media_url += "|Referer=%s" %page_url
+ title = "mp4 [anonfile]"
+ video_urls.append([title, media_url])
return video_urls
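Note: the `"|Referer=..."` suffix appended above is the Kodi convention for passing request headers to the player along with the media URL. A tiny self-contained illustration of that convention (the helper name is ours, not the add-on's):

```python
def with_player_headers(media_url, **headers):
    # Kodi-style players parse "url|Header1=v1&Header2=v2".
    if not headers:
        return media_url
    return media_url + "|" + "&".join("%s=%s" % kv for kv in headers.items())

print(with_player_headers("https://cdn.example/video.mp4",
                          Referer="https://anonfile.com/abc123"))
# https://cdn.example/video.mp4|Referer=https://anonfile.com/abc123
```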
diff --git a/servers/archiveorg.json b/servers/archiveorg.json
index 3f75ffd5..73f07ffa 100644
--- a/servers/archiveorg.json
+++ b/servers/archiveorg.json
@@ -10,8 +10,8 @@
]
},
"free": true,
- "id": "ArchiveOrg",
- "name": "archiveorg",
+ "id": "archiveorg",
+ "name": "ArchiveOrg",
"settings": [
{
"default": false,
diff --git a/servers/badshare.json b/servers/badshare.json
new file mode 100644
index 00000000..78624986
--- /dev/null
+++ b/servers/badshare.json
@@ -0,0 +1,42 @@
+{
+ "active": true,
+ "find_videos": {
+ "ignore_urls": [],
+ "patterns": [
+ {
+ "pattern": "badshare.io/(?:plugins/mediaplayer/site/_embed.php\\?u=|)([A-z0-9]+)",
+ "url": "https://badshare.io/\\1"
+ }
+ ]
+ },
+ "free": true,
+ "id": "badshare",
+ "name": "badshare",
+ "settings": [
+ {
+ "default": false,
+ "enabled": true,
+ "id": "black_list",
+ "label": "@60654",
+ "type": "bool",
+ "visible": true
+ },
+ {
+ "default": 0,
+ "enabled": true,
+ "id": "favorites_servers_list",
+ "label": "@60655",
+ "lvalues": [
+ "No",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5"
+ ],
+ "type": "list",
+ "visible": false
+ }
+ ],
+ "thumbnail": "https://badshare.io/badshare_logo.png"
+}
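Note: each `find_videos.patterns` entry pairs a regex with a `\1`-style rewrite that the server dispatcher applies to page text (the real logic lives in the add-on core; the sketch below is our approximation of that mechanism, not the dispatcher itself):

```python
import re

pattern = r"badshare.io/(?:plugins/mediaplayer/site/_embed.php\?u=|)([A-z0-9]+)"
url_tpl = r"https://badshare.io/\1"

def find_video_urls(text):
    # Rewrite every match with the \1 template, mirroring the pattern/url pair.
    return [re.sub(pattern, url_tpl, m.group(0)) for m in re.finditer(pattern, text)]

sample = "embed at badshare.io/plugins/mediaplayer/site/_embed.php?u=Ab12xyz"
print(find_video_urls(sample))  # ['https://badshare.io/Ab12xyz']
```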
diff --git a/servers/badshare.py b/servers/badshare.py
new file mode 100644
index 00000000..f613fbda
--- /dev/null
+++ b/servers/badshare.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------
+# Conector Badshare By Alfa development Group
+# --------------------------------------------------------
+
+import re
+from core import httptools
+from core import scrapertools
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+ logger.info("(page_url='%s')" % page_url)
+ global page
+ page = httptools.downloadpage(page_url)
+ if not page.sucess:
+ return False, "[Badshare] El fichero no existe o ha sido borrado"
+ return True, ""
+
+
+def get_video_url(page_url, premium=False, user="", password="", video_password=""):
+ logger.info("url=" + page_url)
+ video_urls = []
+ ext = '.mp4'
+
+ data = page.data
+ data = re.sub(r'\n|\r|\t|\s{2,}', "", data)
+ media_url, ext = scrapertools.find_single_match(data, r'file:\s*"([^"]+)",type:\s*"([^"]+)"')
+
+ video_urls.append(["%s [Badshare]" % ext, media_url])
+
+ return video_urls
diff --git a/servers/bdupload.json b/servers/bdupload.json
index e2e39312..8bb33b1b 100644
--- a/servers/bdupload.json
+++ b/servers/bdupload.json
@@ -4,8 +4,12 @@
"ignore_urls": [],
"patterns": [
{
- "pattern": "(https://bdupload.info/[A-z0-9]+)",
+ "pattern": "(https://bdupload.(?:info|asia)/[A-z0-9]+)",
"url": "\\1"
+ },
+ {
+ "pattern": "https://dl.bdupload.(?:info|asia|in)/([A-z0-9]+)",
+ "url": "https://bdupload.asia/\\1"
}
]
},
diff --git a/servers/bdupload.py b/servers/bdupload.py
index 0bb284df..4186ad96 100644
--- a/servers/bdupload.py
+++ b/servers/bdupload.py
@@ -29,7 +29,8 @@ def get_video_url(page_url, user="", password="", video_password=""):
data1 = httptools.downloadpage(page_url, post = post, headers = headers).data
patron = "window.open\('([^']+)"
file = scrapertools.find_single_match(data1, patron).replace(" ","%20")
- file += "|User-Agent=" + headers['User-Agent']
+ file += "|User-Agent=" + httptools.get_user_agent()
+ file += "&Host=fs30.indifiles.com:182"
video_urls = []
videourl = file
video_urls.append([".MP4 [bdupload]", videourl])
diff --git a/servers/bitertv.json b/servers/bitertv.json
index 0b0d365f..9a4caa4c 100644
--- a/servers/bitertv.json
+++ b/servers/bitertv.json
@@ -6,6 +6,10 @@
{
"pattern": "(http://b.ter.tv/v/[A-z0-9]+)",
"url": "\\1"
+ },
+ {
+ "pattern": "(https://byter.tv/v/[A-z0-9]+)",
+ "url": "\\1"
}
]
},
diff --git a/servers/bitp.json b/servers/bitp.json
index 8c1e491f..26558cc2 100644
--- a/servers/bitp.json
+++ b/servers/bitp.json
@@ -7,6 +7,10 @@
"pattern": "https://www.bitporno.com/(?:e|embed)/([A-z0-9]+)",
"url": "https://www.bitporno.com/e/\\1"
},
+ {
+ "pattern": "https://www.bitporno.com/\\?v=([A-z0-9]+)",
+ "url": "https://www.bitporno.com/e/\\1"
+ },
{
"pattern": "raptu.com/(?:\\?v=|embed/|e/|v/)([A-z0-9]+)",
"url": "https://www.bitporno.com/e/\\1"
diff --git a/servers/bitp.py b/servers/bitp.py
index 555e4215..7549b129 100644
--- a/servers/bitp.py
+++ b/servers/bitp.py
@@ -23,7 +23,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
-    videourl = scrapertools.find_multiple_matches(data, "<source src='([^']+)'")
+    videourl = scrapertools.find_multiple_matches(data, '<source src="([^"]+)"')
diff --git a/servers/cinemaupload.py b/servers/cinemaupload.py
--- a/servers/cinemaupload.py
+++ b/servers/cinemaupload.py
@@ ... @@
+def test_video_exists(page_url):
+    logger.info("(page_url='%s')" % page_url)
+    global data
+    data = httptools.downloadpage(page_url).data
+    if "WE ARE SORRY" in data or '404 Not Found' in data:
+        return False, "[%s] El fichero no existe o ha sido borrado" %server
+    return True, ""
+
+
+def get_video_url(page_url, video_password):
+    logger.info("(page_url='%s')" % page_url)
+    video_urls = []
+    data = re.sub(r'\n|\r|\t|\s{2,}', "", data)
-    patron = "source: '([^']+)',"
+    patron = 'file: "([^"]+)",'
     matches = scrapertools.find_multiple_matches(data, patron)
     for url in matches:
+        url += "|Referer=%s" %page_url
         video_urls.append(['.m3u8 [CinemaUpload]', url])
     return video_urls
diff --git a/servers/clicknupload.json b/servers/clicknupload.json
index f8fe6556..a3ccd30e 100644
--- a/servers/clicknupload.json
+++ b/servers/clicknupload.json
@@ -13,7 +13,8 @@
"id": "clicknupload",
"name": "clicknupload",
"premium": [
- "realdebrid"
+ "realdebrid",
+ "alldebrid"
],
"settings": [
{
diff --git a/servers/clicknupload.py b/servers/clicknupload.py
old mode 100644
new mode 100755
index d5a7999f..05a1421e
--- a/servers/clicknupload.py
+++ b/servers/clicknupload.py
@@ -1,6 +1,15 @@
# -*- coding: utf-8 -*-
-import urllib
+import sys
+PY3 = False
+if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
+
+if PY3:
+ #from future import standard_library
+ #standard_library.install_aliases()
+ import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo
+else:
+ import urllib # Usamos el nativo de PY2 que es más rápido
from core import httptools
from core import scrapertools
@@ -48,7 +57,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
def get_data(url_orig, req_post=""):
try:
if not excption:
- response = httptools.downloadpage(url_orig, req_post)
+ response = httptools.downloadpage(url_orig, post=req_post)
if not response.data or "urlopen error [Errno 1]" in str(response.code):
global excption
excption = True
@@ -57,7 +66,6 @@ def get_data(url_orig, req_post=""):
else:
raise Exception
except:
- import urllib
post = {"address": url_orig.replace(".me", ".org")}
if req_post:
post["options"] = [{"man": "--data", "attribute": req_post}]
diff --git a/servers/clipwatching.json b/servers/clipwatching.json
index c72359fa..2e80d637 100644
--- a/servers/clipwatching.json
+++ b/servers/clipwatching.json
@@ -4,14 +4,18 @@
"ignore_urls": [],
"patterns": [
{
- "pattern": "clipwatching.com/((?:embed-)?[a-zA-Z0-9./_\\-\\[\\]\\(\\)]+).html",
- "url": "http://clipwatching.com/\\1.html"
+ "pattern": "clipwatching.com/(e.*?.html)",
+ "url": "http://clipwatching.com/\\1"
+ },
+ {
+ "pattern": "clipwatching.com/(\\w+)",
+ "url": "http://clipwatching.com/embed-\\1.html"
}
]
},
"free": true,
"id": "clipwatching",
- "name": "ClipWatching",
+ "name": "clipwatching",
"settings": [
{
"default": false,
diff --git a/servers/clipwatching.py b/servers/clipwatching.py
index 03abba2d..8e8d962f 100644
--- a/servers/clipwatching.py
+++ b/servers/clipwatching.py
@@ -1,14 +1,13 @@
# -*- coding: utf-8 -*-
-import re
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger, config
-
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
+ global data
data = httptools.downloadpage(page_url).data
if "File Not Found" in data or "File was deleted" in data:
return False, config.get_localized_string(70292) % "ClipWatching"
@@ -17,18 +16,17 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
- data = httptools.downloadpage(page_url).data
- data = re.sub('\t|\n','',data)
- logger.info('CLIP DATA= ' + data)
-    packed = scrapertools.find_single_match(data, r"text/javascript'>(.*?)\s*</script>")
- try: unpacked = jsunpack.unpack(packed)
- except: unpacked = data
video_urls = []
- videos = scrapertools.find_multiple_matches(unpacked, r'(?:file|src):\s*"([^"]+).*?type:\s*"video/([^"]+)".*?label:\s*"([^"]+)')
- for video, Type, label in videos:
- logger.info(Type)
- logger.info(label)
+ try:
+        packed = scrapertools.find_single_match(data, "text/javascript'>(eval.*?)\s*</script>")
+ unpacked = jsunpack.unpack(packed)
+ except:
+ unpacked = scrapertools.find_single_match(data,"window.hola_player.*")
+ videos = scrapertools.find_multiple_matches(unpacked, r'(?:file|src):\s*"([^"]+).*?label:\s*"([^"]+)')
+ for video, label in videos:
if ".jpg" not in video:
- video_urls.append(['%s [%sp] [ClipWatching]' % (Type, label), video])
- video_urls.sort(key=lambda x: x[0].split()[1])
+ if not label.endswith('p'):
+ label += 'p'
+ video_urls.append([label + " [clipwatching]", video])
+ video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
return video_urls
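Note: the new sort key assumes every label has been normalized to end in `p` (the loop above appends it when missing). The ordering it produces, checked standalone with made-up entries:

```python
video_urls = [
    ["480p [clipwatching]", "https://host/v480.mp4"],
    ["1080p [clipwatching]", "https://host/v1080.mp4"],
    ["720p [clipwatching]", "https://host/v720.mp4"],
]
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
print([t for t, _ in video_urls])
# ['480p [clipwatching]', '720p [clipwatching]', '1080p [clipwatching]']
```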
diff --git a/servers/cloudvideo.json b/servers/cloudvideo.json
index 67daa35b..974dea77 100644
--- a/servers/cloudvideo.json
+++ b/servers/cloudvideo.json
@@ -4,14 +4,14 @@
"ignore_urls": [],
"patterns": [
{
- "pattern": "cloudvideo.tv/(?:embed-)?([a-z0-9]+)(?:.html)?",
+ "pattern": "cloudvideo.tv/(?:embed-|)([a-z0-9]+)(?:.html|)",
"url": "https://cloudvideo.tv/embed-\\1.html"
}
]
},
"free": true,
"id": "cloudvideo",
- "name": "CloudVideo",
+ "name": "cloudvideo",
"settings": [
{
"default": false,
diff --git a/servers/crunchyroll.json b/servers/crunchyroll.json
index 815a2ed7..330afc53 100644
--- a/servers/crunchyroll.json
+++ b/servers/crunchyroll.json
@@ -36,48 +36,6 @@
],
"type": "list",
"visible": false
- },
- {
- "default": false,
- "enabled": true,
- "id": "premium",
- "label": "Activar cuenta premium",
- "type": "bool",
- "visible": true
- },
- {
- "default": "",
- "enabled": "eq(-1,true)",
- "id": "user",
- "label": "@30014",
- "type": "text",
- "visible": true
- },
- {
- "default": "",
- "enabled": "eq(-2,true)+!eq(-1,'')",
- "hidden": true,
- "id": "password",
- "label": "@30015",
- "type": "text",
- "visible": true
- },
- {
- "default": 0,
- "enabled": "eq(-3,true)",
- "id": "sub",
- "label": "Idioma de subtítulos preferido",
- "lvalues": [
- "Español España",
- "Español Latino",
- "Inglés",
- "Italiano",
- "Francés",
- "Portugués",
- "Alemán"
- ],
- "type": "list",
- "visible": true
}
],
"thumbnail": "http://i.imgur.com/SglkLAb.png?1"
diff --git a/servers/crunchyroll.py b/servers/crunchyroll.py
old mode 100644
new mode 100755
index e1bc2e68..75dae4a8
--- a/servers/crunchyroll.py
+++ b/servers/crunchyroll.py
@@ -1,5 +1,17 @@
# -*- coding: utf-8 -*-
+from builtins import range
+import sys
+PY3 = False
+if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
+
+if PY3:
+ #from future import standard_library
+ #standard_library.install_aliases()
+ import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo
+else:
+ import urllib # Usamos el nativo de PY2 que es más rápido
+
import base64
import struct
import zlib
@@ -11,15 +23,14 @@ from core import scrapertools
from platformcode import config, logger
GLOBAL_HEADER = {'User-Agent': 'Mozilla/5.0', 'Accept-Language': '*'}
-proxy = "http://anonymouse.org/cgi-bin/anon-www.cgi/"
+proxy_i = "https://www.usa-proxy.org/index.php"
+proxy = "https://www.usa-proxy.org/"
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
- premium = config.get_setting("premium", server="crunchyroll")
- if premium:
- return login(page_url)
- data = httptools.downloadpage(page_url, headers=GLOBAL_HEADER, replace_headers=True).data
+
+ data = httptools.downloadpage(page_url, headers=GLOBAL_HEADER).data
if "Este es un clip de muestra" in data:
disp = scrapertools.find_single_match(data, '.*?\s*(.*?)')
disp = disp.strip()
@@ -30,6 +41,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
+ #page_url='https://www.crunchyroll.com/es-es/one-piece/episode-891-climbing-up-a-waterfall-a-great-journey-through-the-land-of-wanos-sea-zone-786643'
logger.info("url=" + page_url)
video_urls = []
if "crunchyroll.com" in page_url:
@@ -39,10 +51,13 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
url = "https://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s" \
"&video_format=0&video_quality=0&auto_play=0&aff=af-12299-plwa" % media_id
post = "current_page=%s" % page_url
- data = httptools.downloadpage(url, post, headers=GLOBAL_HEADER, replace_headers=True).data
+ data = httptools.downloadpage(url, post=post, headers=GLOBAL_HEADER).data
+
if "Media not available" in data or "flash_block.png" in data:
- data = httptools.downloadpage(proxy + url, post, headers=GLOBAL_HEADER, replace_headers=True,
- cookies=False).data
+ httptools.downloadpage(proxy_i)
+ url = urllib.quote(url)
+ get = '%sbrowse.php?u=%s&b=4' % (proxy, url)
+ data = httptools.downloadpage(get, post=post, headers=GLOBAL_HEADER).data
media_url = scrapertools.find_single_match(data, '<file>(.*?)</file>').replace("&amp;", "&")
if not media_url:
return video_urls
@@ -54,18 +69,19 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
filename = scrapertools.get_filename_from_url(media_url)[-4:]
quality = scrapertools.find_single_match(data, '<height>(.*?)</height>')
try:
- idiomas = ['Español \(España\)', 'Español\]', 'English', 'Italiano', 'Français', 'Português', 'Deutsch']
- index_sub = int(config.get_setting("sub", server="crunchyroll"))
+ #idiomas = ['Español \(España\)', 'Español\]', 'English', 'Italiano', 'Français', 'Português', 'Deutsch']
+ idiomas = ['Deutsch', 'Português', 'Français', 'Italiano', 'English', 'Español\]', 'Español \(España\)']
+ index_sub = int(config.get_setting("crunchyrollsub", "crunchyroll"))
idioma_sub = idiomas[index_sub]
+
link_sub = scrapertools.find_single_match(data, "link='([^']+)' title='\[%s" % idioma_sub)
- if not link_sub and index_sub == 0:
+ if not link_sub and index_sub == 6:
link_sub = scrapertools.find_single_match(data, "link='([^']+)' title='\[Español\]")
- elif not link_sub and index_sub == 1:
+ elif not link_sub and index_sub == 5:
link_sub = scrapertools.find_single_match(data, "link='([^']+)' title='\[Español \(España\)")
if not link_sub:
link_sub = scrapertools.find_single_match(data, "link='([^']+)' title='\[English")
- data_sub = httptools.downloadpage(link_sub.replace("&", "&"), headers=GLOBAL_HEADER,
- replace_headers=True).data
+ data_sub = httptools.downloadpage(link_sub.replace("&", "&"), headers=GLOBAL_HEADER).data
id_sub = scrapertools.find_single_match(data_sub, "subtitle id='([^']+)'")
iv = scrapertools.find_single_match(data_sub, '<iv>(.*?)</iv>')
data_sub = scrapertools.find_single_match(data_sub, '<data>(.*?)</data>')
@@ -84,13 +100,13 @@ def login(page_url):
login_page = "https://www.crunchyroll.com/login"
user = config.get_setting("user", server="crunchyroll")
password = config.get_setting("password", server="crunchyroll")
- data = httptools.downloadpage(login_page, headers=GLOBAL_HEADER, replace_headers=True).data
+ data = httptools.downloadpage(login_page, headers=GLOBAL_HEADER).data
if not "Redirecting" in data:
token = scrapertools.find_single_match(data, 'name="login_form\[_token\]" value="([^"]+)"')
redirect_url = scrapertools.find_single_match(data, 'name="login_form\[redirect_url\]" value="([^"]+)"')
post = "login_form%5Bname%5D=" + user + "&login_form%5Bpassword%5D=" + password + \
"&login_form%5Bredirect_url%5D=" + redirect_url + "&login_form%5B_token%5D=" + token
- data = httptools.downloadpage(login_page, post, headers=GLOBAL_HEADER, replace_headers=True).data
+ data = httptools.downloadpage(login_page, post=post, headers=GLOBAL_HEADER).data
if "Redirecting" in data:
return True, ""
else:
@@ -108,14 +124,16 @@ def decrypt_subs(iv, data, id):
data = base64.b64decode(data.encode('utf-8'))
iv = base64.b64decode(iv.encode('utf-8'))
id = int(id)
+
def obfuscate_key_aux(count, modulo, start):
output = list(start)
for _ in range(count):
output.append(output[-1] + output[-2])
# cut off start values
output = output[2:]
- output = list(map(lambda x: x % modulo + 33, output))
+ output = list([x % modulo + 33 for x in output])
return output
+
def obfuscate_key(key):
from math import pow, sqrt, floor
num1 = int(floor(pow(2, 25) * sqrt(6.9)))
@@ -130,6 +148,7 @@ def decrypt_subs(iv, data, id):
decshaHash.append(ord(char))
# Extend 160 Bit hash to 256 Bit
return decshaHash + [0] * 12
+
key = obfuscate_key(id)
key = struct.pack('B' * len(key), *key)
decryptor = jscrypto.new(key, 2, iv)
@@ -202,5 +221,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
output += ',' + event.attrib['effect']
output += ',' + event.attrib['text']
output += '\n'
+ output = output.encode('utf-8')
+ if PY3: output = output.decode("utf-8")
- return output.encode('utf-8')
+ return output
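Note: `decrypt_subs` derives its AES key from the media id via a Fibonacci-style expansion. The helper, lifted so it can be exercised on its own (behaviour matches the patched code; only the demo call is new):

```python
def obfuscate_key_aux(count, modulo, start):
    output = list(start)
    for _ in range(count):
        output.append(output[-1] + output[-2])  # Fibonacci-style growth
    output = output[2:]  # cut off the seed values
    return [x % modulo + 33 for x in output]  # fold into printable-range bytes

print(obfuscate_key_aux(20, 97, (1, 2)))  # 20 key bytes from a 2-value seed
```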
diff --git a/servers/dailymotion.py b/servers/dailymotion.py
index 6a7d6d5a..bcbe0d4d 100644
--- a/servers/dailymotion.py
+++ b/servers/dailymotion.py
@@ -8,6 +8,8 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
response = httptools.downloadpage(page_url)
+ if "Contenido rechazado" in response.data:
+ return False, "[Dailymotion] El archivo no existe o ha sido borrado"
if response.code == 404:
return False, config.get_localized_string(70449) % "dailymotion"
return True, ""
diff --git a/servers/datoporn.py b/servers/datoporn.py
old mode 100644
new mode 100755
index ca5076c8..259582bc
--- a/servers/datoporn.py
+++ b/servers/datoporn.py
@@ -2,6 +2,7 @@
from core import httptools
from core import scrapertools
+from lib import jsunpack
from platformcode import logger
@@ -10,7 +11,7 @@ def test_video_exists(page_url):
data = httptools.downloadpage(page_url).data
- if 'File Not Found' in data or '404 Not Found' in data:
+ if 'Not Found' in data or 'File is no longer available' in data:
return False, "[Datoporn] El archivo no existe o ha sido borrado"
return True, ""
@@ -18,38 +19,29 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
-
+ video_urls = []
data = httptools.downloadpage(page_url).data
media_urls = scrapertools.find_multiple_matches(data, 'src: "([^"]+)",.*?label: "([^"]+)"')
- #media_urls = scrapertools.find_multiple_matches(data, 'file\:"([^"]+\.mp4)",label:"([^"]+)"')
- # if not media_urls:
-    #     match = scrapertools.find_single_match(data, "p,a,c,k(.*?)</script>")
- # try:
- # data = jsunpack.unpack(match)
- # except:
- # pass
- # media_urls = scrapertools.find_multiple_matches(data, 'file\:"([^"]+\.mp4)",label:"([^"]+)"')
-
- # Extrae la URL
- calidades = []
- video_urls = []
- for media_url in sorted(media_urls, key=lambda x: int(x[1][-3:])):
- calidades.append(int(media_url[1][-3:]))
+ if not media_urls:
+        match = scrapertools.find_single_match(data, "<script type='text/javascript'>(eval.*?)</script>")
try:
- title = ".%s %sp [datoporn]" % (media_url[0].rsplit('.', 1)[1], media_url[1][-3:])
+ data = jsunpack.unpack(match)
except:
- title = ".%s %sp [datoporn]" % (media_url[-4:], media_url[1][-3:])
- video_urls.append([title, media_url[0]])
-
- sorted(calidades)
- m3u8 = scrapertools.find_single_match(data, 'file\:"([^"]+\.m3u8)"')
+ pass
+ media_urls = scrapertools.find_multiple_matches(data, 'file\:"([^"]+\.mp4)",label:"([^"]+)"')
+ # Extrae la URL
+ for media_url, res in media_urls:
+ try:
+ title = ".%s %s [datoporn]" % (media_url.rsplit('.', 1)[1], res)
+ except:
+ title = ".%s %s [datoporn]" % (media_url[-4:], res)
+ video_urls.append([title, media_url])
+ m3u8 = scrapertools.find_single_match(data, 'src\:"([^"]+\.m3u8)"')
if not m3u8:
m3u8 = str(scrapertools.find_multiple_matches(data, 'player.updateSrc\({src:.?"([^"]+\.m3u8)"')).replace("['", "").replace("']", "")
calidades = ['720p']
if m3u8:
- video_urls.insert(0, [".m3u8 %s [datoporn]" % calidades[-1], m3u8])
-
+ video_urls.insert(0, [".m3u8 720p [datoporn]" , m3u8])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
-
return video_urls
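Note: the rewritten datoporn extractor tries the plain `src`/`label` sources first and only unpacks the `p,a,c,k,e,d` script as a fallback. Condensed below; the imports assume the add-on tree is on the path, and the script regex is our reconstruction of the extraction-damaged line above:

```python
from core import scrapertools
from lib import jsunpack

def extract_sources(data):
    media = scrapertools.find_multiple_matches(data, 'src: "([^"]+)",.*?label: "([^"]+)"')
    if not media:
        packed = scrapertools.find_single_match(
            data, "<script type='text/javascript'>(eval.*?)</script>")
        try:
            data = jsunpack.unpack(packed)
        except Exception:
            pass  # leave data untouched; the next search simply finds nothing
        media = scrapertools.find_multiple_matches(
            data, r'file\:"([^"]+\.mp4)",label:"([^"]+)"')
    return media
```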
diff --git a/servers/debriders/realdebrid.json b/servers/debriders/realdebrid.json
old mode 100644
new mode 100755
index 5cddf0d7..df48639d
--- a/servers/debriders/realdebrid.json
+++ b/servers/debriders/realdebrid.json
@@ -11,6 +11,14 @@
"label": "@70272",
"type": "bool",
"visible": true
+ },
+ {
+ "default": "",
+ "enabled": "eq(-1,true)",
+ "id": "token",
+ "label": "Token (autentificación alternativa)",
+ "type": "text",
+ "visible": true
}
]
}
\ No newline at end of file
diff --git a/servers/debriders/realdebrid.py b/servers/debriders/realdebrid.py
old mode 100644
new mode 100755
index 97b516e1..5c7c837e
--- a/servers/debriders/realdebrid.py
+++ b/servers/debriders/realdebrid.py
@@ -1,20 +1,30 @@
# -*- coding: utf-8 -*-
+import sys
+PY3 = False
+if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
+
+if PY3:
+ #from future import standard_library
+ #standard_library.install_aliases()
+ import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo
+else:
+ import urllib # Usamos el nativo de PY2 que es más rápido
+
import time
-import urllib
from core import httptools
-from core import jsontools
+from core import scrapertools
from platformcode import config, logger
from platformcode import platformtools
-headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0'}
+headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:65.0) Gecko/20100101 Firefox/65.0'}
# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s' , video_password=%s)" % (page_url, video_password))
-
+ page_url = page_url.replace(".nz/embed", ".nz/")
# Se comprueba si existe un token guardado y sino se ejecuta el proceso de autentificación
token_auth = config.get_setting("token", server="realdebrid")
if token_auth is None or token_auth == "":
@@ -28,11 +38,19 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
post_link = urllib.urlencode([("link", page_url), ("password", video_password)])
headers["Authorization"] = "Bearer %s" % token_auth
url = "https://api.real-debrid.com/rest/1.0/unrestrict/link"
- data = httptools.downloadpage(url, post=post_link, headers=headers.items()).data
- data = jsontools.load(data)
+ data = httptools.downloadpage(url, post=post_link, headers=list(headers.items())).json
+ logger.error(data)
+
+ check = config.get_setting("secret", server="realdebrid")
+ #Se ha usado la autentificación por urlresolver (Bad Idea)
+ if "error" in data and data["error"] == "bad_token" and not check:
+ token_auth = authentication()
+ headers["Authorization"] = "Bearer %s" % token_auth
+ data = httptools.downloadpage(url, post=post_link, headers=list(headers.items())).json
# Si el token es erróneo o ha caducado, se solicita uno nuevo
- if "error" in data and data["error"] == "bad_token":
+ elif "error" in data and data["error"] == "bad_token":
+
debrid_id = config.get_setting("id", server="realdebrid")
secret = config.get_setting("secret", server="realdebrid")
refresh = config.get_setting("refresh", server="realdebrid")
@@ -40,15 +58,16 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
post_token = urllib.urlencode({"client_id": debrid_id, "client_secret": secret, "code": refresh,
"grant_type": "http://oauth.net/grant_type/device/1.0"})
renew_token = httptools.downloadpage("https://api.real-debrid.com/oauth/v2/token", post=post_token,
- headers=headers.items()).data
- renew_token = jsontools.load(renew_token)
+ headers=list(headers.items())).json
if not "error" in renew_token:
token_auth = renew_token["access_token"]
config.set_setting("token", token_auth, server="realdebrid")
headers["Authorization"] = "Bearer %s" % token_auth
- data = httptools.downloadpage(url, post=post_link, headers=headers.items()).data
- data = jsontools.load(data)
-
+ data = httptools.downloadpage(url, post=post_link, headers=list(headers.items())).json
+ else:
+ token_auth = authentication()
+ headers["Authorization"] = "Bearer %s" % token_auth
+ data = httptools.downloadpage(url, post=post_link, headers=list(headers.items())).json
if "download" in data:
return get_enlaces(data)
else:
@@ -87,8 +106,7 @@ def authentication():
# Se solicita url y código de verificación para conceder permiso a la app
url = "http://api.real-debrid.com/oauth/v2/device/code?client_id=%s&new_credentials=yes" % (client_id)
- data = httptools.downloadpage(url, headers=headers.items()).data
- data = jsontools.load(data)
+ data = httptools.downloadpage(url, headers=list(headers.items())).json
verify_url = data["verification_url"]
user_code = data["user_code"]
device_code = data["device_code"]
@@ -108,8 +126,7 @@ def authentication():
url = "https://api.real-debrid.com/oauth/v2/device/credentials?client_id=%s&code=%s" \
% (client_id, device_code)
- data = httptools.downloadpage(url, headers=headers.items()).data
- data = jsontools.load(data)
+ data = httptools.downloadpage(url, headers=list(headers.items())).json
if "client_secret" in data:
# Código introducido, salimos del bucle
break
@@ -127,9 +144,8 @@ def authentication():
# Se solicita el token de acceso y el de actualización para cuando el primero caduque
post = urllib.urlencode({"client_id": debrid_id, "client_secret": secret, "code": device_code,
"grant_type": "http://oauth.net/grant_type/device/1.0"})
- data = htttools.downloadpage("https://api.real-debrid.com/oauth/v2/token", post=post,
- headers=headers.items()).data
- data = jsontools.load(data)
+ data = httptools.downloadpage("https://api.real-debrid.com/oauth/v2/token", post=post,
+ headers=list(headers.items())).json
token = data["access_token"]
refresh = data["refresh_token"]
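Note: the RealDebrid device flow the connector implements (request a user code, poll for credentials, trade them for tokens) can be exercised outside Kodi with the stdlib alone. Endpoints are the ones in the patch; the client id is a placeholder, and `device_auth` is our name for the sketch:

```python
import json
import time
import urllib.error
import urllib.parse
import urllib.request

API = "https://api.real-debrid.com/oauth/v2"
CLIENT_ID = "YOUR_CLIENT_ID"  # placeholder; the add-on ships its own id

def get_json(url, post=None):
    data = urllib.parse.urlencode(post).encode() if post else None
    with urllib.request.urlopen(url, data=data) as resp:
        return json.load(resp)

def device_auth():
    dev = get_json("%s/device/code?client_id=%s&new_credentials=yes" % (API, CLIENT_ID))
    print("Open %s and enter the code %s" % (dev["verification_url"], dev["user_code"]))
    while True:  # poll until the user confirms the code
        time.sleep(dev.get("interval", 5))
        try:
            creds = get_json("%s/device/credentials?client_id=%s&code=%s"
                             % (API, CLIENT_ID, dev["device_code"]))
        except urllib.error.HTTPError:
            continue  # not confirmed yet
        if "client_secret" in creds:
            break
    tokens = get_json(API + "/token", post={
        "client_id": creds["client_id"],
        "client_secret": creds["client_secret"],
        "code": dev["device_code"],
        "grant_type": "http://oauth.net/grant_type/device/1.0",
    })
    return tokens["access_token"], tokens["refresh_token"]
```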
diff --git a/servers/decrypters/adfly.py b/servers/decrypters/adfly.py
old mode 100644
new mode 100755
index e1704be3..20b34d18
--- a/servers/decrypters/adfly.py
+++ b/servers/decrypters/adfly.py
@@ -13,7 +13,7 @@ def get_long_url(short_url):
data = httptools.downloadpage(short_url).data
ysmm = scrapertools.find_single_match(data, "var ysmm = '([^']+)';")
b64 = ""
- for i in reversed(range(len(ysmm))):
+ for i in reversed(list(range(len(ysmm)))):
if i % 2:
b64 = b64 + ysmm[i]
else:
diff --git a/servers/decrypters/linkbucks.py b/servers/decrypters/linkbucks.py
old mode 100644
new mode 100755
index c748222d..5b15ec00
--- a/servers/decrypters/linkbucks.py
+++ b/servers/decrypters/linkbucks.py
@@ -1,6 +1,15 @@
# -*- coding: utf-8 -*-
-import urllib
+import sys
+PY3 = False
+if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
+
+if PY3:
+ #from future import standard_library
+ #standard_library.install_aliases()
+ import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo
+else:
+ import urllib # Usamos el nativo de PY2 que es más rápido
from core import scrapertools
from platformcode import logger
diff --git a/servers/decrypters/longurl.py b/servers/decrypters/longurl.py
old mode 100644
new mode 100755
index 3c628bdc..4c6cb012
--- a/servers/decrypters/longurl.py
+++ b/servers/decrypters/longurl.py
@@ -1,7 +1,17 @@
# -*- coding: utf-8 -*-
+import sys
+PY3 = False
+if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
+
+if PY3:
+ #from future import standard_library
+ #standard_library.install_aliases()
+ import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo
+else:
+ import urllib # Usamos el nativo de PY2 que es más rápido
+
import re
-import urllib
from core import httptools
from core import scrapertools
@@ -41,5 +51,5 @@ def get_long_urls(data):
long_url = scrapertools.find_single_match(longurl_data, '')
except:
long_url = ""
- if (long_url <> ""): data = data.replace(short_url, long_url)
+ if (long_url != ""): data = data.replace(short_url, long_url)
return data
diff --git a/servers/directo.py b/servers/directo.py
index d3db16e9..d5e55f17 100644
--- a/servers/directo.py
+++ b/servers/directo.py
@@ -6,7 +6,6 @@ from platformcode import logger, config
# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
- logger.info('PAGE URL= ' + page_url)
video_urls = [["%s %s" % (page_url[-4:], config.get_localized_string(30137)), page_url]]
diff --git a/servers/drtuber.json b/servers/drtuber.json
new file mode 100644
index 00000000..c5dddacf
--- /dev/null
+++ b/servers/drtuber.json
@@ -0,0 +1,42 @@
+{
+ "active": true,
+ "find_videos": {
+ "ignore_urls": [],
+ "patterns": [
+ {
+ "pattern": "http://www.drtuber.com/embed/([0-9]+)",
+ "url": "\\1"
+ }
+ ]
+ },
+ "free": true,
+ "id": "drtuber",
+ "name": "drtuber",
+ "settings": [
+ {
+ "default": false,
+ "enabled": true,
+ "id": "black_list",
+ "label": "@60654",
+ "type": "bool",
+ "visible": true
+ },
+ {
+ "default": 0,
+ "enabled": true,
+ "id": "favorites_servers_list",
+ "label": "@60655",
+ "lvalues": [
+ "No",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5"
+ ],
+ "type": "list",
+ "visible": false
+ }
+ ],
+ "thumbnail": ""
+}
diff --git a/servers/drtuber.py b/servers/drtuber.py
new file mode 100644
index 00000000..de444847
--- /dev/null
+++ b/servers/drtuber.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+import re
+from core import httptools
+from core import scrapertools
+from platformcode import logger
+
+
+
+def get_video_url(page_url, video_password):
+ logger.info("(page_url='%s')" % page_url)
+ video_urls = []
+ url = "https://www.drtuber.com/player_config_json/?vid=%s&aid=0&domain_id=0&embed=0&ref=null&check_speed=0" %page_url
+ data = httptools.downloadpage(url).data
+ data = scrapertools.find_single_match(data, '"files":(.*?)"quality"')
+ patron = '"([lh])q":"([^"]+)"'
+ matches = scrapertools.find_multiple_matches(data, patron)
+ for quality, scrapedurl in matches:
+ url = scrapedurl.replace("\/", "/")
+ if "l" in quality: quality = "360"
+ if "h" in quality: quality = "720"
+ video_urls.append(["[drtuber] %s" %quality, url])
+ return video_urls
+
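Note: drtuber's `player_config_json` exposes the streams under `files.lq`/`files.hq` with escaped slashes. The mapping the connector applies, shown against a made-up payload (sample data, not a live response):

```python
import json

sample = '{"files": {"lq": "https:\\/\\/cdn.example\\/v360.mp4", "hq": "https:\\/\\/cdn.example\\/v720.mp4"}}'
files = json.loads(sample)["files"]  # json.loads also resolves the \/ escapes
quality = {"lq": "360", "hq": "720"}
video_urls = [["[drtuber] %s" % quality[k], files[k]] for k in ("lq", "hq") if k in files]
print(video_urls)
```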
diff --git a/servers/eporner.json b/servers/eporner.json
new file mode 100644
index 00000000..c288a1f3
--- /dev/null
+++ b/servers/eporner.json
@@ -0,0 +1,42 @@
+{
+ "active": true,
+ "find_videos": {
+ "ignore_urls": [],
+ "patterns": [
+ {
+ "pattern": "(https://www.eporner.com/hd-porn/[A-z0-9-]+/[A-z0-9-]+)",
+ "url": "\\1"
+ }
+ ]
+ },
+ "free": true,
+ "id": "eporner",
+ "name": "eporner",
+ "settings": [
+ {
+ "default": false,
+ "enabled": true,
+ "id": "black_list",
+ "label": "@60654",
+ "type": "bool",
+ "visible": true
+ },
+ {
+ "default": 0,
+ "enabled": true,
+ "id": "favorites_servers_list",
+ "label": "@60655",
+ "lvalues": [
+ "No",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5"
+ ],
+ "type": "list",
+ "visible": false
+ }
+ ],
+ "thumbnail": ""
+}
diff --git a/servers/eporner.py b/servers/eporner.py
new file mode 100644
index 00000000..a30cf276
--- /dev/null
+++ b/servers/eporner.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+import re
+from core import httptools
+from core import scrapertools
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+ logger.info("(page_url='%s')" % page_url)
+ global data
+ data = httptools.downloadpage(page_url).data
+ if "WE ARE SORRY" in data or '404 Not Found' in data:
+ return False, "[eporner] El fichero no existe o ha sido borrado"
+ return True, ""
+
+
+def get_video_url(page_url, video_password):
+ logger.info("(page_url='%s')" % page_url)
+ video_urls = []
+ data = httptools.downloadpage(page_url).data
+ data = re.sub(r"\n|\r|\t| | | ", "", data)
+ patron = "EP: {vid: '([^']+)',hash: '([^']+)'"
+ vid, hash = re.compile(patron, re.DOTALL).findall(data)[0]
+ hash = int_to_base36(int(hash[0:8], 16)) + int_to_base36(int(hash[8:16], 16)) + int_to_base36(
+ int(hash[16:24], 16)) + int_to_base36(int(hash[24:32], 16))
+ url = "https://www.eporner.com/xhr/video/%s?hash=%s" % (vid, hash)
+ jsondata = httptools.downloadpage(url).json
+ for source in jsondata["sources"]["mp4"]:
+ url = jsondata["sources"]["mp4"][source]["src"]
+ title = source.split(" ")[0]
+ video_urls.append(["[eporner] %s"% title, url])
+ return video_urls
+ # return sorted(video_urls, key=lambda i: int(i[0].split("p")[1]))
+
+
+
+
+def int_to_base36(num):
+ """Converts a positive integer into a base36 string."""
+ assert num >= 0
+ digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'.lower()
+
+ res = ''
+ while not res or num > 0:
+ num, i = divmod(num, 36)
+ res = digits[i] + res
+ return res
+
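Note: the `?hash=` query parameter is the page's 32-hex-char hash re-encoded quarter by quarter in base36. The step in isolation, runnable as-is (the sample hash is invented):

```python
def int_to_base36(num):
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    res = ""
    while not res or num > 0:
        num, i = divmod(num, 36)
        res = digits[i] + res
    return res

page_hash = "0123456789abcdef0123456789abcdef"  # invented 32-char hash
encoded = "".join(int_to_base36(int(page_hash[i:i + 8], 16)) for i in range(0, 32, 8))
print(encoded)  # value sent as ?hash= to /xhr/video/<vid>
```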
diff --git a/servers/estream.json b/servers/estream.json
index d9f644cf..dfcae14c 100644
--- a/servers/estream.json
+++ b/servers/estream.json
@@ -1,5 +1,5 @@
{
- "active": true,
+ "active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [
@@ -43,4 +43,4 @@
}
],
"thumbnail": "https://s14.postimg.cc/ibd54ayf5/estream.png"
-}
\ No newline at end of file
+}
diff --git a/servers/estream.py b/servers/estream.py
index fe5ae6eb..6ff87af2 100644
--- a/servers/estream.py
+++ b/servers/estream.py
@@ -4,7 +4,6 @@
# --------------------------------------------------------
import re
-
from core import httptools
from platformcode import logger
diff --git a/servers/facebook.py b/servers/facebook.py
old mode 100644
new mode 100755
index 829703a9..15721d9a
--- a/servers/facebook.py
+++ b/servers/facebook.py
@@ -1,7 +1,17 @@
# -*- coding: utf-8 -*-
+import sys
+PY3 = False
+if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
+
+if PY3:
+ #from future import standard_library
+ #standard_library.install_aliases()
+ import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo
+else:
+ import urllib # Usamos el nativo de PY2 que es más rápido
+
import re
-import urllib
from core import httptools
from core import scrapertools
diff --git a/servers/fastplay.json b/servers/fastplay.json
old mode 100644
new mode 100755
index 63b6de23..274c91e6
--- a/servers/fastplay.json
+++ b/servers/fastplay.json
@@ -5,7 +5,7 @@
"patterns": [
{
"pattern": "fastplay.(?:to|cc|sx)/(?:flash-|embed-|)([A-z0-9]+)",
- "url": "http://fastplay.cc/embed-\\1.html"
+ "url": "http://fastplay.to/embed-\\1.html"
}
]
},
diff --git a/servers/fembed.py b/servers/fembed.py
index 250f6d8e..2f520fc5 100644
--- a/servers/fembed.py
+++ b/servers/fembed.py
@@ -7,8 +7,13 @@ from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
- if "Sorry 404 not found" in data:
- return False, config.get_localized_string(70292) % "Fembed"
+ if "Sorry 404 not found" in data or "This video is unavailable" in data or "Sorry this video is unavailable:" in data:
+ return False, "[fembed] El fichero ha sido borrado"
+ page_url = page_url.replace("/f/","/v/")
+ page_url = page_url.replace("/v/","/api/source/")
+ data = httptools.downloadpage(page_url, post={}).data
+ if "Video not found or" in data:
+ return False, "[fembed] El fichero ha sido borrado"
return True, ""
diff --git a/servers/filebebo.py b/servers/filebebo.py
index d32a513d..4ee3f98a 100644
--- a/servers/filebebo.py
+++ b/servers/filebebo.py
@@ -4,7 +4,6 @@
# -*- By the Alfa Develop Group -*-
import re
-
from core import httptools
from core import scrapertools
from platformcode import logger
diff --git a/servers/flashx.json b/servers/flashx.json
index 223c5d16..ef08a819 100644
--- a/servers/flashx.json
+++ b/servers/flashx.json
@@ -16,6 +16,9 @@
"free": true,
"id": "flashx",
"name": "flashx",
+ "premium": [
+ "realdebrid"
+ ],
"settings": [
{
"default": false,
diff --git a/servers/flashx.py b/servers/flashx.py
index 00f012ab..8ce5d966 100644
--- a/servers/flashx.py
+++ b/servers/flashx.py
@@ -1,30 +1,61 @@
# -*- coding: utf-8 -*-
+import sys
+PY3 = False
+if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
+
+if PY3:
+ #from future import standard_library
+ #standard_library.install_aliases()
+ import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo
+else:
+ import urllib # Usamos el nativo de PY2 que es más rápido
+
import os
import time
-import urllib
from core import httptools, scrapertools
from lib import jsunpack
-from platformcode import config, logger
+from platformcode import config, logger, platformtools
+flashx_data = ""
+flashx_hash_f = ""
+flashx_post = ""
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
-
- data = httptools.downloadpage(page_url, cookies=False).data
- if 'file was deleted' in data or 'File Not Found (Deleted or Abused)' in data:
+ global flashx_data
+ try:
+ flashx_data = httptools.downloadpage(page_url, cookies="xfsts=pfp5dj3e6go1l2o1").data
+ except:
+ try:
+ flashx_data = httptools.downloadpage(page_url).data
+ except:
+ return False, config.get_localized_string(70296) % "FlashX"
+ bloque = scrapertools.find_single_match(flashx_data, '(?s)Form method="POST" action(.*?)span')
+ flashx_id = scrapertools.find_single_match(bloque, 'name="id" value="([^"]+)"')
+ fname = scrapertools.find_single_match(bloque, 'name="fname" value="([^"]+)"')
+ global flashx_hash_f
+ flashx_hash_f = scrapertools.find_single_match(bloque, 'name="hash" value="([^"]+)"')
+ imhuman = scrapertools.find_single_match(bloque, "value='([^']+)' name='imhuman'")
+ global flashx_post
+ flashx_post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=%s' % (
+ flashx_id, urllib.quote(fname), flashx_hash_f, imhuman)
+ if 'file was deleted' in flashx_data or 'File Not Found (Deleted or Abused)' in flashx_data:
return False, config.get_localized_string(70292) % "FlashX"
- elif 'Video is processing now' in data:
+ elif 'Video is processing now' in flashx_data:
return False, config.get_localized_string(70293) % "FlashX"
+ elif 'Too many views per minute' in flashx_data:
+ return False, config.get_localized_string(70300) % "FlashX"
return True, ""
+
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
pfxfx = ""
- data = httptools.downloadpage(page_url, cookies=False).data
+ data = flashx_data
data = data.replace("\n", "")
cgi_counter = scrapertools.find_single_match(data,
"""(?is)src=.(https://www.flashx.../counter.cgi.*?[^(?:'|")]+)""")
@@ -33,43 +64,40 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
# Para obtener el f y el fxfx
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//", "/"),
"""(?is)(flashx.../js\w+/c\w+.*?[^(?:'|")]+)""")
- data_fxfx = httptools.downloadpage(js_fxfx).data
- mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'", "").replace(" ", "")
- matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
- for f, v in matches:
- pfxfx += f + "=" + v + "&"
+ if len(js_fxfx) > 15:
+ data_fxfx = httptools.downloadpage(js_fxfx).data
+ mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'", "").replace(" ", "")
+ matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
+ for f, v in matches:
+ pfxfx += f + "=" + v + "&"
logger.info("mfxfxfx1= %s" % js_fxfx)
logger.info("mfxfxfx2= %s" % pfxfx)
if pfxfx == "":
pfxfx = "f=fail&fxfx=6"
coding_url = 'https://www.flashx.co/flashx.php?%s' % pfxfx
- # {f: 'y', fxfx: '6'}
- bloque = scrapertools.find_single_match(data, '(?s)Form method="POST" action(.*?)span')
- flashx_id = scrapertools.find_single_match(bloque, 'name="id" value="([^"]+)"')
- fname = scrapertools.find_single_match(bloque, 'name="fname" value="([^"]+)"')
- hash_f = scrapertools.find_single_match(bloque, 'name="hash" value="([^"]+)"')
- imhuman = scrapertools.find_single_match(bloque, "value='([^']+)' name='imhuman'")
- post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=%s' % (
- flashx_id, urllib.quote(fname), hash_f, imhuman)
- wait_time = scrapertools.find_single_match(data, "(\d+)")
-
+
# Obligatorio descargar estos 2 archivos, porque si no, muestra error
httptools.downloadpage(coding_url, cookies=False)
httptools.downloadpage(cgi_counter, cookies=False)
-
+
+ ts = int(time.time())
+ flash_ts = scrapertools.find_single_match(flashx_hash_f, '-(\d{10})-')
+ wait_time = int(flash_ts) - ts
+ platformtools.dialog_notification('Cargando flashx', 'Espera de %s segundos requerida' % wait_time)
+
try:
- time.sleep(int(wait_time) + 1)
+ time.sleep(wait_time)
except:
time.sleep(6)
- data = httptools.downloadpage(playnow, post).data
+ data = httptools.downloadpage(playnow, post = flashx_post).data
# Si salta aviso, se carga la pagina de comprobacion y luego la inicial
# LICENSE GPL3, de alfa-addon: https://github.com/alfa-addon/ ES OBLIGATORIO AÑADIR ESTAS LÍNEAS
if "You try to access this video with Kodi" in data:
url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"')
try:
- data = httptools.downloadpage(url_reload, cookies=False).data
- data = httptools.downloadpage(playnow, post, cookies=False).data
+ data = httptools.downloadpage(url_reload).data
+ data = httptools.downloadpage(playnow, post = flashx_post).data
# LICENSE GPL3, de alfa-addon: https://github.com/alfa-addon/ ES OBLIGATORIO AÑADIR ESTAS LÍNEAS
except:
pass
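Note: instead of scraping a countdown element, flashx now derives the wait from the 10-digit epoch embedded in the form's `hash` value. The computation in isolation (`seconds_to_wait` is our name; the sample value is invented):

```python
import re
import time

def seconds_to_wait(hash_field, now=None):
    stamp = re.search(r"-(\d{10})-", hash_field)
    if not stamp:
        return 6  # the connector's fallback sleep
    now = int(time.time()) if now is None else now
    return max(0, int(stamp.group(1)) - now)

print(seconds_to_wait("abcd-1717171717-efgh", now=1717171710))  # 7
```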
diff --git a/servers/flix555.json b/servers/flix555.json
index e34c7839..f642474c 100644
--- a/servers/flix555.json
+++ b/servers/flix555.json
@@ -1,11 +1,11 @@
{
- "active": true,
+ "active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
- "pattern": "flix555.com/([A-z0-9]+)",
- "url": "https://flix555.com/\\1"
+ "pattern": "flix555.com/(?:embed-|)([A-z0-9]+)",
+ "url": "https://flix555.com/embed-\\1.html"
}
]
},
diff --git a/servers/flix555.py b/servers/flix555.py
index 50f59973..da1a2397 100644
--- a/servers/flix555.py
+++ b/servers/flix555.py
@@ -1,49 +1,43 @@
# -*- coding: utf-8 -*-
-import time
-import urllib
-
+import re
from core import httptools, scrapertools
from lib import jsunpack
-from platformcode import logger, platformtools
-
+from platformcode import logger
+data = ""
def test_video_exists(page_url):
resp = httptools.downloadpage(page_url)
- if resp.code == 404 or 'File Not Found' in resp.data:
+ global data
+ data = resp.data
+ if resp.code == 404 or 'File Not Found' in resp.data or "File is no longer available" in resp.data:
return False, "[flix555] El video no está disponible"
return True, ""
-
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
itemlist = []
- data = httptools.downloadpage(page_url).data
- # ~ logger.info(data)
-
- post = {}
-    inputs = scrapertools.find_multiple_matches(data, '<input.*?name="([^"]*)".*?value="([^"]*)"')
-    for nombre, valor in inputs: post[nombre] = valor
-    espera = scrapertools.find_single_match(data, '<span id="cxc">(\d+)</span>')
- platformtools.dialog_notification('Cargando flix555', 'Espera de %s segundos requerida' % espera)
- time.sleep(int(espera))
-
- data = httptools.downloadpage(page_url, post=post).data
- # ~ logger.info(data)
-
packed = scrapertools.find_single_match(data, "<script type='text/javascript'>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed)
# ~ logger.info(unpacked)
-
+ unpacked = re.sub(r'\n|\r|\t|\s{2,}', "", unpacked)
+ subtitles = scrapertools.find_single_match(unpacked, r'tracks:\s*\[\{\s*file\s*:\s*"([^"]*)"\s*,\s*label')
+ if "empty." in subtitles: subtitles = ""
matches = scrapertools.find_multiple_matches(unpacked, 'file\s*:\s*"([^"]*)"\s*,\s*label\s*:\s*"([^"]*)"')
if matches:
for url, lbl in matches:
- if not url.endswith('.srt'):
- itemlist.append(['[%s]' % lbl, url])
+
+ if url.endswith('.srt') or url.endswith('.vtt'):
+ #subtitles += url
+ continue
+
+ itemlist.append(['.mp4 (%s) [flix555]' % lbl, url, 0, subtitles])
+
+ url = scrapertools.find_single_match(unpacked, 'file\s*:\s*"([^"]*)"\s*')
+ if url:
+        if not url.endswith('.srt') and not url.endswith('.vtt'):
+ itemlist.append(['.m3u8 [flix555]', url, 0, subtitles])
return itemlist
diff --git a/servers/gamovideo.json b/servers/gamovideo.json
old mode 100644
new mode 100755
index 1ebc6814..510e6307
--- a/servers/gamovideo.json
+++ b/servers/gamovideo.json
@@ -5,7 +5,7 @@
"patterns": [
{
"pattern": "gamovideo.com/(?:embed-|)([a-z0-9]+)",
- "url": "http://gamovideo.com/\\1"
+ "url": "http://gamovideo.com/embed-\\1.html"
}
]
},
diff --git a/servers/gamovideo.py b/servers/gamovideo.py
old mode 100644
new mode 100755
index f08eb568..79d1b967
--- a/servers/gamovideo.py
+++ b/servers/gamovideo.py
@@ -1,39 +1,77 @@
# -*- coding: utf-8 -*-
-import re
+import sys
+PY3 = False
+if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
+import re
+import random
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
+if not PY3: from lib import alfaresolver
+else: from lib import alfaresolver_py3 as alfaresolver
-headers = {"User-Agent":"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0"}
+ver = random.randint(66, 67)
+headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:%s.0) Gecko/20100101 Firefox/%s.0" % (ver, ver)}
+
+DATA = ''
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
- data = httptools.downloadpage(page_url, headers=headers, cookies=False).data
+
- if "File was deleted" in data or " |