rebase servers

Author: marco
Date: 2020-03-06 20:50:52 +01:00
Parent: a2c4aeb93b
Commit: 2ecdf23042
193 changed files with 5798 additions and 509 deletions


@@ -5,7 +5,7 @@ import urllib, re
from core import httptools
from core import scrapertools
from platformcode import logger, config
from platformcode import logger, config, platformtools
from core.support import dbg
@@ -40,7 +40,15 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
# dbg()
global data
logger.info('PAGE DATA' + data)
# logger.info('PAGE DATA' + data)
# sitekey = scrapertools.find_single_match(data, 'data-sitekey="([^"]+)')
sitekey = '6LeNU5IUAAAAAPNs_w-s8Rc-X2C2SPE3UW8lkkjW'
# from core import support
# support.dbg()
captcha = platformtools.show_recaptcha(sitekey, page_url) if sitekey else ''
if captcha:
data = httptools.downloadpage(page_url, post={'g-recaptcha-response': captcha}).data
vres = scrapertools.find_multiple_matches(data, 'nowrap[^>]+>([^,]+)')
if not vres: vres = scrapertools.find_multiple_matches(data, '<td>(\d+x\d+)')
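The hunk above wires a reCAPTCHA step into the connector: the sitekey is pinned (the scraped data-sitekey lookup is left commented out), platformtools.show_recaptcha hands the challenge to the user, and the solved token is replayed as g-recaptcha-response. A minimal sketch of that round trip, using only the helpers visible in the hunk:

    from core import httptools, scrapertools
    from platformcode import platformtools

    def fetch_with_recaptcha(page_url):
        data = httptools.downloadpage(page_url).data
        # prefer the sitekey published in the page; fall back to the pinned key above
        sitekey = scrapertools.find_single_match(data, 'data-sitekey="([^"]+)')
        sitekey = sitekey or '6LeNU5IUAAAAAPNs_w-s8Rc-X2C2SPE3UW8lkkjW'
        token = platformtools.show_recaptcha(sitekey, page_url)  # user solves it in a dialog
        if token:
            # replay the request with the solved token to unlock the real page
            data = httptools.downloadpage(page_url, post={'g-recaptcha-response': token}).data
        return data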


@@ -20,15 +20,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
patron = 'id="download-quality-(\w+).*?href="([^"]+)"'
patron = 'download-url.*?href="([^"]+)"'
match = scrapertools.find_multiple_matches(data, patron)
for calidad, media_url in match:
title = "%s [anonfile]" % (calidad)
video_urls.append([title, media_url, int(calidad.replace("p", ""))])
video_urls.sort(key=lambda x: x[2])
for video_url in video_urls:
video_url[2] = 0
logger.info("%s - %s" % (video_url[0], video_url[1]))
for media_url in match:
media_url += "|Referer=%s" %page_url
title = "mp4 [anonfile]"
video_urls.append([title, media_url])
return video_urls
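The rewrite drops the per-quality download links and returns a single mp4 tagged with a Referer. The trailing |Referer=%s is Kodi's convention for attaching HTTP headers to a playable URL: everything after the pipe is parsed as urlencoded request headers. A small helper that makes the pattern explicit and also percent-encodes the values, which the raw %-format above does not (a sketch, not part of the commit):

    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode        # Python 2

    def with_headers(media_url, **headers):
        # Kodi parses everything after '|' in a play URL as urlencoded request headers
        if not headers:
            return media_url
        return media_url + "|" + urlencode(headers)

    # with_headers(media_url, Referer=page_url)
    # -> "https://.../video.mp4|Referer=https%3A%2F%2F..."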


@@ -10,8 +10,8 @@
]
},
"free": true,
"id": "ArchiveOrg",
"name": "archiveorg",
"id": "archiveorg",
"name": "ArchiveOrg",
"settings": [
{
"default": false,

servers/badshare.json Normal file (42 changed lines)

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "badshare.io/(?:plugins/mediaplayer/site/_embed.php\\?u=|)([A-z0-9]+)",
"url": "https://badshare.io/\\1"
}
]
},
"free": true,
"id": "badshare",
"name": "badshare",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://badshare.io/badshare_logo.png"
}

servers/badshare.py Normal file (32 changed lines)

@@ -0,0 +1,32 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector Badshare By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
global page
page = httptools.downloadpage(page_url)
if not page.sucess:
return False, "[Badshare] El fichero no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
ext = '.mp4'
data = page.data
data = re.sub(r'\n|\r|\t|\s{2,}', "", data)
media_url, ext = scrapertools.find_single_match(data, r'file:\s*"([^"]+)",type:\s*"([^"]+)"')
video_urls.append(["%s [Badshare]" % ext, media_url])
return video_urls


@@ -4,8 +4,12 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://bdupload.info/[A-z0-9]+)",
"pattern": "(https://bdupload.(?:info|asia)/[A-z0-9]+)",
"url": "\\1"
},
{
"pattern": "https://dl.bdupload.(?:info|asia|in)/([A-z0-9]+)",
"url": "https://bdupload.asia/\\1"
}
]
},


@@ -29,7 +29,8 @@ def get_video_url(page_url, user="", password="", video_password=""):
data1 = httptools.downloadpage(page_url, post = post, headers = headers).data
patron = "window.open\('([^']+)"
file = scrapertools.find_single_match(data1, patron).replace(" ","%20")
file += "|User-Agent=" + headers['User-Agent']
file += "|User-Agent=" + httptools.get_user_agent()
file += "&Host=fs30.indifiles.com:182"
video_urls = []
videourl = file
video_urls.append([".MP4 [bdupload]", videourl])


@@ -6,6 +6,10 @@
{
"pattern": "(http://b.ter.tv/v/[A-z0-9]+)",
"url": "\\1"
},
{
"pattern": "(https://byter.tv/v/[A-z0-9]+)",
"url": "\\1"
}
]
},


@@ -7,6 +7,10 @@
"pattern": "https://www.bitporno.com/(?:e|embed)/([A-z0-9]+)",
"url": "https://www.bitporno.com/e/\\1"
},
{
"pattern": "https://www.bitporno.com/\\?v=([A-z0-9]+)",
"url": "https://www.bitporno.com/e/\\1"
},
{
"pattern": "raptu.com/(?:\\?v=|embed/|e/|v/)([A-z0-9]+)",
"url": "https://www.bitporno.com/e/\\1"


@@ -23,7 +23,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
videourl = scrapertools.find_multiple_matches(data, '<source src="(http[^"]+).*?title="([^"]+)')
videourl = scrapertools.find_multiple_matches(data, '<source src="([^"]+)".*?label="([^"]+)"')
scrapertools.printMatches(videourl)
for scrapedurl, scrapedquality in videourl:
if "loadthumb" in scrapedurl:
@@ -32,3 +32,4 @@ def get_video_url(page_url, user="", password="", video_password=""):
video_urls.append([scrapedquality + " [bitp]", scrapedurl])
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
return video_urls

servers/bravoporn.json Normal file (78 changed lines)

@@ -0,0 +1,78 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://www.bravoporn.com/videos/[0-9]+/)",
"url": "\\1"
},
{
"pattern": "(https://www.bravotube.net/videos/[A-z0-9-]+/)",
"url": "\\1"
},
{
"pattern": "(https://xcafe.com/[0-9]+/)",
"url": "\\1"
},
{
"pattern": "(https://es.anyporn.com/[0-9]+/)",
"url": "\\1"
},
{
"pattern": "(https://www.alphaporno.com/videos/[A-z0-9-]+/)",
"url": "\\1"
},
{
"pattern": "(https://xbabe.com/videos/[A-z0-9-]+/)",
"url": "\\1"
},
{
"pattern": "(https://xcum.com/v/[0-9-]+/)",
"url": "\\1"
},
{
"pattern": "(https://sex3.com/[0-9-]+/)",
"url": "\\1"
},
{
"pattern": "(https://www.tubewolf.com/movies/[A-z0-9-]+/)",
"url": "\\1"
},
{
"pattern": "(https://anysex.com/[0-9]+/)",
"url": "\\1"
}
]
},
"free": true,
"id": "bravoporn",
"name": "bravoporn",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

servers/bravoporn.py Normal file (27 changed lines)

@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
global data, server
data = httptools.downloadpage(page_url).data
server = scrapertools.find_single_match(page_url, 'https://(?:www.|es.|)([A-z0-9-]+).(?:com|net)')
if "<h2>WE ARE SORRY</h2>" in data or '<title>404 Not Found</title>' in data:
return False, "[%s] El fichero no existe o ha sido borrado" %server
return True, ""
def get_video_url(page_url, video_password):
logger.info("(page_url='%s')" % page_url)
video_urls = []
patron = '<source (?:id="video_source_\d+" |data-fluid-hd |)src="([^"]+)".*?title="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for url,quality in matches:
url += "|Referer=%s" % page_url
logger.debug(url)
video_urls.append(["[%s] %s" %(server,quality), url])
return video_urls


@@ -5,7 +5,11 @@
"patterns": [
{
"pattern": "https://cinemaupload.com/embed/([a-zA-Z0-9]+)",
"url": "https://cinemaupload.com/embed/\\1/"
"url": "https://embed.cload.video/embed/\\1/"
},
{
"pattern": "(embed.cload.video/(?:embed|intermediate)/\\w+)",
"url": "https://\\1"
}
]
},


@@ -3,7 +3,6 @@
# Conector Cinemaupload By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from core import scrapertools
from platformcode import logger
@@ -22,8 +21,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls = []
data = httptools.downloadpage(page_url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = "source: '([^']+)',"
patron = 'file: "([^"]+)",'
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:
url += "|Referer=%s" %page_url
video_urls.append(['.m3u8 [CinemaUpload]', url])
return video_urls


@@ -13,7 +13,8 @@
"id": "clicknupload",
"name": "clicknupload",
"premium": [
"realdebrid"
"realdebrid",
"alldebrid"
],
"settings": [
{

servers/clicknupload.py Normal file → Executable file (14 changed lines)

@@ -1,6 +1,15 @@
# -*- coding: utf-8 -*-
import urllib
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # Very slow on PY2; native on PY3
else:
import urllib # Use PY2's native module, which is faster
from core import httptools
from core import scrapertools
@@ -48,7 +57,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
def get_data(url_orig, req_post=""):
try:
if not excption:
response = httptools.downloadpage(url_orig, req_post)
response = httptools.downloadpage(url_orig, post=req_post)
if not response.data or "urlopen error [Errno 1]" in str(response.code):
global excption
excption = True
@@ -57,7 +66,6 @@ def get_data(url_orig, req_post=""):
else:
raise Exception
except:
import urllib
post = {"address": url_orig.replace(".me", ".org")}
if req_post:
post["options"] = [{"man": "--data", "attribute": req_post}]
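The twelve-line interpreter check added at the top of this file (and pasted into many other connectors in this commit) aliases urllib.parse as urllib on Python 3, so PY2-era calls such as urllib.quote and urllib.urlencode keep working unchanged. A sketch of the more compact try/except idiom that achieves the same aliasing:

    PY3 = False
    try:
        import urllib.parse as urllib   # Python 3: quote/urlencode moved to urllib.parse
        PY3 = True
    except ImportError:
        import urllib                   # Python 2: the native module already has them

    # urllib.quote("a b")          -> 'a%20b'  on both interpreters
    # urllib.urlencode({"k": "v"}) -> 'k=v'    on both interpreters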


@@ -4,14 +4,18 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "clipwatching.com/((?:embed-)?[a-zA-Z0-9./_\\-\\[\\]\\(\\)]+).html",
"url": "http://clipwatching.com/\\1.html"
"pattern": "clipwatching.com/(e.*?.html)",
"url": "http://clipwatching.com/\\1"
},
{
"pattern": "clipwatching.com/(\\w+)",
"url": "http://clipwatching.com/embed-\\1.html"
}
]
},
"free": true,
"id": "clipwatching",
"name": "ClipWatching",
"name": "clipwatching",
"settings": [
{
"default": false,


@@ -1,14 +1,13 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
if "File Not Found" in data or "File was deleted" in data:
return False, config.get_localized_string(70292) % "ClipWatching"
@@ -17,18 +16,17 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
data = re.sub('\t|\n','',data)
logger.info('CLIP DATA= ' + data)
packed = scrapertools.find_single_match(data, r"text/javascript'>(.*?)\s*</script>")
try: unpacked = jsunpack.unpack(packed)
except: unpacked = data
video_urls = []
videos = scrapertools.find_multiple_matches(unpacked, r'(?:file|src):\s*"([^"]+).*?type:\s*"video/([^"]+)".*?label:\s*"([^"]+)')
for video, Type, label in videos:
logger.info(Type)
logger.info(label)
try:
packed = scrapertools.find_single_match(data, "text/javascript'>(eval.*?)\s*</script>")
unpacked = jsunpack.unpack(packed)
except:
unpacked = scrapertools.find_single_match(data,"window.hola_player.*")
videos = scrapertools.find_multiple_matches(unpacked, r'(?:file|src):\s*"([^"]+).*?label:\s*"([^"]+)')
for video, label in videos:
if ".jpg" not in video:
video_urls.append(['%s [%sp] [ClipWatching]' % (Type, label), video])
video_urls.sort(key=lambda x: x[0].split()[1])
if not label.endswith('p'):
label += 'p'
video_urls.append([label + " [clipwatching]", video])
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
return video_urls
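The new sort key assumes every entry's label has the shape '<height>p [clipwatching]', which is why the loop normalizes labels that lack the trailing 'p' first. In isolation:

    video_urls = [["720p [clipwatching]", "url_hd"], ["360p [clipwatching]", "url_sd"]]
    video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
    # -> 360p entry first, 720p last; the key is int('360'), int('720') after splitting on 'p '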


@@ -4,14 +4,14 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "cloudvideo.tv/(?:embed-)?([a-z0-9]+)(?:.html)?",
"pattern": "cloudvideo.tv/(?:embed-|)([a-z0-9]+)(?:.html|)",
"url": "https://cloudvideo.tv/embed-\\1.html"
}
]
},
"free": true,
"id": "cloudvideo",
"name": "CloudVideo",
"name": "cloudvideo",
"settings": [
{
"default": false,


@@ -36,48 +36,6 @@
],
"type": "list",
"visible": false
},
{
"default": false,
"enabled": true,
"id": "premium",
"label": "Activar cuenta premium",
"type": "bool",
"visible": true
},
{
"default": "",
"enabled": "eq(-1,true)",
"id": "user",
"label": "@30014",
"type": "text",
"visible": true
},
{
"default": "",
"enabled": "eq(-2,true)+!eq(-1,'')",
"hidden": true,
"id": "password",
"label": "@30015",
"type": "text",
"visible": true
},
{
"default": 0,
"enabled": "eq(-3,true)",
"id": "sub",
"label": "Idioma de subtítulos preferido",
"lvalues": [
"Español España",
"Español Latino",
"Inglés",
"Italiano",
"Francés",
"Portugués",
"Alemán"
],
"type": "list",
"visible": true
}
],
"thumbnail": "http://i.imgur.com/SglkLAb.png?1"

servers/crunchyroll.py Normal file → Executable file (57 changed lines)

@@ -1,5 +1,17 @@
# -*- coding: utf-8 -*-
from builtins import range
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # Very slow on PY2; native on PY3
else:
import urllib # Use PY2's native module, which is faster
import base64
import struct
import zlib
@@ -11,15 +23,14 @@ from core import scrapertools
from platformcode import config, logger
GLOBAL_HEADER = {'User-Agent': 'Mozilla/5.0', 'Accept-Language': '*'}
proxy = "http://anonymouse.org/cgi-bin/anon-www.cgi/"
proxy_i = "https://www.usa-proxy.org/index.php"
proxy = "https://www.usa-proxy.org/"
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
premium = config.get_setting("premium", server="crunchyroll")
if premium:
return login(page_url)
data = httptools.downloadpage(page_url, headers=GLOBAL_HEADER, replace_headers=True).data
data = httptools.downloadpage(page_url, headers=GLOBAL_HEADER).data
if "Este es un clip de muestra" in data:
disp = scrapertools.find_single_match(data, '<a href="/freetrial".*?</span>.*?<span>\s*(.*?)</span>')
disp = disp.strip()
@@ -30,6 +41,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
#page_url='https://www.crunchyroll.com/es-es/one-piece/episode-891-climbing-up-a-waterfall-a-great-journey-through-the-land-of-wanos-sea-zone-786643'
logger.info("url=" + page_url)
video_urls = []
if "crunchyroll.com" in page_url:
@@ -39,10 +51,13 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
url = "https://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s" \
"&video_format=0&video_quality=0&auto_play=0&aff=af-12299-plwa" % media_id
post = "current_page=%s" % page_url
data = httptools.downloadpage(url, post, headers=GLOBAL_HEADER, replace_headers=True).data
data = httptools.downloadpage(url, post=post, headers=GLOBAL_HEADER).data
if "<msg>Media not available</msg>" in data or "flash_block.png" in data:
data = httptools.downloadpage(proxy + url, post, headers=GLOBAL_HEADER, replace_headers=True,
cookies=False).data
httptools.downloadpage(proxy_i)
url = urllib.quote(url)
get = '%sbrowse.php?u=%s&b=4' % (proxy, url)
data = httptools.downloadpage(get, post=post, headers=GLOBAL_HEADER).data
media_url = scrapertools.find_single_match(data, '<file>(.*?)</file>').replace("&amp;", "&")
if not media_url:
return video_urls
@@ -54,18 +69,19 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
filename = scrapertools.get_filename_from_url(media_url)[-4:]
quality = scrapertools.find_single_match(data, '<height>(.*?)</height>')
try:
idiomas = ['Español \(España\)', 'Español\]', 'English', 'Italiano', 'Français', 'Português', 'Deutsch']
index_sub = int(config.get_setting("sub", server="crunchyroll"))
#idiomas = ['Español \(España\)', 'Español\]', 'English', 'Italiano', 'Français', 'Português', 'Deutsch']
idiomas = ['Deutsch', 'Português', 'Français', 'Italiano', 'English', 'Español\]', 'Español \(España\)']
index_sub = int(config.get_setting("crunchyrollsub", "crunchyroll"))
idioma_sub = idiomas[index_sub]
link_sub = scrapertools.find_single_match(data, "link='([^']+)' title='\[%s" % idioma_sub)
if not link_sub and index_sub == 0:
if not link_sub and index_sub == 6:
link_sub = scrapertools.find_single_match(data, "link='([^']+)' title='\[Español\]")
elif not link_sub and index_sub == 1:
elif not link_sub and index_sub == 5:
link_sub = scrapertools.find_single_match(data, "link='([^']+)' title='\[Español \(España\)")
if not link_sub:
link_sub = scrapertools.find_single_match(data, "link='([^']+)' title='\[English")
data_sub = httptools.downloadpage(link_sub.replace("&amp;", "&"), headers=GLOBAL_HEADER,
replace_headers=True).data
data_sub = httptools.downloadpage(link_sub.replace("&amp;", "&"), headers=GLOBAL_HEADER).data
id_sub = scrapertools.find_single_match(data_sub, "subtitle id='([^']+)'")
iv = scrapertools.find_single_match(data_sub, '<iv>(.*?)</iv>')
data_sub = scrapertools.find_single_match(data_sub, '<data>(.*?)</data>')
@@ -84,13 +100,13 @@ def login(page_url):
login_page = "https://www.crunchyroll.com/login"
user = config.get_setting("user", server="crunchyroll")
password = config.get_setting("password", server="crunchyroll")
data = httptools.downloadpage(login_page, headers=GLOBAL_HEADER, replace_headers=True).data
data = httptools.downloadpage(login_page, headers=GLOBAL_HEADER).data
if not "<title>Redirecting" in data:
token = scrapertools.find_single_match(data, 'name="login_form\[_token\]" value="([^"]+)"')
redirect_url = scrapertools.find_single_match(data, 'name="login_form\[redirect_url\]" value="([^"]+)"')
post = "login_form%5Bname%5D=" + user + "&login_form%5Bpassword%5D=" + password + \
"&login_form%5Bredirect_url%5D=" + redirect_url + "&login_form%5B_token%5D=" + token
data = httptools.downloadpage(login_page, post, headers=GLOBAL_HEADER, replace_headers=True).data
data = httptools.downloadpage(login_page, post=post, headers=GLOBAL_HEADER).data
if "<title>Redirecting" in data:
return True, ""
else:
@@ -108,14 +124,16 @@ def decrypt_subs(iv, data, id):
data = base64.b64decode(data.encode('utf-8'))
iv = base64.b64decode(iv.encode('utf-8'))
id = int(id)
def obfuscate_key_aux(count, modulo, start):
output = list(start)
for _ in range(count):
output.append(output[-1] + output[-2])
# cut off start values
output = output[2:]
output = list(map(lambda x: x % modulo + 33, output))
output = list([x % modulo + 33 for x in output])
return output
def obfuscate_key(key):
from math import pow, sqrt, floor
num1 = int(floor(pow(2, 25) * sqrt(6.9)))
@@ -130,6 +148,7 @@ def decrypt_subs(iv, data, id):
decshaHash.append(ord(char))
# Extend 160 Bit hash to 256 Bit
return decshaHash + [0] * 12
key = obfuscate_key(id)
key = struct.pack('B' * len(key), *key)
decryptor = jscrypto.new(key, 2, iv)
@@ -202,5 +221,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
output += ',' + event.attrib['effect']
output += ',' + event.attrib['text']
output += '\n'
output = output.encode('utf-8')
if PY3: output = output.decode("utf-8")
return output.encode('utf-8')
return output
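The PY3-relevant changes in this file are mechanical: map() is lazy on Python 3, so the obfuscation key material is materialized as a list (the list(...) wrapped around the new comprehension is redundant but harmless), and the subtitle text is encoded, then decoded again on PY3, so both interpreters hand back the type the caller expects. The map change in isolation:

    out = [1, 2, 3]
    list(map(lambda x: x % 7 + 33, out))   # PY2/PY3-safe: always a real list -> [34, 35, 36]
    [x % 7 + 33 for x in out]              # equivalent; the extra list(...) in the hunk is a no-op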


@@ -8,6 +8,8 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
response = httptools.downloadpage(page_url)
if "Contenido rechazado" in response.data:
return False, "[Dailymotion] El archivo no existe o ha sido borrado"
if response.code == 404:
return False, config.get_localized_string(70449) % "dailymotion"
return True, ""

servers/datoporn.py Normal file → Executable file (42 changed lines)

@@ -2,6 +2,7 @@
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
@@ -10,7 +11,7 @@ def test_video_exists(page_url):
data = httptools.downloadpage(page_url).data
if 'File Not Found' in data or '404 Not Found' in data:
if 'Not Found' in data or 'File is no longer available' in data:
return False, "[Datoporn] El archivo no existe o ha sido borrado"
return True, ""
@@ -18,38 +19,29 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
media_urls = scrapertools.find_multiple_matches(data, 'src: "([^"]+)",.*?label: "([^"]+)"')
#media_urls = scrapertools.find_multiple_matches(data, 'file\:"([^"]+\.mp4)",label:"([^"]+)"')
# if not media_urls:
# match = scrapertools.find_single_match(data, "p,a,c,k(.*?)</script>")
# try:
# data = jsunpack.unpack(match)
# except:
# pass
# media_urls = scrapertools.find_multiple_matches(data, 'file\:"([^"]+\.mp4)",label:"([^"]+)"')
# Extract the URL
calidades = []
video_urls = []
for media_url in sorted(media_urls, key=lambda x: int(x[1][-3:])):
calidades.append(int(media_url[1][-3:]))
if not media_urls:
match = scrapertools.find_single_match(data, "<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
try:
title = ".%s %sp [datoporn]" % (media_url[0].rsplit('.', 1)[1], media_url[1][-3:])
data = jsunpack.unpack(match)
except:
title = ".%s %sp [datoporn]" % (media_url[-4:], media_url[1][-3:])
video_urls.append([title, media_url[0]])
sorted(calidades)
m3u8 = scrapertools.find_single_match(data, 'file\:"([^"]+\.m3u8)"')
pass
media_urls = scrapertools.find_multiple_matches(data, 'file\:"([^"]+\.mp4)",label:"([^"]+)"')
# Extract the URL
for media_url, res in media_urls:
try:
title = ".%s %s [datoporn]" % (media_url.rsplit('.', 1)[1], res)
except:
title = ".%s %s [datoporn]" % (media_url[-4:], res)
video_urls.append([title, media_url])
m3u8 = scrapertools.find_single_match(data, 'src\:"([^"]+\.m3u8)"')
if not m3u8:
m3u8 = str(scrapertools.find_multiple_matches(data, 'player.updateSrc\({src:.?"([^"]+\.m3u8)"')).replace("['", "").replace("']", "")
calidades = ['720p']
if m3u8:
video_urls.insert(0, [".m3u8 %s [datoporn]" % calidades[-1], m3u8])
video_urls.insert(0, [".m3u8 720p [datoporn]" , m3u8])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls

servers/debriders/realdebrid.json Normal file → Executable file (8 changed lines)

@@ -11,6 +11,14 @@
"label": "@70272",
"type": "bool",
"visible": true
},
{
"default": "",
"enabled": "eq(-1,true)",
"id": "token",
"label": "Token (autentificación alternativa)",
"type": "text",
"visible": true
}
]
}

servers/debriders/realdebrid.py Normal file → Executable file (54 changed lines)

@@ -1,20 +1,30 @@
# -*- coding: utf-8 -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # Very slow on PY2; native on PY3
else:
import urllib # Use PY2's native module, which is faster
import time
import urllib
from core import httptools
from core import jsontools
from core import scrapertools
from platformcode import config, logger
from platformcode import platformtools
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0'}
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:65.0) Gecko/20100101 Firefox/65.0'}
# Returns an array of possible video URLs from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s' , video_password=%s)" % (page_url, video_password))
page_url = page_url.replace(".nz/embed", ".nz/")
# Check for a saved token; if there is none, run the authentication flow
token_auth = config.get_setting("token", server="realdebrid")
if token_auth is None or token_auth == "":
@@ -28,11 +38,19 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
post_link = urllib.urlencode([("link", page_url), ("password", video_password)])
headers["Authorization"] = "Bearer %s" % token_auth
url = "https://api.real-debrid.com/rest/1.0/unrestrict/link"
data = httptools.downloadpage(url, post=post_link, headers=headers.items()).data
data = jsontools.load(data)
data = httptools.downloadpage(url, post=post_link, headers=list(headers.items())).json
logger.error(data)
check = config.get_setting("secret", server="realdebrid")
# Authentication was done through urlresolver (bad idea)
if "error" in data and data["error"] == "bad_token" and not check:
token_auth = authentication()
headers["Authorization"] = "Bearer %s" % token_auth
data = httptools.downloadpage(url, post=post_link, headers=list(headers.items())).json
# If the token is wrong or has expired, request a new one
if "error" in data and data["error"] == "bad_token":
elif "error" in data and data["error"] == "bad_token":
debrid_id = config.get_setting("id", server="realdebrid")
secret = config.get_setting("secret", server="realdebrid")
refresh = config.get_setting("refresh", server="realdebrid")
@@ -40,15 +58,16 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
post_token = urllib.urlencode({"client_id": debrid_id, "client_secret": secret, "code": refresh,
"grant_type": "http://oauth.net/grant_type/device/1.0"})
renew_token = httptools.downloadpage("https://api.real-debrid.com/oauth/v2/token", post=post_token,
headers=headers.items()).data
renew_token = jsontools.load(renew_token)
headers=list(headers.items())).json
if not "error" in renew_token:
token_auth = renew_token["access_token"]
config.set_setting("token", token_auth, server="realdebrid")
headers["Authorization"] = "Bearer %s" % token_auth
data = httptools.downloadpage(url, post=post_link, headers=headers.items()).data
data = jsontools.load(data)
data = httptools.downloadpage(url, post=post_link, headers=list(headers.items())).json
else:
token_auth = authentication()
headers["Authorization"] = "Bearer %s" % token_auth
data = httptools.downloadpage(url, post=post_link, headers=list(headers.items())).json
if "download" in data:
return get_enlaces(data)
else:
@@ -87,8 +106,7 @@ def authentication():
# Request the url and verification code needed to grant the app permission
url = "http://api.real-debrid.com/oauth/v2/device/code?client_id=%s&new_credentials=yes" % (client_id)
data = httptools.downloadpage(url, headers=headers.items()).data
data = jsontools.load(data)
data = httptools.downloadpage(url, headers=list(headers.items())).json
verify_url = data["verification_url"]
user_code = data["user_code"]
device_code = data["device_code"]
@@ -108,8 +126,7 @@ def authentication():
url = "https://api.real-debrid.com/oauth/v2/device/credentials?client_id=%s&code=%s" \
% (client_id, device_code)
data = httptools.downloadpage(url, headers=headers.items()).data
data = jsontools.load(data)
data = httptools.downloadpage(url, headers=list(headers.items())).json
if "client_secret" in data:
# Code entered; exit the loop
break
@@ -127,9 +144,8 @@ def authentication():
# Request the access token, plus the refresh token for when the first one expires
post = urllib.urlencode({"client_id": debrid_id, "client_secret": secret, "code": device_code,
"grant_type": "http://oauth.net/grant_type/device/1.0"})
data = htttools.downloadpage("https://api.real-debrid.com/oauth/v2/token", post=post,
headers=headers.items()).data
data = jsontools.load(data)
data = httptools.downloadpage("https://api.real-debrid.com/oauth/v2/token", post=post,
headers=list(headers.items())).json
token = data["access_token"]
refresh = data["refresh_token"]
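The retry ladder above is all plumbing around Real-Debrid's OAuth device flow: request a device/user code, poll /device/credentials until the user authorizes it in a browser, then exchange the device code for access and refresh tokens. A condensed sketch of that flow with the endpoints used above (error handling and the settings persistence omitted; the headers dict stands in for the module-level one):

    import time
    from core import httptools
    try:
        import urllib.parse as urllib   # same PY2/PY3 alias as at the top of this file
    except ImportError:
        import urllib

    headers = {'User-Agent': 'Mozilla/5.0'}

    def device_auth(client_id, poll_every=5, attempts=60):
        # 1) get a device code plus the short user code typed in at the verification URL
        code = httptools.downloadpage(
            "http://api.real-debrid.com/oauth/v2/device/code?client_id=%s&new_credentials=yes"
            % client_id).json
        # 2) poll until the user has authorized the code in the browser
        for _ in range(attempts):
            cred = httptools.downloadpage(
                "https://api.real-debrid.com/oauth/v2/device/credentials?client_id=%s&code=%s"
                % (client_id, code["device_code"])).json
            if "client_secret" in cred:
                break                    # authorized: we now hold an id/secret pair
            time.sleep(poll_every)
        # 3) trade the device code for access + refresh tokens
        post = urllib.urlencode({"client_id": cred["client_id"],
                                 "client_secret": cred["client_secret"],
                                 "code": code["device_code"],
                                 "grant_type": "http://oauth.net/grant_type/device/1.0"})
        return httptools.downloadpage("https://api.real-debrid.com/oauth/v2/token",
                                      post=post, headers=list(headers.items())).json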

servers/decrypters/adfly.py Normal file → Executable file (2 changed lines)

@@ -13,7 +13,7 @@ def get_long_url(short_url):
data = httptools.downloadpage(short_url).data
ysmm = scrapertools.find_single_match(data, "var ysmm = '([^']+)';")
b64 = ""
for i in reversed(range(len(ysmm))):
for i in reversed(list(range(len(ysmm)))):
if i % 2:
b64 = b64 + ysmm[i]
else:
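The fix here is PY3 housekeeping: Python 2's range() returns a list, which reversed() accepts directly, and the futurize-style list(range(...)) keeps the expression valid on Python 3 as well (strictly speaking, PY3 range objects implement __reversed__, so the wrapper is belt-and-braces). For example:

    ysmm = "abcdef"   # illustrative; the real value is scraped above
    list(reversed(list(range(len(ysmm)))))   # -> [5, 4, 3, 2, 1, 0] on PY2 and PY3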

servers/decrypters/linkbucks.py Normal file → Executable file (11 changed lines)

@@ -1,6 +1,15 @@
# -*- coding: utf-8 -*-
import urllib
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # Very slow on PY2; native on PY3
else:
import urllib # Use PY2's native module, which is faster
from core import scrapertools
from platformcode import logger

servers/decrypters/longurl.py Normal file → Executable file (14 changed lines)

@@ -1,7 +1,17 @@
# -*- coding: utf-8 -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # Very slow on PY2; native on PY3
else:
import urllib # Use PY2's native module, which is faster
import re
import urllib
from core import httptools
from core import scrapertools
@@ -41,5 +51,5 @@ def get_long_urls(data):
long_url = scrapertools.scrapertools.find_single_match(longurl_data, '<long-url><!\[CDATA\[(.*?)\]\]></long-url>')
except:
long_url = ""
if (long_url <> ""): data = data.replace(short_url, long_url)
if (long_url != ""): data = data.replace(short_url, long_url)
return data


@@ -6,7 +6,6 @@ from platformcode import logger, config
# Returns an array of possible video URLs from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.info('PAGE URL= ' + page_url)
video_urls = [["%s %s" % (page_url[-4:], config.get_localized_string(30137)), page_url]]

servers/drtuber.json Normal file (42 changed lines)

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "http://www.drtuber.com/embed/([0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "drtuber",
"name": "drtuber",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

servers/drtuber.py Normal file (23 changed lines)

@@ -0,0 +1,23 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def get_video_url(page_url, video_password):
logger.info("(page_url='%s')" % page_url)
video_urls = []
url = "https://www.drtuber.com/player_config_json/?vid=%s&aid=0&domain_id=0&embed=0&ref=null&check_speed=0" %page_url
data = httptools.downloadpage(url).data
data = scrapertools.find_single_match(data, '"files":(.*?)"quality"')
patron = '"([lh])q":"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for quality, scrapedurl in matches:
url = scrapedurl.replace("\/", "/")
if "l" in quality: quality = "360"
if "h" in quality: quality = "720"
video_urls.append(["[drtuber] %s" %quality, url])
return video_urls
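This connector (and the near-identical iceporn.py later in the commit) regexes the player-config endpoint even though /player_config_json/ returns JSON. A sketch reading the same fields structurally, assuming the response shape implied by the patterns above ("files" holding "lq"/"hq" URLs; the .json response property is used elsewhere in this commit):

    from core import httptools

    def get_qualities(vid):
        url = ("https://www.drtuber.com/player_config_json/"
               "?vid=%s&aid=0&domain_id=0&embed=0&ref=null&check_speed=0" % vid)
        cfg = httptools.downloadpage(url).json
        files = cfg.get("files") or {}
        labels = {"lq": "360", "hq": "720"}
        # json parsing already unescapes the \/ sequences the regex version has to fix up
        return [["[drtuber] %s" % labels[k], files[k]] for k in ("lq", "hq") if files.get(k)]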

servers/eporner.json Normal file (42 changed lines)

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://www.eporner.com/hd-porn/[A-z0-9-]+/[A-z0-9-]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "eporner",
"name": "eporner",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

servers/eporner.py Normal file (48 changed lines)

@@ -0,0 +1,48 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
if "<h2>WE ARE SORRY</h2>" in data or '<title>404 Not Found</title>' in data:
return False, "[eporner] El fichero no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, video_password):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
patron = "EP: {vid: '([^']+)',hash: '([^']+)'"
vid, hash = re.compile(patron, re.DOTALL).findall(data)[0]
hash = int_to_base36(int(hash[0:8], 16)) + int_to_base36(int(hash[8:16], 16)) + int_to_base36(
int(hash[16:24], 16)) + int_to_base36(int(hash[24:32], 16))
url = "https://www.eporner.com/xhr/video/%s?hash=%s" % (vid, hash)
jsondata = httptools.downloadpage(url).json
for source in jsondata["sources"]["mp4"]:
url = jsondata["sources"]["mp4"][source]["src"]
title = source.split(" ")[0]
video_urls.append(["[eporner] %s"% title, url])
return video_urls
# return sorted(video_urls, key=lambda i: int(i[0].split("p")[1]))
def int_to_base36(num):
"""Converts a positive integer into a base36 string."""
assert num >= 0
digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'.lower()
res = ''
while not res or num > 0:
num, i = divmod(num, 36)
res = digits[i] + res
return res
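The hash transform above re-encodes the 32-character hex hash in four 8-digit chunks, each parsed as an int and rendered in base 36, which is what the xhr endpoint above is given in its ?hash= parameter. Quick checks of the helper:

    int_to_base36(0)    # -> '0'   (the `while not res` clause makes zero produce one digit)
    int_to_base36(35)   # -> 'z'
    int_to_base36(36)   # -> '10'
    # the four-chunk transform is equivalent to:
    # ''.join(int_to_base36(int(hash[i:i + 8], 16)) for i in range(0, 32, 8))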


@@ -1,5 +1,5 @@
{
"active": true,
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [
@@ -43,4 +43,4 @@
}
],
"thumbnail": "https://s14.postimg.cc/ibd54ayf5/estream.png"
}
}


@@ -4,7 +4,6 @@
# --------------------------------------------------------
import re
from core import httptools
from platformcode import logger

servers/facebook.py Normal file → Executable file (12 changed lines)

@@ -1,7 +1,17 @@
# -*- coding: utf-8 -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # Very slow on PY2; native on PY3
else:
import urllib # Use PY2's native module, which is faster
import re
import urllib
from core import httptools
from core import scrapertools

servers/fastplay.json Normal file → Executable file (2 changed lines)

@@ -5,7 +5,7 @@
"patterns": [
{
"pattern": "fastplay.(?:to|cc|sx)/(?:flash-|embed-|)([A-z0-9]+)",
"url": "http://fastplay.cc/embed-\\1.html"
"url": "http://fastplay.to/embed-\\1.html"
}
]
},


@@ -7,8 +7,13 @@ from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Sorry 404 not found" in data:
return False, config.get_localized_string(70292) % "Fembed"
if "Sorry 404 not found" in data or "This video is unavailable" in data or "Sorry this video is unavailable:" in data:
return False, "[fembed] El fichero ha sido borrado"
page_url = page_url.replace("/f/","/v/")
page_url = page_url.replace("/v/","/api/source/")
data = httptools.downloadpage(page_url, post={}).data
if "Video not found or" in data:
return False, "[fembed] El fichero ha sido borrado"
return True, ""
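The added probe leans on how the player itself resolves media: the /v/<id> (or /f/<id>) page is only a shell, and an empty POST to /api/source/<id> returns the JSON that lists the real files, so its error text is a reliable existence check. A sketch of taking that one step further and listing the sources; the success/data field names are an assumption about Fembed's API shape, not something shown in the hunk:

    from core import httptools

    def fembed_sources(page_url):
        api_url = page_url.replace("/f/", "/v/").replace("/v/", "/api/source/")
        res = httptools.downloadpage(api_url, post={}).json
        if not res.get("success"):
            return []   # deleted or never existed
        # assumed shape: {"success": true, "data": [{"file": "...", "label": "480p"}, ...]}
        return [["%s [fembed]" % item["label"], item["file"]] for item in res.get("data", [])]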


@@ -4,7 +4,6 @@
# -*- By the Alfa Develop Group -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger


@@ -16,6 +16,9 @@
"free": true,
"id": "flashx",
"name": "flashx",
"premium": [
"realdebrid"
],
"settings": [
{
"default": false,


@@ -1,30 +1,61 @@
# -*- coding: utf-8 -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # Very slow on PY2; native on PY3
else:
import urllib # Use PY2's native module, which is faster
import os
import time
import urllib
from core import httptools, scrapertools
from lib import jsunpack
from platformcode import config, logger
from platformcode import config, logger, platformtools
flashx_data = ""
flashx_hash_f = ""
flashx_post = ""
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, cookies=False).data
if 'file was deleted' in data or 'File Not Found (Deleted or Abused)' in data:
global flashx_data
try:
flashx_data = httptools.downloadpage(page_url, cookies="xfsts=pfp5dj3e6go1l2o1").data
except:
try:
flashx_data = httptools.downloadpage(page_url).data
except:
return False, config.get_localized_string(70296) % "FlashX"
bloque = scrapertools.find_single_match(flashx_data, '(?s)Form method="POST" action(.*?)span')
flashx_id = scrapertools.find_single_match(bloque, 'name="id" value="([^"]+)"')
fname = scrapertools.find_single_match(bloque, 'name="fname" value="([^"]+)"')
global flashx_hash_f
flashx_hash_f = scrapertools.find_single_match(bloque, 'name="hash" value="([^"]+)"')
imhuman = scrapertools.find_single_match(bloque, "value='([^']+)' name='imhuman'")
global flashx_post
flashx_post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=%s' % (
flashx_id, urllib.quote(fname), flashx_hash_f, imhuman)
if 'file was deleted' in flashx_data or 'File Not Found (Deleted or Abused)' in flashx_data:
return False, config.get_localized_string(70292) % "FlashX"
elif 'Video is processing now' in data:
elif 'Video is processing now' in flashx_data:
return False, config.get_localized_string(70293) % "FlashX"
elif 'Too many views per minute' in flashx_data:
return False, config.get_localized_string(70300) % "FlashX"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
pfxfx = ""
data = httptools.downloadpage(page_url, cookies=False).data
data = flashx_data
data = data.replace("\n", "")
cgi_counter = scrapertools.find_single_match(data,
"""(?is)src=.(https://www.flashx.../counter.cgi.*?[^(?:'|")]+)""")
@@ -33,43 +64,40 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
# To obtain the f and the fxfx
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//", "/"),
"""(?is)(flashx.../js\w+/c\w+.*?[^(?:'|")]+)""")
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'", "").replace(" ", "")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
for f, v in matches:
pfxfx += f + "=" + v + "&"
if len(js_fxfx) > 15:
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'", "").replace(" ", "")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
for f, v in matches:
pfxfx += f + "=" + v + "&"
logger.info("mfxfxfx1= %s" % js_fxfx)
logger.info("mfxfxfx2= %s" % pfxfx)
if pfxfx == "":
pfxfx = "f=fail&fxfx=6"
coding_url = 'https://www.flashx.co/flashx.php?%s' % pfxfx
# {f: 'y', fxfx: '6'}
bloque = scrapertools.find_single_match(data, '(?s)Form method="POST" action(.*?)span')
flashx_id = scrapertools.find_single_match(bloque, 'name="id" value="([^"]+)"')
fname = scrapertools.find_single_match(bloque, 'name="fname" value="([^"]+)"')
hash_f = scrapertools.find_single_match(bloque, 'name="hash" value="([^"]+)"')
imhuman = scrapertools.find_single_match(bloque, "value='([^']+)' name='imhuman'")
post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=%s' % (
flashx_id, urllib.quote(fname), hash_f, imhuman)
wait_time = scrapertools.find_single_match(data, "<span id='xxc2'>(\d+)")
# Downloading these 2 files is mandatory; otherwise an error is shown
httptools.downloadpage(coding_url, cookies=False)
httptools.downloadpage(cgi_counter, cookies=False)
ts = int(time.time())
flash_ts = scrapertools.find_single_match(flashx_hash_f, '-(\d{10})-')
wait_time = int(flash_ts) - ts
platformtools.dialog_notification('Cargando flashx', 'Espera de %s segundos requerida' % wait_time)
try:
time.sleep(int(wait_time) + 1)
time.sleep(wait_time)
except:
time.sleep(6)
data = httptools.downloadpage(playnow, post).data
data = httptools.downloadpage(playnow, post = flashx_post).data
# If the warning appears, load the verification page and then the initial one
# LICENSE GPL3, from alfa-addon: https://github.com/alfa-addon/ ADDING THESE LINES IS MANDATORY
if "You try to access this video with Kodi" in data:
url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"')
try:
data = httptools.downloadpage(url_reload, cookies=False).data
data = httptools.downloadpage(playnow, post, cookies=False).data
data = httptools.downloadpage(url_reload).data
data = httptools.downloadpage(playnow, post = flashx_post).data
# LICENSE GPL3, from alfa-addon: https://github.com/alfa-addon/ ADDING THESE LINES IS MANDATORY
except:
pass
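The reworked wait no longer reads the on-page counter: the hash field captured from the form embeds a 10-digit epoch, and the connector sleeps until that moment before posting. The timestamp arithmetic in miniature (the hash value here is illustrative):

    import re, time

    flashx_hash_f = "xxxx-1583525452-yyyy"              # illustrative; scraped from the form above
    flash_ts = re.search(r"-(\d{10})-", flashx_hash_f).group(1)
    wait_time = int(flash_ts) - int(time.time())        # seconds until the POST is accepted
    time.sleep(max(wait_time, 0))                       # the code above falls back to 6s on error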


@@ -1,11 +1,11 @@
{
"active": true,
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "flix555.com/([A-z0-9]+)",
"url": "https://flix555.com/\\1"
"pattern": "flix555.com/(?:embed-|)([A-z0-9]+)",
"url": "https://flix555.com/embed-\\1.html"
}
]
},


@@ -1,49 +1,43 @@
# -*- coding: utf-8 -*-
import time
import urllib
import re
from core import httptools, scrapertools
from lib import jsunpack
from platformcode import logger, platformtools
from platformcode import logger
data = ""
def test_video_exists(page_url):
resp = httptools.downloadpage(page_url)
if resp.code == 404 or '<b>File Not Found</b>' in resp.data:
global data
data = resp.data
if resp.code == 404 or '<b>File Not Found</b>' in resp.data or "<b>File is no longer available" in resp.data:
return False, "[flix555] El video no está disponible"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
itemlist = []
data = httptools.downloadpage(page_url).data
# ~ logger.info(data)
post = {}
inputs = scrapertools.find_multiple_matches(data, '<input type="(?:hidden|submit)" name="([^"]*)" value="([^"]*)"')
for nom, valor in inputs: post[nom] = valor
post = urllib.urlencode(post)
# ~ logger.info(post)
espera = scrapertools.find_single_match(data, '<span id="cxc">(\d+)</span>')
platformtools.dialog_notification('Cargando flix555', 'Espera de %s segundos requerida' % espera)
time.sleep(int(espera))
data = httptools.downloadpage(page_url, post=post).data
# ~ logger.info(data)
packed = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed)
# ~ logger.info(unpacked)
unpacked = re.sub(r'\n|\r|\t|\s{2,}', "", unpacked)
subtitles = scrapertools.find_single_match(unpacked, r'tracks:\s*\[\{\s*file\s*:\s*"([^"]*)"\s*,\s*label')
if "empty." in subtitles: subtitles = ""
matches = scrapertools.find_multiple_matches(unpacked, 'file\s*:\s*"([^"]*)"\s*,\s*label\s*:\s*"([^"]*)"')
if matches:
for url, lbl in matches:
if not url.endswith('.srt'):
itemlist.append(['[%s]' % lbl, url])
if url.endswith('.srt') or url.endswith('.vtt'):
#subtitles += url
continue
itemlist.append(['.mp4 (%s) [flix555]' % lbl, url, 0, subtitles])
url = scrapertools.find_single_match(unpacked, 'file\s*:\s*"([^"]*)"\s*')
if url:
if not url.endswith('.srt') or not url.endswith('.vtt'):
itemlist.append(['.m3u8 [flix555]', url, 0, subtitles])
return itemlist

servers/gamovideo.json Normal file → Executable file (2 changed lines)

@@ -5,7 +5,7 @@
"patterns": [
{
"pattern": "gamovideo.com/(?:embed-|)([a-z0-9]+)",
"url": "http://gamovideo.com/\\1"
"url": "http://gamovideo.com/embed-\\1.html"
}
]
},

servers/gamovideo.py Normal file → Executable file (80 changed lines)

@@ -1,39 +1,77 @@
# -*- coding: utf-8 -*-
import re
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import re
import random
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
if not PY3: from lib import alfaresolver
else: from lib import alfaresolver_py3 as alfaresolver
headers = {"User-Agent":"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0"}
ver = random.randint(66, 67)
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:%s.0) Gecko/20100101 Firefox/%s.0" % (ver, ver)}
DATA = ''
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, headers=headers, cookies=False).data
if "File was deleted" in data or "<noscript>" not in data or "File was locked by administrator" in data:
data = alfaresolver.get_data(page_url, False)
if not "|mp4|" in data:
dict_cookie = {'domain': '.gamovideo.com', 'expires': 0}
httptools.set_cookies(dict_cookie)
data = alfaresolver.get_data(page_url, False)
global DATA
DATA = data
if "images/proced.png" in data:
return False, "[Gamovideo] El archivo no existe o ha sido borrado"
if "File was deleted" in data or ("Not Found" in data and not "|mp4|" in data) or "File was locked by administrator" in data:
return False, "[Gamovideo] El archivo no existe o ha sido borrado"
if "Video is processing now" in data:
return False, "[Gamovideo] El video está procesándose en estos momentos. Inténtelo mas tarde."
if "File is awaiting for moderation" in data:
return False, "[Gamovideo] El video está esperando por moderación."
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, headers=headers, cookies=False).data
data = DATA
packer = scrapertools.find_single_match(data,
"<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
if packer != "":
data = jsunpack.unpack(packer)
if packer != "":
try:
data = jsunpack.unpack(packer)
except:
pass
else:
original = data
n = 0
referer = page_url.replace("embed-","").replace(".html", "")
data = ""
while n < 3 and not data:
data1 = alfaresolver.get_data(page_url, False)
check_c, data = get_gcookie(data1, True)
if check_c == False:
logger.error("Error get gcookie")
n += 1
data = re.sub(r'\n|\t|\s+', '', data)
host = scrapertools.find_single_match(data, '\[\{image:"(http://[^/]+/)')
mediaurl = scrapertools.find_single_match(data, ',\{file:"([^"]+)"')
host = scrapertools.find_single_match(data, r'\[\{image:"(http://[^/]+/)')
mediaurl = scrapertools.find_single_match(data, r',\{file:"([^"]+)"')
if not mediaurl.startswith(host):
mediaurl = host + mediaurl
@@ -50,3 +88,27 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
def get_gcookie(data, realcheck=False):
packer = scrapertools.find_single_match(data,
"<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
if packer != "" and realcheck:
try:
data = jsunpack.unpack(packer)
return True, data
except:
pass
patron = '\("\d","(\d)",\d\).*?\'(\w+)'
scraper = scrapertools.find_single_match(data,patron)
if scraper:
gcookie = "%s=%s;" % (scraper[1], scraper[0])
try:
old_gcookie = headers['Cookie']
if gcookie != old_gcookie:
gcookie = old_gcookie+' '+gcookie
except:
pass
headers.update({"Cookie": gcookie})
return True, ""
else:
return False, ""
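Both paths in this file end up at the same decoding step: the page ships an eval(function(p,a,c,k,e,d)...) blob, and lib.jsunpack rebuilds the plain JavaScript so the image:/file: pairs become visible to the regexes above. That step in isolation:

    from core import scrapertools
    from lib import jsunpack

    def unpack_player(page_html):
        packed = scrapertools.find_single_match(
            page_html, "<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
        if not packed:
            return ""
        try:
            # after unpacking, matches like ,{file:"http://host/video.mp4"} can be extracted
            return jsunpack.unpack(packed)
        except Exception:
            return ""   # malformed packer; the code above retries or falls back in that case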

servers/googlevideo.py Normal file → Executable file (1 changed line)

@@ -3,6 +3,7 @@
import re
from core import httptools
from core import scrapertools
from platformcode import logger

servers/gotporn.json Normal file (58 changed lines)

@@ -0,0 +1,58 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://www.gotporn.com/[A-z0-9-]+/[A-z0-9-]+)",
"url": "\\1"
},
{
"pattern": "(https://mylust.com/videos/[0-9]+/[A-z0-9-]+/)",
"url": "\\1"
},
{
"pattern": "(https://www.stileproject.com/embed/[0-9]+)",
"url": "\\1"
},
{
"pattern": "(https://www.pornwatchers.com/embed/[0-9]+)",
"url": "\\1"
},
{
"pattern": "(https://www.pornrabbit.com/embed/[0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "gotporn",
"name": "gotporn",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

servers/gotporn.py Normal file (26 changed lines)

@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
if "<h2>WE ARE SORRY</h2>" in data or '<title>404 Not Found</title>' in data:
return False, "[gotporn] El fichero no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, video_password):
logger.info("(page_url='%s')" % page_url)
video_urls = []
url = scrapertools.find_single_match(data,'<source (?:id="video_source_\d+" |)src="([^"]+)" type=(?:\'|")video/mp4(?:\'|")')
host = scrapertools.find_single_match(page_url, '(https://.*?.com)')
url += "|Referer=%s" % host
logger.debug(url)
server = scrapertools.find_single_match(page_url, 'https://(?:www.|)([A-z0-9-]+).com')
video_urls.append(["[%s]" %server, url])
return video_urls


@@ -4,8 +4,12 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "gounlimited.to/(?:embed-|)([a-z0-9]+)(?:.html|)",
"url": "https://gounlimited.to/\\1.html"
"pattern": "https://gounlimited.to/(?:embed-|)(\\w+)(?:\\.html|/|)",
"url": "https://gounlimited.to/embed-\\1.html"
},
{
"pattern": "https://gounlimited.to/(\\w+)/\\w+.",
"url": "https://gounlimited.to/embed-\\1.html"
}
]
},


@@ -12,7 +12,7 @@
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
"pattern": "https://drive.google.com/uc\\?id=([A-z0-9-_=]+)",
"pattern": "http(?:s:|:)//drive.google.com/(?:uc|open)\\?id=([A-z0-9-_=]+)",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
@@ -28,6 +28,10 @@
"free": true,
"id": "gvideo",
"name": "gvideo",
"premium": [
"realdebrid",
"alldebrid"
],
"settings": [
{
"default": false,


@@ -1,6 +1,15 @@
# -*- coding: utf-8 -*-
import urllib
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # Very slow on PY2; native on PY3
else:
import urllib # Use PY2's native module, which is faster
from core import httptools
from core import scrapertools
@@ -11,8 +20,11 @@ def test_video_exists(page_url):
if 'googleusercontent' in page_url:
return True, "" # verification disabled because it stalls!
response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
if "no+existe" in response.data:
response = httptools.downloadpage(page_url, headers={"Referer": page_url})
global page
page = response
if "no+existe" in response.data or 'no existe.</p>' in response.data:
return False, "[gvideo] El video no existe o ha sido borrado"
if "Se+ha+excedido+el" in response.data:
return False, "[gvideo] Se ha excedido el número de reproducciones permitidas"
@@ -28,50 +40,42 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
logger.info()
video_urls = []
urls = []
streams =[]
logger.debug('page_url: %s'%page_url)
if 'googleusercontent' in page_url:
response = httptools.downloadpage(page_url, follow_redirects = False, cookies=False, headers={"Referer": page_url})
url=response.headers['location']
if "set-cookie" in response.headers:
try:
cookies = ""
cookie = response.headers["set-cookie"].split("HttpOnly, ")
for c in cookie:
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = "|Cookie=" + cookies
except:
headers_string = ""
else:
headers_string = ""
url = page_url
headers_string = httptools.get_url_headers(page_url, forced=True)
quality = scrapertools.find_single_match (url, '.itag=(\d+).')
if not quality:
quality = '59'
streams.append((quality, url))
else:
response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
cookies = ""
cookie = response.headers["set-cookie"].split("HttpOnly, ")
for c in cookie:
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = page.data
bloque= scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map(.*)')
if bloque:
data = bloque
data = data.decode('unicode-escape', errors='replace')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = "|Cookie=" + cookies
url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
streams = scrapertools.find_multiple_matches(url_streams,
headers_string = httptools.get_url_headers(page_url, forced=True)
streams = scrapertools.find_multiple_matches(data,
'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
itags = {'18': '360p', '22': '720p', '34': '360p', '35': '480p', '37': '1080p', '43': '360p', '59': '480p'}
for itag, video_url in streams:
if not video_url in urls:
video_url += headers_string
video_urls.append([itags[itag], video_url])
video_urls.append([itags.get(itag, ''), video_url])
urls.append(video_url)
video_urls.sort(key=lambda video_urls: int(video_urls[0].replace("p", "")))
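itags.get(itag, '') hardens the label lookup: streams occasionally report itags outside this small map, and the plain indexing used before raised KeyError and killed the whole resolve. Note that an empty default still trips the int(...) in the final sort, so a numeric fallback would arguably be safer:

    itags = {'18': '360p', '22': '720p', '34': '360p', '35': '480p',
             '37': '1080p', '43': '360p', '59': '480p'}
    itags.get('22', '')    # -> '720p'
    itags.get('96', '')    # -> ''    (no KeyError, but int(''.replace('p', '')) would raise)
    itags.get('96', '0p')  # -> '0p'  (a default that survives the sort key)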

servers/hclips.json Normal file (42 changed lines)

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://hclips.com/embed/([0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "hclips",
"name": "hclips",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://hclips.com/favicon-194x194.png"
}

servers/hclips.py Normal file (41 changed lines)

@@ -0,0 +1,41 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector hclips By Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
if "<h2>WE ARE SORRY</h2>" in data or '<title>404 Not Found</title>' in data:
return False, config.get_localized_string(70449) % "hclips"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
url = "https://hclips.com/api/videofile.php?video_id=%s&lifetime=8640000" % page_url
headers = {'Referer': "https://hclips.com/embed/%s/" % page_url}
data = httptools.downloadpage(url, headers=headers).data
texto = scrapertools.find_single_match(data, 'video_url":"([^"]+)"')
url = dec_url(texto)
url = "https://hclips.com%s" % url
media_url = httptools.downloadpage(url, only_headers=True).url
video_urls.append(["[hclips]", media_url])
return video_urls
def dec_url(txt):
# crude trick
# txt = txt.replace('\u0410', 'A').replace('\u0412', 'B').replace('\u0421', 'C').replace('\u0415', 'E').replace('\u041c', 'M').replace('~', '=').replace(',','/')
txt = txt.decode('unicode-escape').encode('utf8')
txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M').replace('~', '=').replace(',','/')
import base64
url = base64.b64decode(txt)
return url
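dec_url undoes a homoglyph layer: the API hides base64 by swapping A/B/C/E/M for Cyrillic lookalikes, '=' for '~' and '/' for ','. The .decode('unicode-escape') call only exists on PY2 strings; a PY3-safe sketch of the same reversal, assuming txt already arrives as text with the \u escapes resolved:

    import base64

    def dec_url_py3(txt):
        # map the Cyrillic lookalikes back to ASCII and undo the '=' and '/' swaps
        table = str.maketrans({'А': 'A', 'В': 'B', 'С': 'C', 'Е': 'E', 'М': 'M',
                               '~': '=', ',': '/'})
        return base64.b64decode(txt.translate(table)).decode('utf-8')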

servers/hugefiles.py Normal file → Executable file (12 changed lines)

@@ -1,7 +1,17 @@
# -*- coding: utf-8 -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # Very slow on PY2; native on PY3
else:
import urllib # Use PY2's native module, which is faster
import re
import urllib
from core import httptools
from core import scrapertools

servers/iceporn.json Normal file (42 changed lines)

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "http://www.iceporn.com/embed/([0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "iceporn",
"name": "iceporn",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

23
servers/iceporn.py Normal file
View File

@@ -0,0 +1,23 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def get_video_url(page_url, video_password):
logger.info("(page_url='%s')" % page_url)
video_urls = []
url = "https://www.iceporn.com/player_config_json/?vid=%s&aid=0&domain_id=0&embed=0&ref=null&check_speed=0" %page_url
data = httptools.downloadpage(url).data
data = scrapertools.find_single_match(data, '"files":(.*?)"quality"')
patron = '"([lh])q":"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for quality, scrapedurl in matches:
url = scrapedurl.replace("\/", "/")
if "l" in quality: quality = "360"
if "h" in quality: quality = "720"
video_urls.append(["[iceporn] %s" %quality, url])
return video_urls

42
servers/idtbox.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://idtbox.com/(?:embed-|)([A-z0-9]+)",
"url": "https://idtbox.com/embed-\\1.html"
}
]
},
"free": true,
"id": "idtbox",
"name": "idtbox",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://idtbox.com/img/idtbox.png"
}

38
servers/idtbox.py Normal file
View File

@@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector Idtbox By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools, scrapertools
from platformcode import logger, platformtools
data = ""
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url)
if not data.sucess or "Not Found" in data.data or "File was deleted" in data.data or "is no longer available" in data.data:
return False, "[Idtbox] El archivo no existe o ha sido borrado"
data = data.data
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug(data) # full page dump belongs at debug level
video_urls = []
patron = 'source src="([^"]+)" type="([^"]+)" res=(\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, ext, res in matches:
res = res+'p'
try:
ext = ext.split("/")[1]
except:
ext = ".mp4"
video_urls.append(["%s (%s) [idtbox]" % (ext, res), url])
return video_urls

69
servers/javwhores.json Normal file
View File

@@ -0,0 +1,69 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://www.javbangers.com/video/[0-9]+/[A-z0-9-]+/)",
"url": "\\1"
},
{
"pattern": "(https://www.fapnado.com/videos/[0-9]+/[A-z0-9-]+/)",
"url": "\\1"
},
{
"pattern": "(https://www.xozilla.com/videos/[0-9]+/[A-z0-9-]+/)",
"url": "\\1"
},
{
"pattern": "(https://www.camwhoresbay.com/videos/[0-9]+/[A-z0-9-]+/)",
"url": "\\1"
}, {
"pattern": "(https://www.analdin.com/es/videos/[0-9]+/[A-z0-9-]+/)",
"url": "\\1"
},
{
"pattern": "(https://www.javbangers.com/embed/\\w+)",
"url": "\\1"
},
{
"pattern": "(https://www.pornrewind.com/embed/\\w+)",
"url": "\\1"
},
{
"pattern": "(https://severeporn.com/embed/\\w+)",
"url": "\\1"
}
]
},
"free": true,
"id": "javwhores",
"name": "javwhores",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

44
servers/javwhores.py Normal file
View File

@@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector javwhores By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from core import scrapertools
from platformcode import logger
from lib.kt_player import decode
def test_video_exists(page_url):
response = httptools.downloadpage(page_url)
if not response.sucess or \
"Not Found" in response.data \
or "File was deleted" in response.data \
or "is no longer available" in response.data:
return False, "[javwhores] El fichero no existe o ha sido borrado"
# global video_url, license_code
# video_url = scrapertools.find_single_match(response.data, "video_url: '([^']+)'")
# license_code = scrapertools.find_single_match(response.data, "license_code: '([^']+)'")
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
itemlist = []
data = httptools.downloadpage(page_url).data
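# kt_player (KVS) pages expose either labelled sources -- video_url/video_alt_url*
# paired with *_text labels -- or a single video_url plus a postfix; pick the
# pattern that matches this page.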
if "video_url_text" in data:
patron = '(?:video_url|video_alt_url|video_alt_url[0-9]*):\s*\'([^\']+)\'.*?'
patron += '(?:video_url_text|video_alt_url_text|video_alt_url[0-9]*_text):\s*\'([^\']+)\''
else:
patron = 'video_url:\s*\'([^\']+)\'.*?'
patron += 'postfix:\s*\'([^\']+)\''
matches = re.compile(patron,re.DOTALL).findall(data)
for url,quality in matches:
itemlist.append(['%s' %quality, url])
logger.debug(quality + " : " + url)
return itemlist
# return [["[javwhores]", decode(video_url, license_code)]]

42
servers/jetload.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "jetload.net/(?:e|v|p|\\#\\!/v|\\#\\!/d)/(\\w+)",
"url": "https://jetload.net/e/\\1"
}
]
},
"free": true,
"id": "jetload",
"name": "jetload",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://jetload.net/jetlogo.png"
}

33
servers/jetload.py Normal file
View File

@@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector jetload By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools, jsontools
from core import scrapertools
from platformcode import logger
video_urls = []
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
subtitles = ""
response = httptools.downloadpage(page_url)
global data
data = response.data
if not response.sucess or "Not Found" in data or "File was deleted" in data or "is no longer available" in data:
return False, "[jetload] El fichero no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
media_url = scrapertools.find_single_match(data, '<video src="([^"]+)"')
if media_url:
ext = media_url[-4:]
if ext != 'm3u8': # HLS is deliberately skipped; only direct files are returned
video_urls.append(["%s [Jetload]" % ext, media_url])
return video_urls

42
servers/jplayer.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "jplayer.net/v/([A-z0-9_-]+)",
"url": "https://www.jplayer.net/api/source/\\1"
}
]
},
"free": true,
"id": "jplayer",
"name": "jplayer",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

42
servers/jplayer.py Normal file
View File

@@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector jplayer By Alfa development Group
# --------------------------------------------------------
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # Very slow on PY2. Native on PY3
else:
import urllib # Use PY2's native module, which is faster
from core import httptools
from core import jsontools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "no longer exists" in data or "to copyright issues" in data:
return False, "[jplayer] El video ha sido borrado"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
post = urllib.urlencode({"r":"", "d":"www.jplayer.net"})
data = httptools.downloadpage(page_url, post=post).data
json = jsontools.load(data)["data"]
for _url in json:
url = _url["file"]
label = _url["label"]
video_urls.append([label +" [jplayer]", url])
return video_urls

42
servers/keezmovies.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://www.keezmovies.com/video/[A-z0-9-]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "keezmovies",
"name": "keezmovies",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

25
servers/keezmovies.py Normal file
View File

@@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
if "<h2>WE ARE SORRY</h2>" in data or '<title>404 Not Found</title>' in data:
return False, "[keezmovies] El fichero no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, video_password):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
patron = '"quality_(\d+)p":"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for quality, scrapedurl in matches:
url = scrapedurl.replace("\/", "/").replace("\u0026", "&")
video_urls.append(["[keezmovies] %sp" %quality, url])
return video_urls

42
servers/mangoplay.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(mangoplay.net(?://|/)embed.php\\?data=[A-z0-9_-]+)",
"url": "https://\\1"
}
]
},
"free": true,
"id": "mangoplay",
"name": "mangoplay",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

28
servers/mangoplay.py Normal file
View File

@@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector mangoplay By Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "no longer exists" in data or "to copyright issues" in data:
return False, "[mangoplay] El video ha sido borrado"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
url = scrapertools.find_single_match(data, 'shareId = "([^"]+)')
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
url = url.replace(" ","%20")
video_urls.append([".MP4 [mangoplay]", url])
return video_urls

42
servers/mangovideo.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://mangovideo.pw/embed/\\d+)",
"url": "\\1"
}
]
},
"free": true,
"id": "mangovideo",
"name": "mangovideo",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

38
servers/mangovideo.py Normal file
View File

@@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector mangovideo By Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger
server = {'1': 'http://www.mangovideo.pw/contents/videos/', '7' : 'http://server9.mangovideo.pw/contents/videos/',
'8' : 'http://s10.mangovideo.pw/contents/videos/', '9' : 'http://server2.mangovideo.pw/contents/videos/',
'10' : 'http://server217.mangovideo.pw/contents/videos/', '11' : 'http://234.mangovideo.pw/contents/videos/',
'12' : 'http://98.mangovideo.pw/contents/videos/', '13' : 'http://68.mangovideo.pw/contents/videos/',
'15' : 'http://45.mangovideo.pw/contents/videos/'
}
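# The first numeric segment of a get_file/ URL selects a storage host from the map
# above; ids missing from the map are passed through unchanged by dict.get() below.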
def test_video_exists(page_url):
response = httptools.downloadpage(page_url)
if not response.sucess or \
"Not Found" in response.data \
or "File was deleted" in response.data \
or "is no longer available" in response.data:
return False, "[mangovideo] El fichero no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
video_urls = []
data = httptools.downloadpage(page_url).data
matches = scrapertools.find_multiple_matches(data, 'function/0/https://mangovideo.pw/get_file/(\d+)/\w+/(.*?.mp4)')
for scrapedserver,scrapedurl in matches:
scrapedserver = server.get(scrapedserver, scrapedserver)
url = scrapedserver + scrapedurl
video_urls.append(["[mangovideo]", url])
return video_urls

42
servers/manyvideos.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://manyvideos.xyz/embed/([A-z0-9]+)",
"url": "https://manyvideos.xyz/embed/\\1"
}
]
},
"free": true,
"id": "manyvideos",
"name": "manyvideos",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

35
servers/manyvideos.py Normal file
View File

@@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector manyvideos By Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger
import base64
from lib import jsunpack
def test_video_exists(page_url):
response = httptools.downloadpage(page_url)
if not response.sucess or \
"Not Found" in response.data \
or "File was deleted" in response.data \
or "is no longer available" in response.data:
return False, "[manyvideos] El fichero no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
video_urls = []
data = httptools.downloadpage(page_url).data
data = scrapertools.find_single_match(data, 'JuicyCodes.Run\(([^\)]+)\)')
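# JuicyCodes.Run() takes its payload as quoted base64 chunks joined with '+',
# e.g. "ZXZhbCg"+"mdW5jdGlvbg" (hypothetical); joining them and base64-decoding
# yields an eval(function(p,a,c,k,e,d)...) script that jsunpack can expand.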
data = data.replace("+", "")
data = base64.b64decode(data)
unpack = jsunpack.unpack(data)
matches = scrapertools.find_multiple_matches(unpack, '"file":"([^"]+)","label":"([^"]+)"')
for url,quality in matches:
url = url.replace("v2.", "v1.")
video_urls.append(["[manyvideos] %s" % quality, url])
return video_urls

4
servers/mediafire.py Normal file → Executable file
View File

@@ -8,9 +8,9 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Invalid or Deleted File" in data:
if "Invalid or Deleted File" in data or "Well, looks like we" in data:
return False, "[Mediafire] El archivo no existe o ha sido borrado"
elif "File Removed for Violation" in data:
if "File Removed for Violation" in data:
return False, "[Mediafire] Archivo eliminado por infracción"
return True, ""

83
servers/mega.py Normal file → Executable file
View File

@@ -1,27 +1,90 @@
# -*- coding: utf-8 -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import json
import random
from core import httptools
from core import scrapertools
from platformcode import platformtools, logger
files = None
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
return True, ""
types= "Archivo"
gen = "o"
msg = "El link tiene algún problema."
id_video = None
get = ""
seqno = random.randint(0, 0xFFFFFFFF)
url = page_url.split("#")[1]
f_id = url.split("!")[1]
id_video = None
if "|" in url:
url, id_video = url.split("|")
post = {'a': 'g', 'g': 1, 'p': f_id}
isfolder = False
if "/#F!" in page_url:
get = "&n=" + f_id
post = {"a":"f","c":1,"r":0}
isfolder = True
types= "Carpeta"
gen = "a"
if id_video:
#Doing a check here gets complicated: there is no direct way even when we already have the video id inside the folder
return True, ""
codes = {-1: 'Se ha producido un error interno en Mega.nz',
-2: 'Error en la petición realizada, Cod -2',
-3: 'Un atasco temporal o malfuncionamiento en el servidor de Mega impide que se procese su link',
-4: 'Ha excedido la cuota de transferencia permitida. Vuelva a intentarlo más tarde',
-6: types + ' no encontrad' + gen + ', cuenta eliminada',
-9: types + ' no encontrad'+ gen,
-11: 'Acceso restringido',
-13: 'Está intentando acceder a un archivo incompleto',
-14: 'Una operación de desencriptado ha fallado',
-15: 'Sesión de usuario expirada o invalida, logueese de nuevo',
-16: types + ' no disponible, la cuenta del uploader fue baneada',
-17: 'La petición sobrepasa su cuota de transferiencia permitida',
-18: types + ' temporalmente no disponible, intentelo de nuevo más tarde'
}
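# Mega's API expects a JSON array holding one command object ({'a': 'g', ...} for a
# file, {'a': 'f', ...} for a folder) and replies either with a result object or
# with a bare negative integer that maps to one of the error codes above.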
api = 'https://g.api.mega.co.nz/cs?id=%d%s' % (seqno, get)
req_api = httptools.downloadpage(api, post=json.dumps([post])).data
if isfolder:
req_api = json.loads(req_api)
else:
try:
req_api = json.loads(req_api)[0]
except:
req_api = json.loads(req_api)
logger.error(req_api)
if isinstance(req_api, (int, long)):
if req_api in codes:
msg = codes[req_api]
return False, msg
else:
#Check on the remaining quota limit
from megaserver import Client
c = Client(url=page_url, is_playing_fnc=platformtools.is_playing)
global files
files = c.get_files()
if files == 509:
msg1 = "[B][COLOR tomato]El video excede el limite de visionado diario que Mega impone a los usuarios Free."
msg1 += " Prueba en otro servidor o canal.[/B][/COLOR]"
return False, msg1
elif isinstance(files, (int, long)):
return False, "Error codigo %s" % str(files)
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
page_url = page_url.replace('/embed#', '/#')
logger.info("(page_url='%s')" % page_url)
video_urls = []
from megaserver import Client
page_url = page_url.replace('/embed#!', '/#!')
c = Client(url=page_url, is_playing_fnc=platformtools.is_playing)
files = c.get_files()
# if there are more than 5 files, build a playlist with all of them
# This (playlist) feature does not work; megaserver/handler.py needs a look, even though the call lives in client.py
if len(files) > 5:
media_url = c.get_play_list()
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [mega]", media_url])

View File

@@ -7,7 +7,6 @@ from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
data = httptools.downloadpage(page_url).data
if data == "File was deleted" or data == '':

42
servers/mydaddy.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:http:|)//mydaddy.cc/video/([A-z0-9]+)",
"url": "http://mydaddy.cc/video/\\1"
}
]
},
"free": true,
"id": "mydaddy",
"name": "mydaddy",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

32
servers/mydaddy.py Normal file
View File

@@ -0,0 +1,32 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector mydaddy By Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
response = httptools.downloadpage(page_url)
if not response.sucess or \
"Not Found" in response.data \
or "File was deleted" in response.data \
or "is no longer available" in response.data:
return False, "[mydaddy] El fichero no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
video_urls = []
data = httptools.downloadpage(page_url).data
data = scrapertools.find_single_match(data, 'var srca = \[(.*?)\]')
matches = scrapertools.find_multiple_matches(data, 'file: "([^"]+)", label: "([^"]+)"')
for url,quality in matches:
if not url.startswith("http"):
url = "http:%s" % url
if not "Default" in quality:
video_urls.append(["[mydaddy] %s" % quality, url])
return video_urls

View File

@@ -8,11 +8,7 @@
"url": "https://embed.mystream.to/\\1"
},
{
"pattern": "https://mystream\\.premiumserver\\.club/(\\w+)",
"url": "https://embed.mystream.to/\\1"
},
{
"pattern": "https://mstream\\.(?:xyz|icu)/(\\w+)",
"pattern": "https://mystream.to/watch/(\\w+)",
"url": "https://embed.mystream.to/\\1"
}
]

View File

@@ -4,10 +4,8 @@
# --------------------------------------------------------
import re
from core import httptools
from core import scrapertools
from lib import js2py
from lib.aadecode import decode as aadecode
from platformcode import logger
@@ -17,7 +15,7 @@ def test_video_exists(page_url):
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[mystream] El archivo no existe o ha sido borrado"
if "<title>video is no longer available" in data.data:
if "<title>video is no longer available" in data.data or "<title>Video not found" in data.data:
return False, "[mystream] El archivo no existe o ha sido borrado"
return True, ""
@@ -28,11 +26,6 @@ def get_video_url(page_url, premium = False, user = "", password = "", video_pas
headers = {'referer': page_url}
data = httptools.downloadpage(page_url, headers=headers).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
for c in scrapertools.find_multiple_matches(data, '<script>(.*?)</script>'):
if 'function vv' in c:
vv = js2py.eval_js(c)
if 'key' in c:
key = js2py.eval_js(c)
code = scrapertools.find_single_match(data, '(?s)<script>\s*゚ω゚(.*?)</script>').strip()
text_decode = aadecode(code)
matches = scrapertools.find_multiple_matches(text_decode, "'src', '([^']+)'")

42
servers/myupload.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://myupload.co/plugins/mediaplayer/site/_embed.php\\?u=[A-z0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "myupload",
"name": "myupload",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

30
servers/myupload.py Normal file
View File

@@ -0,0 +1,30 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector myupload By Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger
import base64
def test_video_exists(page_url):
response = httptools.downloadpage(page_url)
if not response.sucess or \
"Not Found" in response.data \
or "File was deleted" in response.data \
or "is no longer available" in response.data:
return False, "[myupload] El fichero no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
video_urls = []
data = httptools.downloadpage(page_url).data
matches = scrapertools.find_multiple_matches(data, 'tracker: "([^"]+)"')
for scrapedurl in matches:
url = base64.b64decode(scrapedurl)
video_urls.append(["[myupload]", url])
return video_urls

8
servers/netutv.json Normal file → Executable file
View File

@@ -1,5 +1,5 @@
{
"active": true,
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [
@@ -7,6 +7,10 @@
"pattern": "/netu/tv/(embed_)(.*?$)",
"url": "http://netu.tv/watch_video.php?v=\\2"
},
{
"pattern": "https://waaws.tk/player/embed_player.php?vid=([A-z0-9]+)&autoplay=yes",
"url": "http://netu.tv/watch_video.php?v=\\2"
},
{
"pattern": "(?:hqq|waaw|netu)(?:\\.tv\\/|\\.watch\\/|\\.php\\?).*?(?:v=|vid=)([A-z0-9]+)",
"url": "http://netu.tv/watch_video.php?v=\\1"
@@ -54,4 +58,4 @@
}
],
"thumbnail": "server_netutv.png"
}
}

35
servers/netutv.py Normal file → Executable file
View File

@@ -1,8 +1,20 @@
# -*- coding: utf-8 -*-
import random
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # Very slow on PY2. Native on PY3
else:
import urllib # Use PY2's native module, which is faster
from builtins import chr
from builtins import range
import re
import urllib
from core import httptools
from core import jsontools
@@ -12,6 +24,8 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
#Server disabled until further notice
return False, "[netutv] Servidor deshabilitado"
# http://netu.tv/watch_video.php=XX only holds a redirect; go straight to http://hqq.tv/player/embed_player.php?vid=XX
page_url = page_url.replace("/watch_video.php?v=", "/player/embed_player.php?vid=")
data = httptools.downloadpage(page_url).data
@@ -35,17 +49,15 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
page_url = page_url.replace('https://waaw.tv/', 'http://hqq.watch/')
data = httptools.downloadpage(page_url).data
logger.debug(data)
# ~ logger.debug(data)
# js_wise = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>")
js_wise = scrapertools.find_single_match(data, "<script>\s*;(eval.*?)\s*</script>")
logger.info('JS_WISE= '+ js_wise)
js_wise = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>")
data = jswise(js_wise).replace("\\", "")
logger.debug(data)
# ~ logger.debug(data)
alea = str(random.random())[2:]
data_ip = httptools.downloadpage('http://hqq.watch/player/ip.php?type=json&rand=%s' % alea).data
logger.debug(data_ip)
# ~ logger.debug(data_ip)
json_data_ip = jsontools.load(data_ip)
url = scrapertools.find_single_match(data, 'self\.location\.replace\("([^)]+)\)')
@@ -53,15 +65,14 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
url = url.replace('"+data.ip+"', json_data_ip['ip'])
url = url.replace('"+need_captcha+"', '0') #json_data_ip['need_captcha'])
url = url.replace('"+token', '')
# logger.info('URL= '+url)
# logger.debug(url)
# ~ logger.debug(url)
headers = { "User-Agent": 'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/533.4 (KHTML, like Gecko) Chrome/5.0.375.127 Large Screen Safari/533.4 GoogleTV/162671' }
data = httptools.downloadpage('http://hqq.watch'+url, headers=headers).data
# logger.debug(data)
# ~ logger.debug(data)
codigo_js = scrapertools.find_multiple_matches(data, '<script>document.write\(unescape\("([^"]+)')
# logger.debug(codigo_js)
# ~ logger.debug(codigo_js)
js_aux = urllib.unquote(codigo_js[0])
at = scrapertools.find_single_match(js_aux, 'var at = "([^"]+)')

74
servers/nuvid.json Normal file
View File

@@ -0,0 +1,74 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "http://(www.nuvid.com)/embed/([0-9]+)",
"url": "https://\\1/player_config_json/?vid=\\2&aid=0&domain_id=0&embed=0&ref=null&check_speed=0"
},
{
"pattern": "http://(www.drtuber.com)/embed/([0-9]+)",
"url": "https://\\1/player_config_json/?vid=\\2&aid=0&domain_id=0&embed=0&ref=null&check_speed=0"
},
{
"pattern": "http://(www.iceporn.com)/embed/([0-9]+)",
"url": "https://\\1/player_config_json/?vid=\\2&aid=0&domain_id=0&embed=0&ref=null&check_speed=0"
},
{
"pattern": "http://(www.viptube.com)/embed/([0-9]+)",
"url": "https://\\1/player_config_json/?vid=\\2&aid=0&domain_id=0&embed=0&ref=null&check_speed=0"
},
{
"pattern": "http://(www.vivatube.com)/embed/([0-9]+)",
"url": "https://\\1/player_config_json/?vid=\\2&aid=0&domain_id=0&embed=0&ref=null&check_speed=0"
},
{
"pattern": "https://(www.tubeon.com)/es/video/([0-9]+)/",
"url": "https://\\1/player_config_json/?vid=\\2&aid=0&domain_id=0&embed=0&ref=null&check_speed=0"
},
{
"pattern": "https://(www.hd21.com)/es/video/([0-9]+)/",
"url": "https://\\1/player_config_json/?vid=\\2&aid=0&domain_id=0&embed=0&ref=null&check_speed=0"
},
{
"pattern": "https://(www.yeptube.com)/es/video/([0-9]+)/",
"url": "https://\\1/player_config_json/?vid=\\2&aid=0&domain_id=0&embed=0&ref=null&check_speed=0"
},
{
"pattern": "https://(www.winporn.com)/es/video/([0-9]+)/",
"url": "https://\\1/player_config_json/?vid=\\2&aid=0&domain_id=0&embed=0&ref=null&check_speed=0"
}
]
},
"free": true,
"id": "nuvid",
"name": "nuvid",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

23
servers/nuvid.py Normal file
View File

@@ -0,0 +1,23 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
def get_video_url(page_url, video_password):
logger.info("(page_url='%s')" % page_url)
video_urls = []
server = scrapertools.find_single_match(page_url, 'https://www.([A-z0-9-]+).com')
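# Every site listed in nuvid.json shares this player_config_json endpoint; each
# entry under "files" carries an "lq" and/or "hq" URL.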
data = httptools.downloadpage(page_url).data
data = scrapertools.find_single_match(data, '"files":(.*?)"quality"')
patron = '"([lh])q":"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for quality, scrapedurl in matches:
url = scrapedurl.replace("\/", "/")
if "l" in quality: quality = "360p"
if "h" in quality: quality = "720p"
video_urls.append(["[%s] %s" %(server,quality), url])
return video_urls

View File

@@ -1,6 +1,15 @@
# -*- coding: utf-8 -*-
import urllib
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # Very slow on PY2. Native on PY3
else:
import urllib # Use PY2's native module, which is faster
from core import httptools
from core import scrapertools

49
servers/oprem.json Normal file
View File

@@ -0,0 +1,49 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://[a-zA-Z0-9]+\\.openloadpremium\\.com\\/m3u8\\/.*?\\.m3u8)",
"url": "\\1"
},
{
"pattern": "(https://[a-zA-Z0-9]+\\.pelisplay\\.tv/.*?m3u8.php\\?id=\\d+)",
"url": "\\1"
},
{
"pattern": "streamium\\.xyz/.*?hash=([a-zA-Z0-9]+)",
"url": "http://streamium.xyz/files/\\1"
}
]
},
"free": true,
"id": "oprem",
"name": "oprem",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

36
servers/oprem.py Normal file
View File

@@ -0,0 +1,36 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector Oprem By Alfa development Group
# --------------------------------------------------------
import os
from core import httptools
from platformcode import logger, config
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
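# peliculonhd playlists reference segments as /mpegTS/<id>/<quotaUser>; rewrite each
# to a direct lh3.googleusercontent.com/d/<id>?quotaUser=... URL, point the host at
# the local servop proxy on localhost:8781, and hand the player a patched local m3u8.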
if 'peliculonhd' in page_url:
import re
patron = r'/mpegTS/([^/]+)/([^\s]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for _id, quota in matches:
old = '/mpegTS/%s/%s' % (_id, quota)
gurl = 'https://lh3.googleusercontent.com/d/%s?quotaUser=%s'
new = gurl % (_id, quota)
data = data.replace(old, new)
data = data.replace('s://lh3.googleusercontent.com', '://localhost:8781')
m3u8 = os.path.join(config.get_data_path(), "op_master.m3u8")
outfile = open(m3u8, 'wb')
outfile.write(data)
outfile.close()
page_url = m3u8
from lib import servop
servop.start()
video_urls = [["%s [Oprem]" % page_url[-4:], page_url]]
return video_urls

50
servers/pornhub.json Normal file
View File

@@ -0,0 +1,50 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://(?:www.|)pornhub.com/embed/([A-z0-9]+)",
"url": "https://www.pornhub.com/view_video.php?viewkey=\\1"
},
{
"pattern": "(https://www.pornhub.com/view_video.php?viewkey=[A-z0-9]+)",
"url": "\\1"
},
{
"pattern": "(pornhub.com/view_video.php\\?viewkey=[A-z0-9]+)",
"url": "https://www.\\1"
}
]
},
"free": true,
"id": "pornhub",
"name": "pornhub",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

38
servers/pornhub.py Normal file
View File

@@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector pornhub By Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
response = httptools.downloadpage(page_url)
if not response.sucess or \
"Not Found" in response.data \
or "File was deleted" in response.data \
or "removed" in response.data \
or not "defaultQuality" in response.data \
or "is no longer available" in response.data:
return False, "[pornhub] El fichero no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info()
video_urls = []
data = httptools.downloadpage(page_url).data
data = scrapertools.find_single_match(data, '<div id="vpContentContainer">(.*?)</script>')
data = data.replace('" + "', '')
videourl = scrapertools.find_multiple_matches(data, 'var quality_(\d+)p=(.*?);')
scrapertools.printMatches(videourl)
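# The player JS splits each quality URL across several randomly named string vars
# referenced after /* ... */ comments; collect the var names in order and
# concatenate their values to rebuild the full URL.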
for scrapedquality,scrapedurl in videourl:
orden = scrapertools.find_multiple_matches(scrapedurl, '\*\/([A-z0-9]+)')
logger.debug(orden)
url= ""
for i in orden:
url += scrapertools.find_single_match(data, '%s="([^"]+)"' %i)
logger.debug(url)
video_urls.append([scrapedquality + "p [pornhub]", url])
return video_urls

42
servers/pornrewind.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://www.pornrewind.com/embed/\\w+)",
"url": "\\1"
}
]
},
"free": true,
"id": "pornrewind",
"name": "pornrewind",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

34
servers/pornrewind.py Normal file
View File

@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector pornrewind By Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger
from lib.kt_player import decode
def test_video_exists(page_url):
response = httptools.downloadpage(page_url)
if not response.sucess or \
"Not Found" in response.data \
or "File was deleted" in response.data \
or "is no longer available" in response.data:
return False, "[pornrewind] El fichero no existe o ha sido borrado"
global video_url, license_code
video_url = scrapertools.find_single_match(response.data, "video_url: '([^']+)'")
license_code = scrapertools.find_single_match(response.data, "license_code: '([^']+)'")
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
# mediaurl = decode(video_url, license_code)
mediaurl = video_url
if not mediaurl.startswith("https"):
mediaurl = "https://%s" % mediaurl
return [["[pornrewind]", mediaurl]]

4
servers/powvideo.json Normal file → Executable file
View File

@@ -4,8 +4,8 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:powvideo|povw1deo).(?:net|xyz|com)/(?:embed-|iframe-|preview-|)([a-z0-9]+)",
"url": "http://powvideo.net/iframe-\\1-1536x701.html"
"pattern": "(?:powvideo|povw1deo|powvldeo).(?:net|xyz|com|cc)/(?:embed-|iframe-|preview-|)([a-z0-9]+)",
"url": "https://powvldeo.cc/iframe-\\1-920x360.html"
}
]
},

View File

@@ -1,5 +1,11 @@
# -*- coding: utf-8 -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import re
from core import httptools
from core import scrapertools
from lib import jsunpack
@@ -12,8 +18,10 @@ def test_video_exists(page_url):
referer = page_url.replace('iframe', 'preview')
httptools.downloadpage(referer)
global data
data = httptools.downloadpage(page_url, headers={'referer': referer}).data
if data == "File was deleted" or data == '':
return False, "[powvideo] El video ha sido borrado"
if 'function(p,a,c,k,e,' not in data:
@@ -25,16 +33,14 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info()
itemlist = []
referer = page_url.replace('iframe', 'preview')
data = httptools.downloadpage(page_url, headers={'referer': referer}).data
packed = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed)
url = scrapertools.find_single_match(unpacked, "(?:src):\\\\'([^\\\\]+.mp4)\\\\'")
if not PY3: from lib import alfaresolver
else: from lib import alfaresolver_py3 as alfaresolver
url = alfaresolver.decode_video_url(url, data, 2)
itemlist.append([".mp4" + " [powvideo]", url])
itemlist.sort(key=lambda x: x[0], reverse=True)
return itemlist
return itemlist

42
servers/prostream.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "prostream.to/(?:embed-|)([A-z0-9]+)",
"url": "https://prostream.to/embed-\\1.html"
}
]
},
"free": true,
"id": "prostream",
"name": "prostream",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://prostream.to/assets/img/logo.png"
}

33
servers/prostream.py Normal file
View File

@@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector Prostream By Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
if "File is no longer available" in data:
return False, "[Prostream] El fichero no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
ext = 'mp4'
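# The player ships as eval(function(p,a,c,k,e,d)...); jsunpack.unpack() reverses
# Dean Edwards' packer and exposes the plain sources: [...] array.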
packed = scrapertools.find_single_match(data, "text/javascript'>(eval.*?)\s*</script>")
unpacked = jsunpack.unpack(packed)
media_url = scrapertools.find_single_match(unpacked, r'sources:\s*\["([^"]+)"')
ext = media_url[-4:]
video_urls.append(["%s [Prostream]" % (ext), media_url])
return video_urls

2
servers/rapidvideo.json Normal file → Executable file
View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "rapidvideo.(?:org|com)/(?:\\?v=|e/|embed/|v/|)([A-z0-9]+)",
"pattern": "(?:rapidvideo|rapidvid).(?:org|com|to|is)/(?:\\?v=|e/|embed/|v/|d/)([A-z0-9]+)",
"url": "https://www.rapidvideo.com/e/\\1"
}
]

23
servers/rapidvideo.py Normal file → Executable file
View File

@@ -36,17 +36,18 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
match = scrapertools.find_multiple_matches(data, patron)
if match:
for url1 in match:
res = scrapertools.find_single_match(url1, '=(\w+)')
data = httptools.downloadpage(url1).data
if "Please click on this button to open this video" in data:
data = httptools.downloadpage(url1, post=post).data
url = scrapertools.find_single_match(data, 'source src="([^"]+)')
ext = scrapertools.get_filename_from_url(url)[-4:]
video_urls.append(['%s %s [rapidvideo]' % (ext, res), url])
res = scrapertools.find_single_match(url1, '=(\w+)')
data = httptools.downloadpage(url1).data
if "Please click on this button to open this video" in data:
data = httptools.downloadpage(url1, post=post).data
url = scrapertools.find_single_match(data, 'source src="([^"]+)')
ext = scrapertools.get_filename_from_url(url)[-4:]
video_urls.append(['%s %s [rapidvideo]' % (ext, res), url])
else:
patron = 'data-setup.*?src="([^"]+)".*?'
patron += 'type="([^"]+)"'
patron = 'src="([^"]+)" type="video/([^"]+)" label="([^"]+)"'
match = scrapertools.find_multiple_matches(data, patron)
for url, ext in match:
video_urls.append(['%s [rapidvideo]' % (ext), url])
if match:
for url, ext, res in match:
video_urls.append(['.%s %s [Rapidvideo]' % (ext, res), url])
return video_urls

42
servers/rcdnme.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "rcdn.me/(?:flash-|embed-|)([A-z0-9]+)",
"url": "https://rcdn.me/embed/\\1"
}
]
},
"free": true,
"id": "rcdnme",
"name": "rcdnme",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://i.imgur.com/NErNgTg.jpg"
}

49
servers/rcdnme.py Normal file
View File

@@ -0,0 +1,49 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector Rcdnme By Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if "Object not found" in data.data or "longer exists on our servers" in data.data:
return False, "[Rcdnme] El archivo no existe o ha sido borrado"
if data.code == 500:
return False, "[Rcdnme] Error interno del servidor"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "p,a,c,k,e,d" in data:
data = jsunpack.unpack(data).replace("\\", "")
video_urls = []
videos = scrapertools.find_multiple_matches(data, 'file":"([^"]+)","label":"(.*?)"')
subtitulo = scrapertools.find_single_match(data, 'tracks:\s*\[{"file":"(.*?)"')
if "http" not in subtitulo and subtitulo != "":
subtitulo = "https://rcdn.me" + subtitulo
for video_url, video_calidad in videos:
extension = scrapertools.get_filename_from_url(video_url)[-4:]
video_url = video_url.replace("\\", "")
if extension not in [".vtt", ".srt"]:
video_urls.append(["%s %s [rcdnme]" % (extension, video_calidad), video_url, 0, subtitulo])
try:
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0].rsplit(" ")[1]))
except:
pass
for video_url in video_urls:
logger.info(" %s - %s" % (video_url[0], video_url[1]))
return video_urls

Some files were not shown because too many files have changed in this diff