Merge remote-tracking branch 'alfa-addon/master'
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.4.9" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.4.11" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>

@@ -19,11 +19,12 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» pelisfox » pelisgratis
» gamovideo » doomtv
» usercloud » ciberpeliculashd
» pordede ¤ arreglos internos
[COLOR green]Gracias a [B][COLOR yellow]f_y_m[/COLOR][/B] por su colaboración en esta versión[/COLOR]
» plusdede » pelisgratis
» seriesblanco » anitoonstv
» openload » powvideo
» streamplay » clipwatching
» flashx ¤ arreglos internos
[COLOR green]Gracias a [B][COLOR yellow]danielr460[/COLOR][/B] por su colaboración en esta versión[/COLOR]
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

@@ -32,14 +32,14 @@ def mainlist(item):

itemlist = list()

itemlist.append(Item(channel=item.channel, action="lista", title="Anime", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="lista", title="Series Animadas", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="lista", title="Pokemon", url=host,
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host+"/lista-de-anime.php",
thumbnail=thumb_series))
#itemlist.append(Item(channel=item.channel, action="lista", title="Series Animadas", url=host,
# thumbnail=thumb_series))
#itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host,
# thumbnail=thumb_series))
#itemlist.append(Item(channel=item.channel, action="lista", title="Pokemon", url=host,
# thumbnail=thumb_series))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist

@@ -52,45 +52,38 @@ def lista(item):

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
if 'Novedades' in item.title:
patron_cat = '<div class="activos"><h3>(.+?)<\/h2><\/a><\/div>'
patron = '<a href="(.+?)"><h2><span>(.+?)<\/span>'
else:
patron_cat = '<li><a href=.+?>'
patron_cat += str(item.title)
patron_cat += '<\/a><div>(.+?)<\/div><\/li>'
patron = "<a href='(.+?)'>(.+?)<\/a>"
data = scrapertools.find_single_match(data, patron_cat)
#logger.info("Pagina para regex "+data)
patron = '<div class="serie">' #Encabezado regex
patron +="<a href='(.+?)'>" #scrapedurl
patron +="<img src='(.+?)'.+?" #scrapedthumbnail
patron +="<p class='.+?'>(.+?)<\/p>" #scrapedtitle
patron +=".+?<span .+?>(.+?)<\/span>" #scrapedplot

matches = scrapertools.find_multiple_matches(data, patron)
for link, name in matches:
if "Novedades" in item.title:
url = link
title = name.capitalize()
else:
url = host + link
title = name
if ":" in title:
cad = title.split(":")
for scrapedurl, scrapedthumbnail,scrapedtitle,scrapedplot in matches:
if ":" in scrapedtitle:
cad = scrapedtitle.split(":")
show = cad[0]
else:
if "(" in title:
cad = title.split("(")
if "Super" in title:
if "(" in scrapedtitle:
cad = scrapedtitle.split("(")
if "Super" in scrapedtitle:
show = cad[1]
show = show.replace(")", "")
else:
show = cad[0]
else:
show = title
show = scrapedtitle
if "&" in show:
cad = title.split("xy")
cad = scrapedtitle.split("xy")
show = cad[0]
context1=[renumbertools.context(item), autoplay.context]
itemlist.append(
item.clone(title=title, url=url, plot=show, action="episodios", show=show,
context=context1))
tmdb.set_infoLabels(itemlist)
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
scrapedurl=host+scrapedurl
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, plot=scrapedplot,
thumbnail=scrapedthumbnail, action="episodios", show=show, context=context))
#tmdb.set_infoLabels(itemlist)
return itemlist


@@ -102,7 +95,7 @@ def episodios(item):

patron = '<div class="pagina">(.+?)<\/div><div id="fade".+?>'
data = scrapertools.find_single_match(data, patron)
patron_caps = "<a href='(.+?)'>Capitulo: (.+?) - (.+?)<\/a>"
patron_caps = "<li><a href='(.+?)'>Capitulo: (.+?) - (.+?)<\/a>"
matches = scrapertools.find_multiple_matches(data, patron_caps)
show = scrapertools.find_single_match(data, '<span>Titulo.+?<\/span>(.+?)<br><span>')
scrapedthumbnail = scrapertools.find_single_match(data, "<img src='(.+?)'.+?>")

@@ -128,7 +121,7 @@ def episodios(item):

if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
action="add_serie_to_library", extra="episodios", show=item.title))

return itemlist

@@ -150,29 +143,30 @@ def findvideos(item):

data = httptools.downloadpage(item.url).data
data1 = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
data_vid = scrapertools.find_single_match(data1, '<div class="videos">(.+?)<\/div><div .+?>')
data_vid = scrapertools.find_single_match(data1, 'var q = \[ \[(.+?)\] \]')
# name = scrapertools.find_single_match(data,'<span>Titulo.+?<\/span>([^<]+)<br>')
scrapedplot = scrapertools.find_single_match(data, '<br><span>Descrip.+?<\/span>([^<]+)<br>')
scrapedthumbnail = scrapertools.find_single_match(data, '<div class="caracteristicas"><img src="([^<]+)">')
itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
for server, quality, url in itemla:
if "HQ" in quality:
quality = "HD"
if "Calidad Alta" in quality:
quality = "HQ"
if " Calidad media - Carga mas rapido" in quality:
quality = "360p"
server = server.lower().strip()
if "ok" in server:
server = 'okru'
if "rapid" in server:
server = 'rapidvideo'
if "netu" in server:
server = 'netutv'
itemla = scrapertools.find_multiple_matches(data_vid, '"(.+?)"')
for url in itemla:
url=url.replace('\/', '/')
server1=url.split('/')
server=server1[2]
if "." in server:
server1=server.split('.')
if len(server1)==3:
server=server1[1]
else:
server=server1[0]
if "goo" in url:
url = googl(url)
itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
thumbnail=scrapedthumbnail, plot=scrapedplot,
server='netutv'
if "ok" in url:
url = "https:"+url
server='okru'
quality="360p"
itemlist.append(item.clone(url=url, action="play",
thumbnail=scrapedthumbnail, server=server, plot=scrapedplot,
title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))

autoplay.start(itemlist, item)

@@ -275,11 +275,23 @@ def start(itemlist, item):
videoitem = resolved_item[0]

# si no directamente reproduce y marca como visto
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_auto_as_watched(item)
#platformtools.play_video(videoitem)
videoitem.contentChannel='videolibrary'
launcher.run(videoitem)
import importlib
actual_server="servers."+videoitem.server
i = importlib.import_module(actual_server)
#from servers import streamango
try:
testv=i.test_video_exists(videoitem.url)
logger.info(testv)
except:
testv=(True,'')
logger.debug("La funcion no existe en el conector "+videoitem.server)
testvideo=list(testv)
if testvideo[0]==True:
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_auto_as_watched(item)
#platformtools.play_video(videoitem)
launcher.run(videoitem)

try:
if platformtools.is_playing():
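The autoplay change above imports the matching server connector by name and, when the connector defines test_video_exists, checks the link before marking it watched and playing it. A minimal sketch of that pattern, assuming connectors live in a "servers" package and return an (ok, message) tuple as in the code shown (the helper name check_link is invented for the example):

import importlib

def check_link(server, url):
    # Load the connector module dynamically, e.g. servers.streamplay.
    module = importlib.import_module("servers." + server)
    try:
        ok, message = module.test_video_exists(url)
    except AttributeError:
        # Connector has no test function: assume the link is playable,
        # mirroring the fallback in the diff above.
        ok, message = True, ""
    return ok, message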
@@ -646,6 +646,7 @@ def findvideos(item):

key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')

data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js").data

try:
data_js = jhexdecode(data_js)
except:

@@ -667,10 +668,11 @@ def findvideos(item):

infolabels["year"] = year
matches = []
for match in data_decrypt:
prov = eval(scrapertools.find_single_match(data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\'"]\})' % match["provider"]))
function = prov["l"].replace("code", match["code"]).replace("var_1", match["code"])
prov = eval(scrapertools.find_single_match(data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\']\})' % match["provider"]))

url = scrapertools.find_single_match(function, "return\s*(.*?)[;]*\}")
server_url = scrapertools.find_single_match(prov['l'], 'return\s*"(.*?)"')

url = '%s%s' % (server_url, match['code'])
url = re.sub(r'\'|"|\s|\+', '', url)
url = re.sub(r'var_\d+\[\d+\]', '', url)
embed = prov["e"]

@@ -691,6 +693,8 @@ def findvideos(item):

'<meta property="og:description" content="([^"]+)"')
plot = scrapertools.htmlclean(plot)
fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')

if account:
url += "###" + id + ";" + type
it2.append(

@@ -826,8 +830,19 @@ def get_status(status, type, id):


def jhexdecode(t):
r = re.sub(r'_\d+x\w+x(\d+)', 'var_' + r'\1', t)
r = re.sub(r'_\d+x\w+', 'var_0', r)

k = re.sub(r'(_0x.{4})(?=\(|=)', 'var_0', t).replace('\'','\"')
def to_hex(c, type):
h = int("%s" % c, 16)
if type == '1':
return 'p[%s]' % h
if type == '2':
return '[%s]' % h

x = re.sub(r'(?:p\[)(0x.{,2})(?:\])', lambda z: to_hex(z.group(1), '1'), k)
y = re.sub(r'(?:\(")(0x.{,2})(?:"\))', lambda z: to_hex(z.group(1), '2'), x)

def to_hx(c):
h = int("%s" % c.groups(0), 16)

@@ -835,8 +850,14 @@ def jhexdecode(t):
return chr(h)
else:
return ""
r = re.sub(r'(?:\\|)x(\w{2})(?=[^\w\d])', to_hx, y).replace('var ', '')
server_list = eval(scrapertools.find_single_match(r, '=(\[.*?\])'))

r = re.sub(r'(?:\\|)x(\w{2})', to_hx, r).replace('var ', '')
for val in range(475,0, -1):
server_list.append(server_list[0])
server_list.pop(0)

r = re.sub(r'=\[(.*?)\]', '=%s' % str(server_list), r)

f = eval(scrapertools.get_match(r, '\s*var_0\s*=\s*([^;]+);'))
for i, v in enumerate(f):
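jhexdecode above deobfuscates the providers.js blob by renaming _0x... identifiers and folding literal \xNN escapes back into characters with int(..., 16) and chr(). A rough, self-contained illustration of just the escape-folding step (the sample string and function name are made up for the example):

import re

def unescape_hex(text):
    # Replace every \xNN escape with the character it encodes.
    return re.sub(r'\\x([0-9A-Fa-f]{2})',
                  lambda m: chr(int(m.group(1), 16)),
                  text)

print(unescape_hex(r'var _0x1a2b = "\x68\x74\x74\x70";'))  # -> var _0x1a2b = "http";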
@@ -135,9 +135,8 @@ def lista(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'class=(?:MvTbImg|TPostMv).*?href=(.*?)\/(?:>| class).*?src=(.*?) class=attachment.*?'
patron += '(?:strong|class=Title)>(.*?)<.*?(?:<td|class=Year)>(.*?)<.*?'
patron += '(?:<td|class=Description)>(.*?)<(?:\/td|\/p)>'
patron = 'class=(?:MvTbImg|TPostMv).*?href=(.*?)\/(?:>| class).*?src=(.*?) '
patron += 'class=Title>(.*?)<.*?(?:<td|class=Year)>(.*?)<.*?(?:<td|class=Description)>(.*?)<(?:\/td|\/p)>'

matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedplot in matches:

@@ -34,7 +34,7 @@ def login():
config.get_setting("plusdedeuser", "plusdede")) + "&password=" + str(
config.get_setting("plusdedepassword", "plusdede")) + "&app=2131296469"
url = "https://www.plusdede.com/"
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
headers = {"User-Agent":"Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36","Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
data = httptools.downloadpage("https://www.plusdede.com/login", post=post, headers=headers,
replace_headers=False).data

@@ -772,13 +772,13 @@ def checkseen(item):
if item.tipo == "8":
url_temp = "https://www.plusdede.com/set/episode/" + item.data_id + "/seen"
tipo_str = "series"
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
headers = {"User-Agent":"Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36", "Referer": "https://www.plusdede.com/serie/",
"X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": item.token}
else:
url_temp = "https://www.plusdede.com/set/usermedia/" + item.tipo + "/" + item.data_id + "/seen"
tipo_str = "pelis"
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
headers = {"User-Agent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36", "Referer": "https://www.plusdede.com/serie/",
"X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": item.token}
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data

@@ -931,7 +931,7 @@ def plusdede_check(item):
tipo_str = "listas"
else:
tipo_str = "pelis"
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
headers = {"User-Agent":"Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36","Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
"X-CSRF-TOKEN": item.token}
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers,

@@ -305,7 +305,6 @@ def findvideos(item):

if filtro_enlaces != 1:
list_links.extend(parse_videos(item, "Descargar", online[-1]))

list_links = filtertools.get_links(list_links, item, list_idiomas, CALIDADES)

for i in range(len(list_links)):

@@ -315,6 +314,7 @@ def findvideos(item):
d=c[0].rstrip( )
d=d.lstrip( )
list_links[i].server=d.replace("streamix", "streamixcloud")
list_links[i].server=d.replace("uploaded", "uploadedto")

list_links = servertools.get_servers_itemlist(list_links)
autoplay.start(list_links, item)

@@ -25,7 +25,7 @@ ficherocookies = os.path.join(config.get_data_path(), "cookies.dat")

# Headers por defecto, si no se especifica nada
default_headers = dict()
default_headers["User-Agent"] = "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3163.100 Safari/537.36"
default_headers["User-Agent"] = "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3163.100 Safari/537.36"
default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3"
default_headers["Accept-Charset"] = "UTF-8"
plugin.video.alfa/servers/clipwatching.json (new file, 42 lines)
@@ -0,0 +1,42 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "clipwatching.com/(.*?).html",
                "url": "http://clipwatching.com/\\1.html"
            }
        ]
    },
    "free": true,
    "id": "clipwatching",
    "name": "clipwatching",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://s17.postimg.org/e6kcan0vj/clipwatching1.png"
}
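The find_videos block in these server JSON files pairs a regular expression with a URL template used to turn links found in a page into canonical player URLs. A hedged illustration of how such a pattern/url pair could be applied (the helper and sample HTML are invented; inside the addon this resolution is handled by the servertools machinery):

import re

PATTERN = r"clipwatching.com/(.*?).html"
URL_TEMPLATE = r"http://clipwatching.com/\1.html"

def find_clipwatching_links(html):
    # Rewrite every match of the pattern into the canonical URL form.
    return [re.sub(PATTERN, URL_TEMPLATE, m.group(0))
            for m in re.finditer(PATTERN, html)]

print(find_clipwatching_links('<iframe src="http://clipwatching.com/abc123.html">'))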
plugin.video.alfa/servers/clipwatching.py (new file, 25 lines)
@@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "no longer exists" in data or "to copyright issues" in data:
        return False, "[clipwatching] El video ha sido borrado"
    if "please+try+again+later." in data:
        return False, "[clipwatching] Error de clipwatching, no se puede generar el enlace al video"
    return True, ""


def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    video_urls = []
    videourl, label = scrapertools.find_single_match(data, 'file:"([^"]+).*?label:"([^"]+)')
    video_urls.append([label + " [clipwatching]", videourl])

    return video_urls
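Like the other server connectors, the new clipwatching module exposes test_video_exists and get_video_url. A rough standalone usage sketch (the page URL is a placeholder; within Kodi these functions are driven by the addon's servertools):

from servers import clipwatching

page_url = "http://clipwatching.com/example.html"
exists, message = clipwatching.test_video_exists(page_url)
if exists:
    for label, media_url in clipwatching.get_video_url(page_url):
        print("%s -> %s" % (label, media_url))
else:
    print(message)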
@@ -16,6 +16,8 @@ def test_video_exists(page_url):

data = httptools.downloadpage(page_url, cookies=False).data
if 'file was deleted' in data:
return False, "[FlashX] El archivo no existe o ha sido borrado"
elif 'File Not Found' in data:
return False, "[FlashX] El archivo no existe"
elif 'Video is processing now' in data:
return False, "[FlashX] El archivo se está procesando"
@@ -3,9 +3,7 @@

from core import httptools
from core import scrapertools
from platformcode import config, logger

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0'}

from core import jsontools

def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)

@@ -22,132 +20,154 @@ def test_video_exists(page_url):

return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
logger.info()
itemlist = []

data = httptools.downloadpage(page_url, cookies=False).data

header = {}
if "|" in page_url:
page_url, referer = page_url.split("|", 1)
header = {'Referer': referer}
data = httptools.downloadpage(page_url, headers=header, cookies=False).data
subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')
# Header para la descarga
header_down = "|User-Agent=" + headers['User-Agent']

try:
from lib.aadecode import decode as aadecode
if "videocontainer" not in data:
url = page_url.replace("/embed/", "/f/")
data = httptools.downloadpage(url, cookies=False).data
code = scrapertools.find_single_match(data, '<span[^>]+id="[^"]+">([^<]{40,})</span>' )
_0x59ce16 = eval(scrapertools.find_single_match(data, '_0x59ce16=([^;]+)').replace('parseInt', 'int'))
_1x4bfb36 = eval(scrapertools.find_single_match(data, '_1x4bfb36=([^;]+)').replace('parseInt', 'int'))
parseInt = eval(scrapertools.find_single_match(data, '_0x30725e,(\(parseInt.*?)\),').replace('parseInt', 'int'))
url = decode(code, parseInt, _0x59ce16, _1x4bfb36)
url = httptools.downloadpage(url, only_headers=True, follow_redirects=False).headers.get('location')
extension = scrapertools.find_single_match(url, '(\..{,3})\?')
itemlist.append([extension, url, 0,subtitle])

text_encode = scrapertools.find_multiple_matches(data, '(゚ω゚.*?\(\'\_\'\));')
text_decode = ""
for t in text_encode:
text_decode += aadecode(t)
except Exception:
logger.info()
if config.get_setting('api', __file__):
url = get_link_api(page_url)
extension = scrapertools.find_single_match(url, '(\..{,3})\?')
if url:
itemlist.append([extension, url, 0,subtitle])
logger.debug(itemlist)

var_r = scrapertools.find_single_match(text_decode, "window\.[A-z]+\s*=\s*['\"]([^'\"]+)['\"]")
var_encodes = scrapertools.find_multiple_matches(data, 'id="%s[^"]*">([^<]+)<' % var_r)
numeros = scrapertools.find_single_match(data, '_[A-f0-9]+x[A-f0-9]+\s*(?:=|\^)\s*([0-9]{4,}|0x[A-f0-9]{4,})')
op1, op2 = scrapertools.find_single_match(data, '\(0x(\d),0x(\d)\);')
idparse, hexparse = scrapertools.find_multiple_matches(data, "parseInt\('([0-9]+)'")
# numeros = [numeros, str(int(hexparse, 8))]
rangos, rangos2 = scrapertools.find_single_match(data, "\)-([0-9]+).0x4\)/\(([0-9]+)")
videourl = ""
for encode in var_encodes:
text_decode = ""
try:
mult = int(op1) * int(op2)
rango1 = encode[:mult]
decode1 = []
for i in range(0, len(rango1), 8):
decode1.append(int(rango1[i:i + 8], 16))
rango1 = encode[mult:]
j = 0
i = 0
while i < len(rango1):
index1 = 64
value1 = 0
value2 = 0
value3 = 0
while True:
if (i + 1) >= len(rango1):
index1 = 143
value3 = int(rango1[i:i + 2], 16)
i += 2
data = value3 & 63
value2 += data << value1
value1 += 6
if value3 < index1:
break
return itemlist

# value4 = value2 ^ decode1[j % (mult / 8)] ^ int(idparse,8)
# for n in numeros:
# if not n.isdigit():
# n = int(n, 16)
# value4 ^= int(n)
value4 = value2 ^ decode1[(j % 9)] ^ (int(idparse, 8) - int(rangos) + 4) / (int(rangos2) - 8) ^ int(hexparse, 8)
value5 = index1 * 2 + 127
for h in range(4):
valorfinal = (value4 >> 8 * h) & (value5)
valorfinal = chr(valorfinal - 1)
if valorfinal != "%":
text_decode += valorfinal
j += 1
except:
continue

videourl = "https://openload.co/stream/%s?mime=true" % text_decode
resp_headers = httptools.downloadpage(videourl, follow_redirects=False, only_headers=True)
videourl = resp_headers.headers["location"].replace("https", "http").replace("?mime=true", "")
extension = resp_headers.headers["content-type"]
break
def decode(code, parseInt, _0x59ce16, _1x4bfb36):
logger.info()
import math

# Falla el método, se utiliza la api aunque en horas punta no funciona
if not videourl:
videourl, extension = get_link_api(page_url)
except:
import traceback
logger.info(traceback.format_exc())
# Falla el método, se utiliza la api aunque en horas punta no funciona
videourl, extension = get_link_api(page_url)
_0x1bf6e5 = ''
ke = []

extension = extension.replace("video/", ".").replace("application/x-", ".")
if not extension:
try:
extension = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"')
extension = "." + extension.rsplit(".", 1)[1]
except:
pass
for i in range(0, len(code[0:9*8]),8):
ke.append(int(code[i:i+8],16))

if config.get_platform() != "plex":
video_urls.append([extension + " [Openload] ", videourl + header_down, 0, subtitle])
else:
video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])
_0x439a49 = 0
_0x145894 = 0

for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
while _0x439a49 < len(code[9*8:]):
_0x5eb93a = 64
_0x896767 = 0
_0x1a873b = 0
_0x3c9d8e = 0
while True:
if _0x439a49 + 1 >= len(code[9*8:]):
_0x5eb93a = 143;

return video_urls
_0x3c9d8e = int(code[9*8+_0x439a49:9*8+_0x439a49+2], 16)
_0x439a49 +=2

if _0x1a873b < 6*5:
_0x332549 = _0x3c9d8e & 63
_0x896767 += _0x332549 << _0x1a873b
else:
_0x332549 = _0x3c9d8e & 63
_0x896767 += int(_0x332549 * math.pow(2, _0x1a873b))

_0x1a873b += 6
if not _0x3c9d8e >= _0x5eb93a: break

# _0x30725e = _0x896767 ^ ke[_0x145894 % 9] ^ _0x59ce16 ^ parseInt ^ _1x4bfb36
_0x30725e = _0x896767 ^ ke[_0x145894 % 9] ^ parseInt ^ _1x4bfb36
_0x2de433 = _0x5eb93a * 2 + 127

for i in range(4):
_0x3fa834 = chr(((_0x30725e & _0x2de433) >> (9*8/ 9)* i) - 1)
if _0x3fa834 != '$':
_0x1bf6e5 += _0x3fa834
_0x2de433 = (_0x2de433 << (9*8/ 9))

_0x145894 += 1


url = "https://openload.co/stream/%s?mime=true" % _0x1bf6e5
return url


def login():
logger.info()
data = httptools.downloadpage('https://openload.co').data
_csrf = scrapertools.find_single_match(data, '<input type="hidden" name="_csrf" value="([^"]+)">')

post = {
'LoginForm[email]' : config.get_setting('user', __file__),
'LoginForm[password]' : config.get_setting('passowrd', __file__),
'LoginForm[rememberMe]' : 1,
'_csrf' : _csrf
}
data = httptools.downloadpage('https://openload.co/login', post = post).data

if 'Login key has already been sent.' in data:
while True :
if 'Invalid login key.' in data:
platformtools.dialog_ok('openload', 'El código introducido no es válido\nrevisa tu correo e introduce el código correcto')

code = platformtools.dialog_input( post.get('LoginForm[loginkey]', ''),
'Introduzca el código que ha sido enviado a \'%s\'' % 'r_dav'
)
if not code:
break
else:
post['LoginForm[loginkey]'] = code
data = httptools.downloadpage('https://openload.co/login', post = post).data

if 'Welcome back,' in data: break


def get_api_keys():
logger.info()
api_login = config.get_setting('api_login', __file__)
api_key = config.get_setting('api_key', __file__)
if not api_key or not api_login:
login()
data = httptools.downloadpage('https://openload.co/account').data
post = {
'FTPKey[password]' : config.get_setting('password', __file__),
'_csrf' : scrapertools.find_single_match(data, '<input type="hidden" name="_csrf" value="([^"]+)">')
}

data = httptools.downloadpage('https://openload.co/account', post = post).data
api_login = scrapertools.find_single_match(data, '<tr><td>ID:</td><td>([^<]+)</td></tr>')
api_key = scrapertools.find_single_match(data, 'Your FTP Password/API Key is: ([^<]+) </div>')
config.set_setting('api_login', api_login, __file__)
config.set_setting('api_key', api_key, __file__)

return api_login, api_key


def get_link_api(page_url):
from core import jsontools
file_id = scrapertools.find_single_match(page_url, '(?:embed|f)/([0-9a-zA-Z-_]+)')
login = "97b2326d7db81f0f"
key = "AQFO3QJQ"
data = httptools.downloadpage(
"https://api.openload.co/1/file/dlticket?file=%s&login=%s&key=%s" % (file_id, login, key)).data
data = jsontools.load(data)
logger.info()

api_login, api_key = get_api_keys()

file_id = scrapertools.find_single_match(page_url, '(?:embed|f)/([0-9a-zA-Z-_]+)')

data = httptools.downloadpage("https://api.openload.co/1/file/dlticket?file=%s&login=%s&key=%s" % (file_id, api_login, api_key)).data
data = jsontools.load_json(data)
# logger.info(data)
if data["status"] == 200:
ticket = data["result"]["ticket"]
data = httptools.downloadpage("https://api.openload.co/1/file/dl?file=%s&ticket=%s" % (file_id, ticket)).data
data = jsontools.load(data)
extension = data["result"]["content_type"]
videourl = data['result']['url']
videourl = videourl.replace("https", "http")
return videourl, extension

return "", ""
return data['result']['url'].replace("https", "http")
@@ -10,7 +10,6 @@ from lib import jsunpack
from platformcode import logger

headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0']]
host = "http://powvideo.net/"


def test_video_exists(page_url):

@@ -22,21 +21,28 @@ def test_video_exists(page_url):


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
#logger.info("(page_url='%s')" % page_url)
logger.info()
itemlist = []

referer = page_url.replace('iframe', 'preview')

data = httptools.downloadpage(page_url, headers={'referer': referer}).data
_0xa3e8 = scrapertools.find_single_match(data, 'var _0x[0-f]+=(\[[^;]+\]);')

if data == "File was deleted":
return "El archivo no existe o ha sido borrado"

if 'Video is processing now' in data:
return "El vídeo está siendo procesado, intentalo de nuevo mas tarde"

var = scrapertools.find_single_match(data, 'var _0x[0-f]+=(\[[^;]+\]);')

packed = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed)

video_urls = []

url = scrapertools.find_single_match(unpacked, "(?:src):\\\\'([^\\\\]+.mp4)\\\\'")
video_urls.append([".mp4" + " [powvideo]", S(_0xa3e8).decode(url)])

video_urls.sort(key=lambda x: x[0], reverse=True)
return video_urls
itemlist.append([".mp4" + " [powvideo]", S(var).decode(url)])
itemlist.sort(key=lambda x: x[0], reverse=True)
return itemlist


class S:

@@ -50,21 +56,21 @@ class S:
self.d = None

var = eval(var)
for x in range(0x1f0, 0, -1):
for x in range(0xd3, 0, -1):
var.append(var.pop(0))

self.var = var

self.t(
self.decode_index('0x22', '!UJH') +
self.decode_index('0x23', 'NpE)') +
self.decode_index('0x24', '4uT2') +
self.decode_index('0x23', 'NpE)'),
self.decode_index('0x25', '@ZC2')
self.decode_index('0xc') +
self.decode_index('0d') +
self.decode_index('0xe') +
self.decode_index('0xf'),
self.decode_index('0x10')
)

def decode_index(self, index, key):
b64_data = self.var[int(index, 16)];
def decode_index(self, index, key=None):
b64_data = self.var[int(index, 16)]
result = ''
_0xb99338 = 0x0
_0x25e3f4 = 0x0

@@ -72,25 +78,28 @@ class S:
data = base64.b64decode(b64_data)
data = urllib.unquote(data).decode('utf8')

_0x5da081 = [x for x in range(0x100)]
if key:
_0x5da081 = [x for x in range(0x100)]

for x in range(0x100):
_0xb99338 = (_0xb99338 + _0x5da081[x] + ord(key[x % len(key)])) % 0x100
_0x139847 = _0x5da081[x]
_0x5da081[x] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847
for x in range(0x100):
_0xb99338 = (_0xb99338 + _0x5da081[x] + ord(key[x % len(key)])) % 0x100
_0x139847 = _0x5da081[x]
_0x5da081[x] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847

_0xb99338 = 0x0
_0xb99338 = 0x0

for _0x11ebc5 in range(len(data)):
_0x25e3f4 = (_0x25e3f4 + 0x1) % 0x100
_0xb99338 = (_0xb99338 + _0x5da081[_0x25e3f4]) % 0x100
_0x139847 = _0x5da081[_0x25e3f4]
_0x5da081[_0x25e3f4] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847
result += chr(ord(data[_0x11ebc5]) ^ _0x5da081[(_0x5da081[_0x25e3f4] + _0x5da081[_0xb99338]) % 0x100])
for _0x11ebc5 in range(len(data)):
_0x25e3f4 = (_0x25e3f4 + 0x1) % 0x100
_0xb99338 = (_0xb99338 + _0x5da081[_0x25e3f4]) % 0x100
_0x139847 = _0x5da081[_0x25e3f4]
_0x5da081[_0x25e3f4] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847
result += chr(ord(data[_0x11ebc5]) ^ _0x5da081[(_0x5da081[_0x25e3f4] + _0x5da081[_0xb99338]) % 0x100])

return result
return result
else:
return data

def decode(self, url):
_hash = re.compile('[A-z0-9_-]{40,}', re.DOTALL).findall(url)[0]
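decode_index above is an RC4-style decryption of the base64-decoded, url-unquoted entry: a key-scheduling pass over a 256-entry state followed by the XOR keystream loop. A standalone sketch of that core, assuming plain Python 2 str handling as in the surrounding code (the function name and calling convention are only for illustration):

def rc4_decrypt(data, key):
    # Key-scheduling algorithm (KSA): permute the 256-entry state with the key.
    s = list(range(256))
    j = 0
    for i in range(256):
        j = (j + s[i] + ord(key[i % len(key)])) % 256
        s[i], s[j] = s[j], s[i]

    # Keystream generation (PRGA) XORed against the data.
    result = ""
    i = j = 0
    for ch in data:
        i = (i + 1) % 256
        j = (j + s[i]) % 256
        s[i], s[j] = s[j], s[i]
        result += chr(ord(ch) ^ s[(s[i] + s[j]) % 256])
    return result

In the diff, the key comes from other entries of self.var and the data is the decoded value at the requested index; when no key is given, decode_index now simply returns the decoded data.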
@@ -4,14 +4,12 @@ import re
import base64
import urllib

from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger

headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0']]
host = "http://streamplay.to/"


def test_video_exists(page_url):

@@ -26,24 +24,25 @@ def test_video_exists(page_url):


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.info()
itemlist = []

referer = re.sub(r"embed-|player-", "", page_url)[:-5]

referer = page_url.replace('iframe', 'preview')
data = httptools.downloadpage(page_url, headers={'Referer': referer}).data

if data == "File was deleted":
return "El archivo no existe o ha sido borrado"

packed = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed)
_0xd003 = scrapertools.find_single_match(data, 'var _0x[0-f]+=(\[[^;]+\]);')
var = scrapertools.find_single_match(data, 'var _0x[0-f]+=(\[[^;]+\]);')

video_urls = []
url = scrapertools.find_single_match(unpacked, '(http[^,]+\.mp4)')
itemlist.append([".mp4" + " [streamplay]", S(var).decode(url)])
itemlist.sort(key=lambda x: x[0], reverse=True)

video_urls.append([".mp4" + " [streamplay]", S(_0xd003).decode(url)])

video_urls.sort(key=lambda x: x[0], reverse=True)
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))

return video_urls
return itemlist


class S:

@@ -57,21 +56,21 @@ class S:
self.d = None

var = eval(var)
for x in range(0x1f0, 0, -1):
for x in range(0xf8, 0, -1):
var.append(var.pop(0))

self.var = var

self.t(
self.decode_index('0x22', '!UJH') +
self.decode_index('0x23', 'NpE)') +
self.decode_index('0x24', '4uT2') +
self.decode_index('0x23', 'NpE)'),
self.decode_index('0x25', '@ZC2')
self.decode_index('0xb') +
self.decode_index('0xc') +
self.decode_index('0xd') +
self.decode_index('0xe'),
self.decode_index('0xf')
)

def decode_index(self, index, key):
b64_data = self.var[int(index, 16)];
def decode_index(self, index, key=None):
b64_data = self.var[int(index, 16)]
result = ''
_0xb99338 = 0x0
_0x25e3f4 = 0x0

@@ -79,25 +78,28 @@ class S:
data = base64.b64decode(b64_data)
data = urllib.unquote(data).decode('utf8')

_0x5da081 = [x for x in range(0x100)]
if key:
_0x5da081 = [x for x in range(0x100)]

for x in range(0x100):
_0xb99338 = (_0xb99338 + _0x5da081[x] + ord(key[x % len(key)])) % 0x100
_0x139847 = _0x5da081[x]
_0x5da081[x] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847
for x in range(0x100):
_0xb99338 = (_0xb99338 + _0x5da081[x] + ord(key[x % len(key)])) % 0x100
_0x139847 = _0x5da081[x]
_0x5da081[x] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847

_0xb99338 = 0x0
_0xb99338 = 0x0

for _0x11ebc5 in range(len(data)):
_0x25e3f4 = (_0x25e3f4 + 0x1) % 0x100
_0xb99338 = (_0xb99338 + _0x5da081[_0x25e3f4]) % 0x100
_0x139847 = _0x5da081[_0x25e3f4]
_0x5da081[_0x25e3f4] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847
result += chr(ord(data[_0x11ebc5]) ^ _0x5da081[(_0x5da081[_0x25e3f4] + _0x5da081[_0xb99338]) % 0x100])
for _0x11ebc5 in range(len(data)):
_0x25e3f4 = (_0x25e3f4 + 0x1) % 0x100
_0xb99338 = (_0xb99338 + _0x5da081[_0x25e3f4]) % 0x100
_0x139847 = _0x5da081[_0x25e3f4]
_0x5da081[_0x25e3f4] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847
result += chr(ord(data[_0x11ebc5]) ^ _0x5da081[(_0x5da081[_0x25e3f4] + _0x5da081[_0xb99338]) % 0x100])

return result
return result
else:
return data

def decode(self, url):
_hash = re.compile('[A-z0-9_-]{40,}', re.DOTALL).findall(url)[0]