Modifications to gktools and timeout
@@ -11,7 +11,7 @@ from core.item import Item
 from core import tmdb
 from platformcode import config,logger
-import gktools
+import gktools, random, time, urllib

 __modo_grafico__ = config.get_setting('modo_grafico', 'animeyt')
@@ -32,6 +32,10 @@ def mainlist(item):
     itemlist = renumbertools.show_option(item.channel, itemlist)

+    # ~ prova = 'EIpStovt0/tFerZM4pviHBzddH308TWRR41XnHv9N4tUNih6r+GjCDa/cV1uVUQXEmZd1Hsu2ABzESzBMGiK6XUdRgYZYHImmrIWcn3tfYPCuSWBc2UgwxUtR+WOsov6YiGM5AdgJGFunoN'
+    # ~ aux = gktools.decode_rijndael(prova, preIV = 'b3512f4972d314da9', key='3e1a854e7d5835ab99d99a29afec8bbb')
+    # ~ itemlist.append(Item(channel=item.channel, title=aux, action="novedades", url=HOST))
+
     return itemlist
@@ -160,20 +164,24 @@ def findvideos(item):
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t| |<br>", "", data)

     # ~ patron = 'Player\("(.*?)"'
-    patron = 'iframe src="([^"]*)"'
+    from collections import OrderedDict  # switched dict to OrderedDict to keep the same order as on the web

-    matches = scrapertools.find_multiple_matches(data, patron)
+    matches = scrapertools.find_multiple_matches(data, '<li><a id="mirror(\d*)" class="link-veranime[^"]*" href="[^"]*">([^<]*)')
+    d_links = OrderedDict(matches)
+
+    matches = scrapertools.find_multiple_matches(data, 'if \(mirror == (\d*)\).*?iframe src="([^"]*)"')
+    d_frames = OrderedDict(matches)
+
+    for k in d_links:
+        if k in d_frames and d_frames[k] != '':
+            tit = scrapertools.find_single_match(d_frames[k], '/([^\./]*)\.php\?')
+            if tit == '':
+                tit = 'mega' if 'mega.nz/' in d_frames[k] else 'dailymotion' if 'dailymotion.com/' in d_frames[k] else 'noname'
+            if tit == 'id' and 'yourupload.com/' in d_frames[k]: tit = 'yourupload'
+            title = 'Opción %s (%s)' % (d_links[k], tit)
+
+            itemlist.append(item.clone(channel=item.channel, folder=False, title=title, action="play", url=d_frames[k], referer=item.url))
-    for url in matches:
-        title = scrapertools.find_single_match(url, '/([^\.]*)\.php\?')
-        # ~ title = 'PDT'
-        # ~ if "cldup" in url:
-            # ~ title = "Opcion Cldup"
-        # ~ if "chumi" in url:
-            # ~ title = "Opcion Chumi"
-        if title == 'rakuten':  # the only one resolved so far
-            itemlist.append(item.clone(channel=item.channel, folder=False, title=title, action="play", url=url, referer=item.url))

     if item.extra != "library":
         if config.get_videolibrary_support() and item.extra:
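
The move to OrderedDict matters because findvideos now runs two regex passes and pairs them by mirror number: d_links maps a mirror id to its display name, d_frames maps the id to its iframe url, and iteration must follow the page's own mirror order. A minimal sketch with made-up data:

from collections import OrderedDict

d_links = OrderedDict([('1', 'Rakuten'), ('2', 'Mega')])                    # id -> option name
d_frames = OrderedDict([('1', 'https://s2.animeyt.tv/rakuten.php?id=x')])   # id -> iframe url
for k in d_links:
    if k in d_frames and d_frames[k] != '':
        print('Opción %s -> %s' % (d_links[k], d_frames[k]))                # keeps page order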

@@ -186,14 +194,321 @@ def play(item):
     logger.info()
     itemlist = []

-    if 'https://s2.animeyt.tv/rakuten.php?' in item.url:
-        itemlist = gktools.gk_play(item)
+    if item.url.startswith('https://www.dailymotion.com/'):
+        itemlist.append(item.clone(url=item.url, server='dailymotion'))
+
+    elif item.url.startswith('https://mega.nz/'):
+        itemlist.append(item.clone(url=item.url.replace('embed',''), server='mega'))
+
+    elif item.url.startswith('https://s2.animeyt.tv/rakuten.php?'):
+        # 1- Download
+        data, ck = gktools.get_data_and_cookie(item)
+
+        # 2- Compute data
+        gsv = scrapertools.find_single_match(data, '<meta name="google-site-verification" content="([^"]*)"')
+        if not gsv: return itemlist
+
+        suto = gktools.md5_dominio(item.url)
+        sufijo = '3497510'
+
+        token = gktools.generar_token('"'+gsv+'"', suto+'yt'+suto+sufijo)
+
+        link, subtitle = gktools.get_play_link_id(data, item.url)
+
+        url = 'https://s2.animeyt.tv/rakuten/plugins/gkpluginsphp.php'
+        post = "link=%s&token=%s" % (link, token)
+
+        # 3- Download json
+        data = gktools.get_data_json(url, post, ck, item.url)
+
+        # 4- Extract links
+        itemlist = gktools.extraer_enlaces_json(data, item.referer, subtitle)
+
+
+    elif item.url.startswith('https://s3.animeyt.tv/amz.php?'):
+        # 1- Download
+        data, ck = gktools.get_data_and_cookie(item)
+
+        # 2- Compute data
+        gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
+        v_token = scrapertools.find_single_match(data, "var v_token='([^']*)'")
+        if not gsv or not v_token: return itemlist
+
+        suto = gktools.md5_dominio(item.url)
+        sufijo = '9457610'
+
+        token = gktools.generar_token('"'+gsv+'"', suto+'yt'+suto+sufijo)
+
+        url = 'https://s3.animeyt.tv/amz_animeyts.php'
+        post = "v_token=%s&token=%s&handler=%s" % (v_token, token, 'Animeyt')
+
+        # 3- Download json
+        data = gktools.get_data_json(url, post, ck, item.url)
+
+        # 4- Extract links
+        itemlist = gktools.extraer_enlaces_json(data, item.referer)
+
+
+    elif item.url.startswith('https://s2.animeyt.tv/lola.php?'):
+        # 1- Download
+        data, ck = gktools.get_data_and_cookie(item)
+
+        # 2- Compute data
+        gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
+        s_cd, s_file = scrapertools.find_single_match(data, "var cd='([^']*)';\s*var file='([^']*)'")
+        if not gsv or not s_cd or not s_file: return itemlist
+
+        suto = gktools.md5_dominio(item.url)
+        sufijo = '8134976'
+
+        token = gktools.generar_token('"'+gsv+'"', suto+'yt'+suto+sufijo)
+
+        url = 'https://s2.animeyt.tv/minha_animeyt.php'
+        post = "cd=%s&file=%s&token=%s&handler=%s" % (s_cd, s_file, token, 'Animeyt')
+
+        # 3- Download json
+        data = gktools.get_data_json(url, post, ck, item.url)
+
+        # 4- Extract links
+        itemlist = gktools.extraer_enlaces_json(data, item.referer)
+
+
+    elif item.url.startswith('https://s4.animeyt.tv/chumi.php?'):  #https://s4.animeyt.tv/chumi.php?cd=3481&file=4
+        # 1- Download
+        data, ck = gktools.get_data_and_cookie(item)
+
+        # 2- Compute data
+        gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
+        s_cd, s_file = scrapertools.find_single_match(item.url, '\?cd=([^&]*)&file=([^&]*)')
+        if not gsv or not s_cd or not s_file: return itemlist
+
+        ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
+
+        gsv_bis = gktools.transforma_gsv(gsv, '159753')
+        p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
+        p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
+
+        texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
+
+        suto = gktools.md5_dominio(item.url)
+        sufijo = '147268278' + gsv[-5:]
+        prefijo = gsv[:-5] + gsv_bis
+
+        token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
+        archive = gktools.toHex(token)
+
+        url = 'https://s4.animeyt.tv/minha/minha_animeyt.php'
+        post = "cd=%s&id=%s&archive=%s&ip=%s&Japan=%s" % (s_cd, s_file, archive, ip, 'Asia')
+
+        # 3- Download json
+        data = gktools.get_data_json(url, post, ck, item.url)
+
+        # 4- Extract links
+        itemlist = gktools.extraer_enlaces_json(data, item.referer)
+
+
+    elif item.url.startswith('https://s3.animeyt.tv/mega.php?'):  #https://s3.animeyt.tv/mega.php?v=WmpHMEVLVTNZZktyaVAwai9sYzhWV1ZRTWh0WTZlNGZ3VzFVTXhMTkx2NGlOMjRYUHhZQlMvaUFsQlJFbHBVTA==
+        # 1- Download
+        data, ck = gktools.get_data_and_cookie(item)
+
+        # 2- Compute data
+        gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
+        s_v = scrapertools.find_single_match(item.url, '\?v=([^&]*)')
+        if not gsv or not s_v: return itemlist
+
+        ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
+
+        gsv_bis = gktools.transforma_gsv(gsv, '159753')
+        p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
+        p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
+
+        texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
+
+        suto = gktools.md5_dominio(item.url)
+        sufijo = '147268278' + gsv[-5:]
+        prefijo = gsv[:-5] + gsv_bis
+
+        token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
+        archive = gktools.toHex(token)
+
+        url = 'https://s3.animeyt.tv/mega_animeyts.php'
+        post = "v=%s&archive=%s&referer=%s&ip=%s&Japan=%s" % (s_v, archive, item.url, ip, 'Asia')
+
+        # 3- Download json
+        data = gktools.get_data_json(url, post, ck, item.url)
+
+        # 4- Extract links
+        itemlist = gktools.extraer_enlaces_json(data, item.referer)
+
+
+    elif item.url.startswith('https://s2.animeyt.tv/naruto/naruto.php?'):  #https://s2.animeyt.tv/naruto/naruto.php?id=3477&file=11.mp4
+        # 1- Download
+        data, ck = gktools.get_data_and_cookie(item)
+
+        # 2- Compute data
+        gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
+        s_id, s_file = scrapertools.find_single_match(item.url, '\?id=([^&]*)&file=([^&]*)')
+        if not gsv or not s_id or not s_file: return itemlist
+
+        ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
+
+        gsv_bis = gktools.transforma_gsv(gsv, '159753')
+        p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
+        p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
+
+        texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
+
+        suto = gktools.md5_dominio(item.url)
+        sufijo = '147268278' + gsv[-5:]
+        prefijo = gsv[:-5] + gsv_bis
+
+        token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
+        archive = gktools.toHex(token)
+
+        url = 'https://s2.animeyt.tv/naruto/narutos_animeyt.php'
+        post = "id=%s&file=%s&archive=%s&referer=%s&ip=%s&Japan=%s" % (s_id, s_file, archive, urllib.quote(item.url), ip, 'Asia')
+
+        # 3- Download json
+        data = gktools.get_data_json(url, post, ck, item.url)
+
+        # 4- Extract links
+        itemlist = gktools.extraer_enlaces_json(data, item.referer)
+
+
+    elif item.url.startswith('https://s4.animeyt.tv/facebook.php?'):  #https://s4.animeyt.tv/facebook.php?cd=3481&id=4
+        # 1- Download
+        data, ck = gktools.get_data_and_cookie(item)
+
+        # 2- Compute data
+        gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
+        s_cd, s_id = scrapertools.find_single_match(item.url, '\?cd=([^&]*)&id=([^&]*)')
+        if not gsv or not s_cd or not s_id: return itemlist
+
+        ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
+
+        gsv_bis = gktools.transforma_gsv(gsv, '159753')
+        p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
+        p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
+
+        texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
+
+        suto = gktools.md5_dominio(item.url)
+        sufijo = '147268278' + gsv[-5:]
+        prefijo = gsv[:-5] + gsv_bis
+
+        token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
+        archive = gktools.toHex(token)
+
+        url = 'https://s4.animeyt.tv/facebook/facebook_animeyts.php'
+        post = "cd=%s&id=%s&archive=%s&referer=%s&ip=%s&Japan=%s" % (s_cd, s_id, archive, urllib.quote(item.url), ip, 'Asia')
+
+        # 3- Download json
+        data = gktools.get_data_json(url, post, ck, item.url)
+
+        # 4- Extract links
+        itemlist = gktools.extraer_enlaces_json(data, item.referer)
+
+
+    elif item.url.startswith('https://s.animeyt.tv/v4/media.php?'):  #https://s.animeyt.tv/v4/media.php?id=SmdMQ2Y0NUhFK2hOZlYzbVJCbnE3QT09
+        # 1- Download
+        data, ck = gktools.get_data_and_cookie(item)
+
+        # 2- Compute data
+        gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
+        s_id = scrapertools.find_single_match(item.url, '\?id=([^&]*)')
+        if not gsv or not s_id: return itemlist
+
+        ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
+
+        gsv_bis = gktools.transforma_gsv(gsv, '159753')
+        p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
+        p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
+
+        texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
+
+        suto = gktools.md5_dominio(item.url)
+        sufijo = '8049762' + gsv[-5:]
+        prefijo = gsv[:-5] + gsv_bis
+
+        token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
+        archive = gktools.toHex(token)
+
+        url = 'https://s.animeyt.tv/v4/gsuite_animeyts.php'
+        post = "id=%s&archive=%s&ip=%s&Japan=%s" % (s_id, archive, ip, 'Asia')
+
+        # 3- Download json
+        data = gktools.get_data_json(url, post, ck, item.url)
+
+        # 4- Extract links
+        itemlist = gktools.extraer_enlaces_json(data, item.referer)
+
+
+    elif item.url.startswith('https://s10.animeyt.tv/yourupload.com/id.php?'):  #https://s10.animeyt.tv/yourupload.com/id.php?id=62796D77774A4E4363326642
+        # 1- Download
+        data, ck = gktools.get_data_and_cookie(item)
+
+        # 2- Compute data
+        gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
+        s_id = scrapertools.find_single_match(item.url, '\?id=([^&]*)')
+        if not gsv or not s_id: return itemlist
+
+        ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
+
+        gsv_bis = gktools.transforma_gsv(gsv, '159753')
+        p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
+        p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
+
+        texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
+
+        suto = gktools.md5_dominio(item.url)
+        sufijo = '8049762' + gsv[-5:]
+        prefijo = gsv[:-5] + gsv_bis
+
+        token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
+        archive = gktools.toHex(token)
+
+        url = 'https://s10.animeyt.tv/yourupload.com/chinese_streaming.php'
+        post = "id=%s&archive=%s&ip=%s&Japan=%s" % (s_id, archive, ip, 'Asia')
+
+        # 3- Download json
+        data = gktools.get_data_json(url, post, ck, item.url)
+
+        # 4- Extract links
+        itemlist = gktools.extraer_enlaces_json(data, item.referer)
+
+
+    elif item.url.startswith('https://s4.animeyt.tv/onedrive.php?'):  #https://s4.animeyt.tv/onedrive.php?cd=3439&id=12
+        # 1- Download
+        data, ck = gktools.get_data_and_cookie(item)
+
+        # 2- Compute data
+        gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
+        s_cd, s_id = scrapertools.find_single_match(item.url, '\?cd=([^&]*)&id=([^&]*)')
+        if not gsv or not s_cd or not s_id: return itemlist
+
+        ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
+
+        gsv_bis = gktools.transforma_gsv(gsv, '159753')
+        p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
+        p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
+
+        texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
+
+        suto = gktools.md5_dominio(item.url)
+        sufijo = '147268278' + gsv[-5:]
+        prefijo = gsv[:-5] + gsv_bis
+
+        token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
+        archive = gktools.toHex(token)
+
+        url = 'https://s4.animeyt.tv/onedrive/onedrive_animeyts.php'
+        post = "cd=%s&id=%s&archive=%s&ip=%s&Japan=%s" % (s_cd, s_id, archive, ip, 'Asia')
+
+        # 3- Download json
+        data = gktools.get_data_json(url, post, ck, item.url)
+
+        # 4- Extract links
+        itemlist = gktools.extraer_enlaces_json(data, item.referer)
+
+
+    # PENDING: ANALYZE THE REMAINING CASES...
+    # ~ else:
+        # ~ headers = {'Referer': item.referer}
+        # ~ resp = httptools.downloadpage(item.url, headers=headers, cookies=False)
+        # ~ with open('animeyt-play-%s.html' % item.title, 'w') as f: f.write(resp.data); f.close()

     return itemlist
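
The chumi, mega, naruto, facebook, media, yourupload and onedrive branches above all repeat the same token recipe and differ only in the suffix base and the POST fields. A hypothetical refactor, not part of this commit (build_gk_token is an invented name), sketching the shared computation:

import random
import gktools

def build_gk_token(gsv, url, sufijo_base='147268278'):
    """Sketch of the token recipe shared by the newest branches."""
    ip = gktools.toHex(gsv) + str(1000000 + random.randint(0, 9000000)) \
                            + str(100000 + random.randint(0, 900000))
    gsv_bis = gktools.transforma_gsv(gsv, '159753')         # RC4 keystream, base64-encoded
    p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]            # IV prefix seen in the js
    p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis                  # Rijndael key
    texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
    suto = gktools.md5_dominio(url)
    sufijo = sufijo_base + gsv[-5:]
    prefijo = gsv[:-5] + gsv_bis
    token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
    return gktools.toHex(token), ip                          # the 'archive' and 'ip' POST fields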

@@ -496,6 +496,7 @@ def findvideos(item):
         elif scrapedurl.startswith("https://load.pelispedia.vip/embed/"):
             if scrapedtitle == 'vid': scrapedtitle = 'vidoza'
             elif scrapedtitle == 'fast': scrapedtitle = 'fastplay'
+            elif scrapedtitle == 'frem': scrapedtitle = 'fembed'
             title = "Ver video en [" + scrapedtitle + "]"
             new_item = item.clone(title=title, url=scrapedurl, action="play", referer=item.url)
             itemlist.append(new_item)

@@ -512,8 +513,58 @@ def findvideos(item):

 def play(item):
     logger.info("url=%s" % item.url)
     itemlist = []

-    itemlist = gktools.gk_play(item)
+    if item.url.startswith("https://pelispedia.video/v.php"):
+        # 1- Download
+        data, ck = gktools.get_data_and_cookie(item)
+
+        # 2- Compute data
+        gsv = scrapertools.find_single_match(data, '<meta name="google-site-verification" content="([^"]*)"')
+        if not gsv: return itemlist
+
+        suto = gktools.md5_dominio(item.url)
+        sufijo = '2653'
+
+        token = gktools.generar_token('"'+gsv+'"', suto+'yt'+suto+sufijo)
+
+        link, subtitle = gktools.get_play_link_id(data, item.url)
+
+        url = 'https://pelispedia.video/plugins/ymovies.php'  # cloupedia.php gkpedia.php
+        post = "link=%s&token=%s" % (link, token)
+
+        # 3- Download json
+        data = gktools.get_data_json(url, post, ck)
+
+        # 4- Extract links
+        itemlist = gktools.extraer_enlaces_json(data, item.referer, subtitle)
+
+
+    elif item.url.startswith("https://load.pelispedia.vip/embed/"):
+        # 1- Download
+        data, ck = gktools.get_data_and_cookie(item)
+
+        # 2- Compute data
+        gsv = scrapertools.find_single_match(data, '<meta name="google-site-verification" content="([^"]*)"')
+        if not gsv: return itemlist
+
+        suto = gktools.md5_dominio(item.url)
+        sufijo = '785446346'
+
+        token = gktools.generar_token(gsv, suto+'yt'+suto+sufijo)
+
+        url = item.url.replace('/embed/', '/stream/') + '/' + token
+
+        # 3- Download the page
+        data = gktools.get_data_with_cookie(url, ck, item.url)
+
+        # 4- Extract links
+        url = scrapertools.find_single_match(data, '<meta (?:name|property)="og:url" content="([^"]+)"')
+        srv = scrapertools.find_single_match(data, '<meta (?:name|property)="og:sitename" content="([^"]+)"')
+        if srv == '' and 'rapidvideo.com/' in url: srv = 'rapidvideo'
+
+        if url != '' and srv != '':
+            itemlist.append(item.clone(url=url, server=srv.lower()))

     return itemlist

@@ -304,6 +304,8 @@ def submenu_tools(item):
                                  thumbnail=get_thumb("channels.png")))
+    itemlist.append(Item(title='- Testear canales ...', channel="test", action="channel_test_selected"))
+    itemlist.append(Item(title='- Testear servidores ...', channel="test", action="server_test_selected"))
     itemlist.append(Item(title='- Testear todos los canales!', channel="test", action="channel_test_all"))
     itemlist.append(Item(title='- Testear todos los servidores!', channel="test", action="server_test_all"))
     itemlist.append(Item(title='- Testear novedades!', channel="test", action="news_test_all"))
     itemlist.append(Item(title='- Upload tests to web!', channel="test", action="web_update_tests"))
     itemlist.append(

@@ -31,6 +31,9 @@ default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3"
 default_headers["Accept-Charset"] = "UTF-8"
 default_headers["Accept-Encoding"] = "gzip"

+# Maximum wait time for downloadpage when none is specified
+HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = None
+

 def get_url_headers(url):
     domain_cookies = cj._cookies.get("." + urlparse.urlparse(url)[1], {}).get("/", {})

@@ -122,6 +125,9 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr

     url = urllib.quote(url, safe="%/:=&?~#+!$,;'@()*[]")

+    # Cap the download time if no timeout was passed and the global variable has a value set
+    if timeout is None and HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT is not None: timeout = HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT
+
     logger.info("----------------------------------------------")
     logger.info("downloadpage")
     logger.info("----------------------------------------------")

@@ -706,21 +706,27 @@ def filter_servers(servers_list):

     return servers_list

-def check_list_links(itemlist, numero):
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+# Link checking
+# -------------
+
+def check_list_links(itemlist, numero='', timeout=3):
     """
     Checks a list of video links and returns it with each title modified to include the verification.
-    The second parameter (numero) indicates how many links to verify (0:5, 1:10, 2:15, 3:20)
+    The numero parameter indicates how many links to verify (0:5, 1:10, 2:15, 3:20)
+    The timeout parameter sets a maximum wait when downloading the page
     """
     numero = ((int(numero) + 1) * 5) if numero != '' else 10
     for it in itemlist:
         if numero > 0 and it.server != '' and it.url != '':
-            verificacion = check_video_link(it.url, it.server)
+            verificacion = check_video_link(it.url, it.server, timeout)
             it.title = verificacion + ', ' + it.title.strip()
             it.alive = verificacion
             numero -= 1
     return itemlist

-def check_video_link(url, server):
+def check_video_link(url, server, timeout=3):
     """
     Checks whether the link to a video is valid and returns a 2-character verification string.
     :param url, server: link and server

@@ -734,17 +740,23 @@ def check_video_link(url, server):
         return "??"

     if hasattr(server_module, 'test_video_exists'):
+        ant_timeout = httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT
+        httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = timeout  # Cap the download time
         try:
             video_exists, message = server_module.test_video_exists(page_url=url)
             if not video_exists:
                 logger.info("[check_video_link] No existe! %s %s %s" % (message, server, url))
-                return "NO"
+                resultado = "NO"
             else:
-                logger.info("[check_video_link] comprovacion OK %s %s" % (server, url))
-                return "Ok"
+                logger.info("[check_video_link] comprobacion OK %s %s" % (server, url))
+                resultado = "Ok"
         except:
             logger.info("[check_video_link] No se puede comprobar ahora! %s %s" % (server, url))
-            return "??"
+            resultado = "??"
+
+        finally:
+            httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = ant_timeout  # Restore the download timeout
+        return resultado

     logger.info("[check_video_link] No hay test_video_exists para servidor: %s" % server)
     return "??"

@@ -1,145 +1,164 @@
 # -*- coding: utf-8 -*-

 '''
 gktools is a set of helper functions for resolving links to videos behind "GK protection".
 "GK protection" is probably not a real name; it is called that here because the first examples seen were gkpluginsphp and gkpedia.

 "GK" characteristics:
 - It uses a __cfduid cookie
-- It uses meta name="google-site-verification" as the text to encrypt
-- The encryption key is computed in the js
-- A cryptographic token is computed from the text and the key
+- It computes a cryptographic token from a text and a key
+- The text is taken from the html (for example from meta name="google-site-verification", but it can be more complex)
+- The encryption key is computed in obfuscated js files loaded by the html
 - Another url is called with a series of parameters, including the token, and the final videos are obtained from there.

-From here on, 2 options:
+Howto:
+1- download the page
+2- extract the data and compute what is needed
+3- download a second page with the computed token
+4- extract the videos

-a) If the url indicates there is an /embed/
-   - /embed/ is replaced with /stream/ and /token is appended
-   - the page is downloaded, and the videos can be extracted from it
-
-b) Otherwise (direct links)
-   - an identifier is looked for
-     - if there is a call to Play() in the js, the id is taken from there
-     - otherwise the id may be in the url
-   - a php is called with the identifier and the token (gkpluginsphp, gkpedia)
-   - the php returns the list of links to the videos
+Step 2 is by far the most variable one and depends heavily on each site/server!
+By deobfuscating the js you can see the specific data each one needs
+(the text to encrypt, the key to use, the url to call and the parameters)

 Notes:
 - Created from what was seen on pelispedia and animeyt, which use this system.
-- For other channels their data will have to be added in the calcular_* functions,
-  or these functions will have to be made able to extract what is needed from the js.
+  See examples in the code of the animeyt and pelispedia channels


 Created for Alfa-addon by Alfa Developers Team 2018
 '''
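
A condensed sketch of the four steps using the helpers defined below; resolve_gk is an invented wrapper name, and the suffix and URL are the ones the animeyt rakuten branch uses above:

import gktools
from core import scrapertools

def resolve_gk(item):
    data, ck = gktools.get_data_and_cookie(item)                       # 1- download
    gsv = scrapertools.find_single_match(data, '<meta name="google-site-verification" content="([^"]*)"')
    suto = gktools.md5_dominio(item.url)                               # 2- compute data
    token = gktools.generar_token('"'+gsv+'"', suto+'yt'+suto+'3497510')
    link, subtitle = gktools.get_play_link_id(data, item.url)
    post = "link=%s&token=%s" % (link, token)
    data = gktools.get_data_json('https://s2.animeyt.tv/rakuten/plugins/gkpluginsphp.php',
                                 post, ck, item.url)                   # 3- download json
    return gktools.extraer_enlaces_json(data, item.referer, subtitle)  # 4- extract links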

+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-import urlparse
+import os, base64, json, hashlib, urlparse
 from core import httptools
 from core import scrapertools
-# ~ from platformcode import logger
+from platformcode import logger
 from aadecode import decode as aadecode

-def gk_play(item):
+# Download the page and capture the cookie request
+# ------------------------------------------------
+def get_data_and_cookie(item, ck_name='__cfduid'):

-    itemlist = []
-
     # Download so that header and data can be handled separately
     # -----------------------------------------------------------
     headers = {'Referer': item.referer}
     resp = httptools.downloadpage(item.url, headers=headers, cookies=False)
     # ~ with open('gk_play1.html', 'w') as f: f.write(resp.data); f.close()

     # Get the __cfduid cookie
     # -----------------------
-    for h in resp.headers:
-        ck = scrapertools.find_single_match(resp.headers[h], '__cfduid=([^;]*)')
-        if ck:
-            break
-    if not ck: return itemlist
+    ck_value = ''
+    if ck_name != '':
+        for h in resp.headers:
+            ck = scrapertools.find_single_match(resp.headers[h], '%s=([^;]*)' % ck_name)
+            if ck:
+                ck_value = ck
+                break

-    # Extract data and compute the token
-    # ----------------------------------
-    gsv = scrapertools.find_single_match(resp.data, '<meta name="google-site-verification" content="([^"]*)"')
-    if not gsv: return itemlist
-
-    suto = calcular_sutorimux(item.url)  # value computed from the domain
-    sufijo = calcular_sufijo(item.url)  # preset value defined in the js code
-
-    token = generar_token(gsv, suto+'yt'+suto+sufijo)
+    return resp.data, ck_value


-    # Download and extract videos
-    # ---------------------------
+# Download a page using a specific cookie
+# ---------------------------------------
+def get_data_with_cookie(url, ck_value='', referer='', ck_name='__cfduid'):

-    if '/embed/' in item.url:
-        url = item.url.replace('/embed/', '/stream/') + '/' + token
-        headers = {'Referer': item.url, 'Cookie': '__cfduid=' + ck}
-        data = httptools.downloadpage(url, headers=headers, cookies=False).data
-        # ~ with open('gk_play2.html', 'w') as f: f.write(resp.data); f.close()
-
-        # Extract links from the response
-        # -------------------------------
-        url = scrapertools.find_single_match(data, '<meta (?:name|property)="og:url" content="([^"]+)"')
-        srv = scrapertools.find_single_match(data, '<meta (?:name|property)="og:sitename" content="([^"]+)"')
-        if srv == '' and 'rapidvideo.com/' in url: srv = 'rapidvideo'
+    headers = {'Cookie': ck_name+'='+ck_value}
+    if referer != '': headers['referer'] = referer
+    data = httptools.downloadpage(url, headers=headers, cookies=False).data
+    # ~ with open('gk_play2.html', 'w') as f: f.write(data); f.close()

-        if url != '' and srv != '':
-            itemlist.append(item.clone(url=url, server=srv.lower()))
-
-        elif '<title>Vidoza</title>' in data or '|fastplay|' in data:
-            if '|fastplay|' in data:
-                packed = scrapertools.find_single_match(data, "<script type='text/javascript'>(eval\(.*?)</script>")
-                from lib import jsunpack
-                data = jsunpack.unpack(packed)
-                data = data.replace("\\'", "'")
-
-            matches = scrapertools.find_multiple_matches(data, 'file\s*:\s*"([^"]+)"\s*,\s*label\s*:\s*"([^"]+)"')
-            subtitle = ''
-            for fil, lbl in matches:
-                if fil.endswith('.srt') and not fil.endswith('empty.srt'):
-                    subtitle = fil
-                    if not subtitle.startswith('http'):
-                        domi = scrapertools.find_single_match(data, 'aboutlink\s*:\s*"([^"]*)')
-                        subtitle = domi + subtitle
-                    break
-
-            for fil, lbl in matches:
-                if not fil.endswith('.srt'):
-                    itemlist.append([lbl, fil, 0, subtitle])
+    return data


+# Download json using a specific cookie
+# -------------------------------------
+def get_data_json(url, post, ck_value='', referer='', ck_name='__cfduid'):
+
+    headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': ck_name+'='+ck_value}
+    if referer != '': headers['referer'] = referer
+
+    data = httptools.downloadpage(url, post=post, headers=headers, cookies=False).data
+    # ~ with open('gk_play3.html', 'w') as f: f.write(data); f.close()
+
+    return data
+
+
+# Get the link from a javascript Play() call or from the url
+# ----------------------------------------------------------
+def get_play_link_id(data, url):
+
+    playparms = scrapertools.find_single_match(data, 'Play\("([^"]*)","([^"]*)","([^"]*)"')
+    if playparms:
+        link = playparms[0]
+        subtitle = '' if playparms[1] == '' or playparms[2] == '' else playparms[2] + playparms[1] + '.srt'
+    else:
-        playparms = scrapertools.find_single_match(resp.data, 'Play\("([^"]*)","([^"]*)","([^"]*)"')
-        if playparms:
-            link = playparms[0]
-            subtitle = '' if playparms[1] == '' or playparms[2] == '' else playparms[2] + playparms[1] + '.srt'
+        subtitle = ''
+        link = scrapertools.find_single_match(data, 'Play\("([^"]*)"')
+        if not link:
+            link = scrapertools.find_single_match(url, 'id=([^;]*)')
+
+    return link, subtitle
+
+
+# Extract video links from json data
+# ----------------------------------
+def extraer_enlaces_json(data, referer, subtitle=''):
+    itemlist = []
+
+    # Examples:
+    # {"Animeyt":[{"file":"https:\/\/storage.googleapis.com\/my-project-yt-195318.appspot.com\/slow.mp4","type":"mp4","label":"1080p"}]}
+    # {"link":[{"link":"http:\/\/video8.narusaku.tv\/static\/720p\/2.1208982.2039540?md5=B64FKYNbFuWvxkGcSbtz2Q&expires=1528839657","label":"720p","type":"mp4"},{"link":"http:\/\/video5.narusaku.tv\/static\/480p\/2.1208982.2039540?md5=yhLG_3VghEUSd5YlCXOTBQ&expires=1528839657","label":"480p","type":"mp4","default":true},{"link":"http:\/\/video3.narusaku.tv\/static\/360p\/2.1208982.2039540?md5=vC0ZJkxRwV1rVBdeF7D4iA&expires=1528839657","label":"360p","type":"mp4"},{"link":"http:\/\/video2.narusaku.tv\/static\/240p\/2.1208982.2039540?md5=b-y_-rgrLMW7hJwFQSD8Tw&expires=1528839657","label":"240p","type":"mp4"}]}
+    # {"link":"https:\/\/storage.googleapis.com\/cloudflare-caching-pelispedia.appspot.com\/cache\/16050.mp4","type":"mp4"}
+    # {"Harbinger":[{"Harbinger":"...","type":"...","label":"..."}], ...}
+
+    data = data.replace('"Harbinger"', '"file"')
+
+    # Try as json
+    # -----------
+    try:
+        json_data = json.loads(data)
+        enlaces = analizar_enlaces_json(json_data)
+        for enlace in enlaces:
+            url = enlace['link'] if 'link' in enlace else enlace['file']
+            if not url.startswith('http'): url = aadecode(base64.b64decode(url))  # needed for "Harbinger"
+            if not url.startswith('http'): url = decode_rijndael(url)  # post-"Harbinger" in some cases
+            tit = ''
+            if 'type' in enlace: tit += '[%s]' % enlace['type']
+            if 'label' in enlace: tit += '[%s]' % enlace['label']
+            if tit == '': tit = '.mp4'
+
+            itemlist.append([tit, corregir_url(url, referer), 0, subtitle])
+
+    # Otherwise, try as text
+    # ----------------------
+    except:
+        matches = scrapertools.find_multiple_matches(data, '"link"\s*:\s*"([^"]*)"\s*,\s*"label"\s*:\s*"([^"]*)"\s*,\s*"type"\s*:\s*"([^"]*)"')
+        if matches:
+            for url, lbl, typ in matches:
+                itemlist.append(['[%s][%s]' % (typ, lbl), corregir_url(url, referer), 0, subtitle])
+        else:
-            subtitle = ''
-            link = scrapertools.find_single_match(resp.data, 'Play\("([^"]*)"')
-            if not link:
-                link = scrapertools.find_single_match(item.url, 'id=([^;]*)')
-
-            if link:
-                # ~ logger.info('%s %s %s' % (item.url, link, token))
-                url_gk = calcular_url_gk(item.url)
-
-                post = "link=%s&token=%s" % (link, token)
-                headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': '__cfduid=' + ck}
-
-                data = httptools.downloadpage(url_gk, post=post, headers=headers, cookies=False).data
-                # ~ with open('gk_play3.html', 'w') as f: f.write(resp.data); f.close()
-
-                # Extract links from the response
-                # -------------------------------
-                matches = scrapertools.find_multiple_matches(data, '"link"\s*:\s*"([^"]*)"\s*,\s*"label"\s*:\s*"([^"]*)"\s*,\s*"type"\s*:\s*"([^"]*)"')
-                if matches:
-                    for url, lbl, typ in matches:
-                        itemlist.append(['[%s][%s]' % (typ, lbl), corregir_url(url, item.referer), 0, subtitle])
-                else:
-                    url = scrapertools.find_single_match(data, '"link"\s*:\s*"([^"]*)"')
-                    if url:
-                        itemlist.append(['.mp4', corregir_url(url, item.referer), 0, subtitle])
+            url = scrapertools.find_single_match(data, '"link"\s*:\s*"([^"]*)"')
+            if url:
+                itemlist.append(['.mp4', corregir_url(url, referer), 0, subtitle])

     return itemlist


 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# Recursive function that searches for videos in a dictionary
+# -----------------------------------------------------------
+def analizar_enlaces_json(d):
+    itemlist = []
+    found = {}
+    for k, v in d.iteritems():
+        if k in ['file','link','type','label'] and not isinstance(v, list):
+            found[k] = v
+
+        if isinstance(v, list):
+            for l in v:
+                if isinstance(l, dict): itemlist += analizar_enlaces_json(l)
+
+    if 'file' in found or 'link' in found:
+        itemlist.append(found)
+
+    return itemlist
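
An illustrative run of the recursive search against the first example payload quoted above (inside gktools, where json is already imported):

payload = '{"Animeyt":[{"file":"https:\/\/storage.googleapis.com\/my-project-yt-195318.appspot.com\/slow.mp4","type":"mp4","label":"1080p"}]}'
enlaces = analizar_enlaces_json(json.loads(payload))
# -> [{'file': u'https://storage.googleapis.com/my-project-yt-195318.appspot.com/slow.mp4',
#      'type': u'mp4', 'label': u'1080p'}]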

 # Corrections to the final urls obtained
 # --------------------------------------

@@ -149,6 +168,66 @@ def corregir_url(url, referer):
     return url


 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

+# Hex-style conversion as done in the js
+# --------------------------------------
+def toHex(txt):
+    ret = ''
+    for i in range(len(txt)):
+        ret += str(hex(ord(txt[i]))).replace('x','')[-2:]
+    return ret
+
+
+# Encryption subroutines
+# ----------------------
+
+def md5_dominio(url):  # sutorimux/kubechi
+    h = hashlib.md5(urlparse.urlparse(url).netloc)
+    return h.hexdigest()
+
+
+def transforma_gsv(gsv, valor):
+    llista = range(256)
+    a = 0
+    for i in range(256):
+        a = (a + llista[i] + ord(gsv[i % len(gsv)])) % 256
+        b = llista[i]
+        llista[i] = llista[a]
+        llista[a] = b
+
+    ret = ''
+    a = 0; b = 0
+    for i in range(len(valor)):
+        a = (a + 1) % 256
+        b = (b + llista[a]) % 256
+        c = llista[a]
+        llista[a] = llista[b]
+        llista[b] = c
+        ret += chr(ord(valor[i]) ^ llista[(llista[a] + llista[b]) % 256])
+
+    return base64.b64encode(ret)
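
Structurally, transforma_gsv is plain RC4: the first loop is the key schedule keyed with gsv, the second XORs the keystream into valor, and the result is base64-encoded. A hedged equivalence check, assuming a Python 2 environment with pycrypto's ARC4 available and arbitrary test strings:

import base64
from Crypto.Cipher import ARC4

gsv, valor = 'some-site-token', '159753'
assert transforma_gsv(gsv, valor) == base64.b64encode(ARC4.new(gsv).encrypt(valor))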

+# Encode/Decode with Rijndael
+# ---------------------------
+
+def encode_rijndael(msg, IV, key):
+    import rijndael
+    return rijndael.cbc_encrypt(msg, IV, key)
+
+
+def decode_rijndael(txt, preIV='b3512f4972d314da9', key='3e1a854e7d5835ab99d99a29afec8bbb'):
+    import rijndael
+    msg = base64.b64decode(txt[:-15])
+    IV = preIV + txt[-15:]
+    deco = rijndael.cbc_decrypt(msg, IV, key)
+    return deco.replace(chr(0), '')
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -159,6 +238,7 @@ def corregir_url(url, referer):
 # pwd: Password
 def generar_token(gsv, pwd):
     txt = obtener_cripto(pwd, gsv)
+    # ~ logger.info('Texto pre token %s' % txt)

     _0x382d28 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'

@@ -182,18 +262,17 @@

 def obtener_cripto(password, plaintext):
-    import os, base64, json
-    SALT_LENGTH = 8
-    BLOCK_SIZE = 16
-    KEY_SIZE = 32
-
-    salt = os.urandom(SALT_LENGTH)
-    iv = os.urandom(BLOCK_SIZE)
-
-    paddingLength = 16 - (len(plaintext) % 16)
-    paddedPlaintext = plaintext+chr(paddingLength)*paddingLength
+    salt = os.urandom(8)
+
+    paddingLength = len(plaintext) % 16
+    if paddingLength == 0:
+        paddedPlaintext = plaintext
+    else:
+        dif = 16 - paddingLength
+        paddedPlaintext = plaintext + chr(dif)*dif
+
+    kdf = evpKDF(password, salt)
+    iv = kdf['iv']

     try:  # Try the system AES library
         from Crypto.Cipher import AES
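
The padding change means a plaintext that is already a multiple of 16 bytes no longer gains a full extra block of chr(16), and the IV now comes from the evpKDF derivation rather than os.urandom (presumably to mirror the CryptoJS behaviour of the sites). A quick illustration of the padding difference:

plaintext = 'A' * 16

# old: always pad, so a full extra block of chr(16) is appended
n = 16 - (len(plaintext) % 16)
old = plaintext + chr(n) * n
assert len(old) == 32

# new: leave exact multiples of the block size untouched
resto = len(plaintext) % 16
new = plaintext if resto == 0 else plaintext + chr(16 - resto) * (16 - resto)
assert new == plaintext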

@@ -207,7 +286,6 @@ def obtener_cripto(password, plaintext):

 def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md5"):
-    import hashlib
     target_key_size = key_size + iv_size
     derived_bytes = ""
     number_of_derived_words = 0

@@ -235,63 +313,3 @@ def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md
         "key": derived_bytes[0: key_size * 4],
         "iv": derived_bytes[key_size * 4:]
     }
-
-
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-# Values extracted from the js for the handled domains (automating this is still pending!)
-# E.g.: https://pelispedia.video/plugins/gkpluginsphp.js?v=3.3
-# E.g.: https://s2.animeyt.tv/rakuten/plugins/rakuten676.js?v=200000000
-
-def calcular_sutorimux(url):
-    dominio = urlparse.urlparse(url).netloc
-    claves = {
-        'pelispedia.video': 'b0a8c83650f18ccc7c87b16e3c460474',
-        'load.pelispedia.vip': '4fe554b59d760c9986c903b07af8b7a4',
-
-        's1.animeyt.tv': '0cdf0d0302091bc22a0afdc3f13c0773',
-        's2.animeyt.tv': '079c3ee3ca289af95d819d93b852ed94',
-        's3.animeyt.tv': '6c21a435bce9f5926d26db567fee1241',
-        's4.animeyt.tv': '38546fb4797f2f7c5b6690a5b4a47e34',
-        's10.animeyt.tv': 'be88e4cc014c0ae6f9f2d1f947b3b23b',
-        's.animeyt.tv': '49f911abffe682820dc5b54777713974',
-        'server.animeyt.tv': '2c60637d7f7aa54225c20aea61a2b468',
-        'api.animeyt.tv': '54092dea9fd2e163aaa59ad0c4351866',
-    }
-    return '' if dominio not in claves else claves[dominio]
-
-
-def calcular_sufijo(url):
-    dominio = urlparse.urlparse(url).netloc
-    claves = {
-        'pelispedia.video': '2653',
-        'load.pelispedia.vip': '785446346',
-
-        's1.animeyt.tv': '',
-        's2.animeyt.tv': '3497510',
-        's3.animeyt.tv': '',
-        's4.animeyt.tv': '',
-        's10.animeyt.tv': '',
-        's.animeyt.tv': '',
-        'server.animeyt.tv': '',
-        'api.animeyt.tv': '',
-    }
-    return '' if dominio not in claves else claves[dominio]
-
-
-def calcular_url_gk(url):
-    dominio = urlparse.urlparse(url).netloc
-    claves = {
-        'pelispedia.video': 'https://pelispedia.video/plugins/cloupedia.php',  # plugins/gkpedia.php
-        'load.pelispedia.vip': '',
-
-        's1.animeyt.tv': '',
-        's2.animeyt.tv': 'https://s2.animeyt.tv/rakuten/plugins/gkpluginsphp.php',
-        's3.animeyt.tv': '',
-        's4.animeyt.tv': '',
-        's10.animeyt.tv': '',
-        's.animeyt.tv': '',
-        'server.animeyt.tv': '',
-        'api.animeyt.tv': '',
-    }
-    return '' if dominio not in claves else claves[dominio]
plugin.video.alfa/lib/rijndael/__init__.py (new file, 23 lines)
@@ -0,0 +1,23 @@
from rijndael import rijndael
from rijndael_cbc import zeropad, cbc

import base64


def cbc_encrypt(msg, IV, key, size=32):

    r = rijndael(key, size)
    pad = zeropad(size)
    cri = cbc(pad, r, IV)
    encod = cri.encrypt(msg)

    return encod  # .encode('hex')


def cbc_decrypt(msg, IV, key, size=32):

    r = rijndael(key, size)
    pad = zeropad(size)
    cri = cbc(pad, r, IV)

    return cri.decrypt(msg)
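
A usage sketch matching gktools.decode_rijndael above (assumes lib/ is on the import path): 32-byte blocks, with the 32-character IV built from a 17-character prefix plus 15 trailing characters (values illustrative):

import rijndael

key = '3e1a854e7d5835ab99d99a29afec8bbb'       # 32 chars -> 32-byte key
IV = 'b3512f4972d314da9' + 'abcdefghijklmno'   # 17 + 15 = 32 chars, one block
cipher = rijndael.cbc_encrypt('hello world', IV, key)
plain = rijndael.cbc_decrypt(cipher, IV, key).replace(chr(0), '')  # strip zero padding
assert plain == 'hello world'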

plugin.video.alfa/lib/rijndael/rijndael.py (new file, 361 lines)
@@ -0,0 +1,361 @@
"""
A pure python (slow) implementation of rijndael with a decent interface

To include -

from rijndael import rijndael

To do a key setup -

r = rijndael(key, block_size = 16)

key must be a string of length 16, 24, or 32
blocksize must be 16, 24, or 32. Default is 16

To use -

ciphertext = r.encrypt(plaintext)
plaintext = r.decrypt(ciphertext)

If any strings are of the wrong length a ValueError is thrown
"""

# ported from the Java reference code by Bram Cohen, April 2001
# this code is public domain, unless someone makes
# an intellectual property claim against the reference
# code, in which case it can be made public domain by
# deleting all the comments and renaming all the variables

import copy
import string

shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]],
          [[0, 0], [1, 5], [2, 4], [3, 3]],
          [[0, 0], [1, 7], [3, 5], [4, 4]]]

# [keysize][block_size]
num_rounds = {16: {16: 10, 24: 12, 32: 14}, 24: {16: 12, 24: 12, 32: 14}, 32: {16: 14, 24: 14, 32: 14}}

A = [[1, 1, 1, 1, 1, 0, 0, 0],
     [0, 1, 1, 1, 1, 1, 0, 0],
     [0, 0, 1, 1, 1, 1, 1, 0],
     [0, 0, 0, 1, 1, 1, 1, 1],
     [1, 0, 0, 0, 1, 1, 1, 1],
     [1, 1, 0, 0, 0, 1, 1, 1],
     [1, 1, 1, 0, 0, 0, 1, 1],
     [1, 1, 1, 1, 0, 0, 0, 1]]

# produce log and alog tables, needed for multiplying in the
# field GF(2^m) (generator = 3)
alog = [1]
for i in range(255):
    j = (alog[-1] << 1) ^ alog[-1]
    if j & 0x100 != 0:
        j ^= 0x11B
    alog.append(j)

log = [0] * 256
for i in range(1, 255):
    log[alog[i]] = i

# multiply two elements of GF(2^m)
def mul(a, b):
    if a == 0 or b == 0:
        return 0
    return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255]

# substitution box based on F^{-1}(x)
box = [[0] * 8 for i in range(256)]
box[1][7] = 1
for i in range(2, 256):
    j = alog[255 - log[i]]
    for t in range(8):
        box[i][t] = (j >> (7 - t)) & 0x01

B = [0, 1, 1, 0, 0, 0, 1, 1]

# affine transform: box[i] <- B + A*box[i]
cox = [[0] * 8 for i in range(256)]
for i in range(256):
    for t in range(8):
        cox[i][t] = B[t]
        for j in range(8):
            cox[i][t] ^= A[t][j] * box[i][j]

# S-boxes and inverse S-boxes
S = [0] * 256
Si = [0] * 256
for i in range(256):
    S[i] = cox[i][0] << 7
    for t in range(1, 8):
        S[i] ^= cox[i][t] << (7-t)
    Si[S[i] & 0xFF] = i

# T-boxes
G = [[2, 1, 1, 3],
     [3, 2, 1, 1],
     [1, 3, 2, 1],
     [1, 1, 3, 2]]

AA = [[0] * 8 for i in range(4)]

for i in range(4):
    for j in range(4):
        AA[i][j] = G[i][j]
    AA[i][i+4] = 1

for i in range(4):
    pivot = AA[i][i]
    if pivot == 0:
        t = i + 1
        while AA[t][i] == 0 and t < 4:
            t += 1
        assert t != 4, 'G matrix must be invertible'
        for j in range(8):
            AA[i][j], AA[t][j] = AA[t][j], AA[i][j]
        pivot = AA[i][i]
    for j in range(8):
        if AA[i][j] != 0:
            AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255]
    for t in range(4):
        if i != t:
            for j in range(i+1, 8):
                AA[t][j] ^= mul(AA[i][j], AA[t][i])
            AA[t][i] = 0

iG = [[0] * 4 for i in range(4)]

for i in range(4):
    for j in range(4):
        iG[i][j] = AA[i][j + 4]

def mul4(a, bs):
    if a == 0:
        return 0
    r = 0
    for b in bs:
        r <<= 8
        if b != 0:
            r = r | mul(a, b)
    return r

T1 = []
T2 = []
T3 = []
T4 = []
T5 = []
T6 = []
T7 = []
T8 = []
U1 = []
U2 = []
U3 = []
U4 = []

for t in range(256):
    s = S[t]
    T1.append(mul4(s, G[0]))
    T2.append(mul4(s, G[1]))
    T3.append(mul4(s, G[2]))
    T4.append(mul4(s, G[3]))

    s = Si[t]
    T5.append(mul4(s, iG[0]))
    T6.append(mul4(s, iG[1]))
    T7.append(mul4(s, iG[2]))
    T8.append(mul4(s, iG[3]))

    U1.append(mul4(t, iG[0]))
    U2.append(mul4(t, iG[1]))
    U3.append(mul4(t, iG[2]))
    U4.append(mul4(t, iG[3]))

# round constants
rcon = [1]
r = 1
for t in range(1, 30):
    r = mul(2, r)
    rcon.append(r)

del A
del AA
del pivot
del B
del G
del box
del log
del alog
del i
del j
del r
del s
del t
del mul
del mul4
del cox
del iG

class rijndael:
    def __init__(self, key, block_size = 16):
        if block_size != 16 and block_size != 24 and block_size != 32:
            raise ValueError('Invalid block size: ' + str(block_size))
        if len(key) != 16 and len(key) != 24 and len(key) != 32:
            raise ValueError('Invalid key size: ' + str(len(key)))
        self.block_size = block_size

        ROUNDS = num_rounds[len(key)][block_size]
        BC = block_size // 4
        # encryption round keys
        Ke = [[0] * BC for i in range(ROUNDS + 1)]
        # decryption round keys
        Kd = [[0] * BC for i in range(ROUNDS + 1)]
        ROUND_KEY_COUNT = (ROUNDS + 1) * BC
        KC = len(key) // 4

        # copy user material bytes into temporary ints
        tk = []
        for i in range(0, KC):
            tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) |
                      (ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3]))

        # copy values into round key arrays
        t = 0
        j = 0
        while j < KC and t < ROUND_KEY_COUNT:
            Ke[t // BC][t % BC] = tk[j]
            Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
            j += 1
            t += 1
        tt = 0
        rconpointer = 0
        while t < ROUND_KEY_COUNT:
            # extrapolate using phi (the round key evolution function)
            tt = tk[KC - 1]
            tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \
                     (S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \
                     (S[ tt & 0xFF] & 0xFF) << 8 ^ \
                     (S[(tt >> 24) & 0xFF] & 0xFF) ^ \
                     (rcon[rconpointer] & 0xFF) << 24
            rconpointer += 1
            if KC != 8:
                for i in range(1, KC):
                    tk[i] ^= tk[i-1]
            else:
                for i in range(1, KC // 2):
                    tk[i] ^= tk[i-1]
                tt = tk[KC // 2 - 1]
                tk[KC // 2] ^= (S[ tt & 0xFF] & 0xFF) ^ \
                               (S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ \
                               (S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \
                               (S[(tt >> 24) & 0xFF] & 0xFF) << 24
                for i in range(KC // 2 + 1, KC):
                    tk[i] ^= tk[i-1]
            # copy values into round key arrays
            j = 0
            while j < KC and t < ROUND_KEY_COUNT:
                Ke[t // BC][t % BC] = tk[j]
                Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
                j += 1
                t += 1
        # inverse MixColumn where needed
        for r in range(1, ROUNDS):
            for j in range(BC):
                tt = Kd[r][j]
                Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \
                           U2[(tt >> 16) & 0xFF] ^ \
                           U3[(tt >> 8) & 0xFF] ^ \
                           U4[ tt & 0xFF]
        self.Ke = Ke
        self.Kd = Kd

    def encrypt(self, plaintext):
        if len(plaintext) != self.block_size:
            raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext)))
        Ke = self.Ke

        BC = self.block_size // 4
        ROUNDS = len(Ke) - 1
        if BC == 4:
            SC = 0
        elif BC == 6:
            SC = 1
        else:
            SC = 2
        s1 = shifts[SC][1][0]
        s2 = shifts[SC][2][0]
        s3 = shifts[SC][3][0]
        a = [0] * BC
        # temporary work array
        t = []
        # plaintext to ints + key
        for i in range(BC):
            t.append((ord(plaintext[i * 4 ]) << 24 |
                      ord(plaintext[i * 4 + 1]) << 16 |
                      ord(plaintext[i * 4 + 2]) << 8 |
                      ord(plaintext[i * 4 + 3]) ) ^ Ke[0][i])
        # apply round transforms
        for r in range(1, ROUNDS):
            for i in range(BC):
                a[i] = (T1[(t[ i ] >> 24) & 0xFF] ^
                        T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^
                        T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^
                        T4[ t[(i + s3) % BC] & 0xFF] ) ^ Ke[r][i]
            t = copy.copy(a)
        # last round is special
        result = []
        for i in range(BC):
            tt = Ke[ROUNDS][i]
            result.append((S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
            result.append((S[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
        return ''.join(map(chr, result))

    def decrypt(self, ciphertext):
        if len(ciphertext) != self.block_size:
            raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(ciphertext)))
        Kd = self.Kd

        BC = self.block_size // 4
        ROUNDS = len(Kd) - 1
        if BC == 4:
            SC = 0
        elif BC == 6:
            SC = 1
        else:
            SC = 2
        s1 = shifts[SC][1][1]
        s2 = shifts[SC][2][1]
        s3 = shifts[SC][3][1]
        a = [0] * BC
        # temporary work array
        t = [0] * BC
        # ciphertext to ints + key
        for i in range(BC):
            t[i] = (ord(ciphertext[i * 4 ]) << 24 |
                    ord(ciphertext[i * 4 + 1]) << 16 |
                    ord(ciphertext[i * 4 + 2]) << 8 |
                    ord(ciphertext[i * 4 + 3]) ) ^ Kd[0][i]
        # apply round transforms
        for r in range(1, ROUNDS):
            for i in range(BC):
                a[i] = (T5[(t[ i ] >> 24) & 0xFF] ^
                        T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^
                        T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^
                        T8[ t[(i + s3) % BC] & 0xFF] ) ^ Kd[r][i]
            t = copy.copy(a)
        # last round is special
        result = []
        for i in range(BC):
            tt = Kd[ROUNDS][i]
            result.append((Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
            result.append((Si[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
        return ''.join(map(chr, result))

def encrypt(key, block):
    return rijndael(key, len(block)).encrypt(block)

def decrypt(key, block):
    return rijndael(key, len(block)).decrypt(block)
plugin.video.alfa/lib/rijndael/rijndael_cbc.py (new file, 71 lines)
@@ -0,0 +1,71 @@
# https://stackoverflow.com/questions/8356689/python-equivalent-of-phps-mcrypt-rijndael-256-cbc

class zeropad:

    def __init__(self, block_size):
        assert block_size > 0 and block_size < 256
        self.block_size = block_size

    def pad(self, pt):
        ptlen = len(pt)
        padsize = self.block_size - ((ptlen + self.block_size - 1) % self.block_size + 1)
        return pt + "\0" * padsize

    def unpad(self, ppt):
        assert len(ppt) % self.block_size == 0
        offset = len(ppt)
        if offset == 0:
            return ''
        end = offset - self.block_size + 1
        while offset > end:
            offset -= 1
            if ppt[offset] != "\0":
                return ppt[:offset + 1]
        assert False

class cbc:

    def __init__(self, padding, cipher, iv):
        assert padding.block_size == cipher.block_size
        assert len(iv) == cipher.block_size
        self.padding = padding
        self.cipher = cipher
        self.iv = iv

    def encrypt(self, pt):
        ppt = self.padding.pad(pt)
        offset = 0
        ct = ''
        v = self.iv
        while offset < len(ppt):
            block = ppt[offset:offset + self.cipher.block_size]
            block = self.xorblock(block, v)
            block = self.cipher.encrypt(block)
            ct += block
            offset += self.cipher.block_size
            v = block
        return ct

    def decrypt(self, ct):
        assert len(ct) % self.cipher.block_size == 0
        ppt = ''
        offset = 0
        v = self.iv
        while offset < len(ct):
            block = ct[offset:offset + self.cipher.block_size]
            decrypted = self.cipher.decrypt(block)
            ppt += self.xorblock(decrypted, v)
            offset += self.cipher.block_size
            v = block
        pt = self.padding.unpad(ppt)
        return pt

    def xorblock(self, b1, b2):
        # sorry, not very Pythonesk
        i = 0
        r = ''
        while i < self.cipher.block_size:
            r += chr(ord(b1[i]) ^ ord(b2[i]))
            i += 1
        return r