diff --git a/plugin.video.alfa/channels/animeyt.py b/plugin.video.alfa/channels/animeyt.py index c8810dca..85925a62 100644 --- a/plugin.video.alfa/channels/animeyt.py +++ b/plugin.video.alfa/channels/animeyt.py @@ -11,7 +11,7 @@ from core.item import Item from core import tmdb from platformcode import config,logger -import gktools +import gktools, random, time, urllib __modo_grafico__ = config.get_setting('modo_grafico', 'animeyt') @@ -32,6 +32,10 @@ def mainlist(item): itemlist = renumbertools.show_option(item.channel, itemlist) + # ~ prova = 'EIpStovt0/tFerZM4pviHBzddH308TWRR41XnHv9N4tUNih6r+GjCDa/cV1uVUQXEmZd1Hsu2ABzESzBMGiK6XUdRgYZYHImmrIWcn3tfYPCuSWBc2UgwxUtR+WOsov6YiGM5AdgJGFunoN' + # ~ aux = gktools.decode_rijndael(prova, preIV = 'b3512f4972d314da9', key='3e1a854e7d5835ab99d99a29afec8bbb') + # ~ itemlist.append(Item(channel=item.channel, title=aux, action="novedades", url=HOST)) + return itemlist @@ -160,20 +164,24 @@ def findvideos(item): data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| |
", "", data) - # ~ patron = 'Player\("(.*?)"' - patron = 'iframe src="([^"]*)"' + from collections import OrderedDict # cambiado dict por OrderedDict para mantener el mismo orden que en la web - matches = scrapertools.find_multiple_matches(data, patron) + matches = scrapertools.find_multiple_matches(data, '
  • ([^<]*)') + d_links = OrderedDict(matches) + + matches = scrapertools.find_multiple_matches(data, 'if \(mirror == (\d*)\).*?iframe src="([^"]*)"') + d_frames = OrderedDict(matches) + + for k in d_links: + if k in d_frames and d_frames[k] != '': + tit = scrapertools.find_single_match(d_frames[k], '/([^\./]*)\.php\?') + if tit == '': + tit = 'mega' if 'mega.nz/' in d_frames[k] else 'dailymotion' if 'dailymotion.com/' in d_frames[k] else'noname' + if tit == 'id' and 'yourupload.com/' in d_frames[k]: tit = 'yourupload' + title = 'Opción %s (%s)' % (d_links[k], tit) + + itemlist.append(item.clone(channel=item.channel, folder=False, title=title, action="play", url=d_frames[k], referer=item.url)) - for url in matches: - title = scrapertools.find_single_match(url, '/([^\.]*)\.php\?') - # ~ title = 'PDT' - # ~ if "cldup" in url: - # ~ title = "Opcion Cldup" - # ~ if "chumi" in url: - # ~ title = "Opcion Chumi" - if title == 'rakuten': # de momento es el único resuelto - itemlist.append(item.clone(channel=item.channel, folder=False, title=title, action="play", url=url, referer=item.url)) if item.extra != "library": if config.get_videolibrary_support() and item.extra: @@ -186,14 +194,321 @@ def play(item): logger.info() itemlist = [] - if 'https://s2.animeyt.tv/rakuten.php?' in item.url: - itemlist = gktools.gk_play(item) + if item.url.startswith('https://www.dailymotion.com/'): + itemlist.append(item.clone(url=item.url, server='dailymotion')) + + elif item.url.startswith('https://mega.nz/'): + itemlist.append(item.clone(url=item.url.replace('embed',''), server='mega')) + + elif item.url.startswith('https://s2.animeyt.tv/rakuten.php?'): + # 1- Descargar + data, ck = gktools.get_data_and_cookie(item) + + # 2- Calcular datos + gsv = scrapertools.find_single_match(data, ' 0 and it.server != '' and it.url != '': - verificacion = check_video_link(it.url, it.server) + verificacion = check_video_link(it.url, it.server, timeout) it.title = verificacion + ', ' + it.title.strip() it.alive = verificacion numero -= 1 return itemlist -def check_video_link(url, server): +def check_video_link(url, server, timeout=3): """ Comprueba si el enlace a un video es valido y devuelve un string de 2 posiciones con la verificacion. :param url, server: Link y servidor @@ -734,17 +740,23 @@ def check_video_link(url, server): return "??" if hasattr(server_module, 'test_video_exists'): + ant_timeout = httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT + httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = timeout # Limitar tiempo de descarga try: video_exists, message = server_module.test_video_exists(page_url=url) if not video_exists: logger.info("[check_video_link] No existe! %s %s %s" % (message, server, url)) - return "NO" + resultado = "NO" else: - logger.info("[check_video_link] comprovacion OK %s %s" % (server, url)) - return "Ok" + logger.info("[check_video_link] comprobacion OK %s %s" % (server, url)) + resultado = "Ok" except: logger.info("[check_video_link] No se puede comprobar ahora! %s %s" % (server, url)) - return "??" + resultado = "??" + + finally: + httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = ant_timeout # Restaurar tiempo de descarga + return resultado logger.info("[check_video_link] No hay test_video_exists para servidor: %s" % server) return "??" 
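
Note on the findvideos() rework above: each visible mirror option is paired with the iframe that the page's javascript assigns to that mirror number, and OrderedDict keeps the on-page order. A minimal, self-contained sketch of that pairing; the HTML, attribute names and URLs below are made up for illustration, not animeyt's real markup:

# -*- coding: utf-8 -*-
# Sketch only: hypothetical HTML, not the channel's real page.
import re
from collections import OrderedDict

html = '''
<li data-mirror="1">Opcion 1</li><li data-mirror="2">Opcion 2</li>
<script>
if (mirror == 1) player.html('<iframe src="https://s2.example.tv/rakuten.php?id=abc"></iframe>');
if (mirror == 2) player.html('<iframe src="https://www.dailymotion.com/embed/video/xyz"></iframe>');
</script>
'''

# option number -> visible label, in page order
d_links = OrderedDict(re.findall(r'data-mirror="(\d+)">([^<]*)', html))
# option number -> iframe url assigned to that mirror in the js
d_frames = OrderedDict(re.findall(r'if \(mirror == (\d+)\).*?iframe src="([^"]*)"', html))

for k, label in d_links.items():
    if d_frames.get(k, ''):
        print('Opción %s (%s)' % (label, d_frames[k]))
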
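
The new play() routes dailymotion and mega links straight to their servers and resolves the rakuten option through the gktools helpers added below. A rough sketch of that 4-step "GK" flow, assuming lib/ is on the path as it is inside the add-on; the meta-tag regex, the password and the gkpluginsphp URL are placeholders, since step 2 is site-specific and has to be worked out from each site's obfuscated js:

# -*- coding: utf-8 -*-
# Sketch only: placeholder regex, password and gk url; the real values are site-specific.
from core import scrapertools
import gktools

def resolver_gk(item):
    # 1- download the player page and capture the __cfduid cookie
    data, ck = gktools.get_data_and_cookie(item)

    # 2- extract the text to encrypt and compute the crypto token (site-specific step!)
    gsv = scrapertools.find_single_match(data, 'google-site-verification" content="([^"]*)"')
    token = gktools.generar_token(gsv, 'clave-sacada-de-los-js-ofuscados')  # placeholder password

    # the video id comes from a Play(...) call in the js, or from the url itself
    link, subtitle = gktools.get_play_link_id(data, item.url)

    # 3- call the gk php with the token, reusing the same cookie
    post = 'link=%s&token=%s' % (link, token)
    data = gktools.get_data_json('https://example.tv/plugins/gkpluginsphp.php',  # placeholder url
                                 post, ck_value=ck, referer=item.url)

    # 4- turn the json answer into [label, url, 0, subtitle] entries
    return gktools.extraer_enlaces_json(data, item.referer, subtitle)
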
diff --git a/plugin.video.alfa/lib/gktools.py b/plugin.video.alfa/lib/gktools.py
index c71c0909..59585389 100644
--- a/plugin.video.alfa/lib/gktools.py
+++ b/plugin.video.alfa/lib/gktools.py
@@ -1,145 +1,164 @@
 # -*- coding: utf-8 -*-
 '''
+gktools son un conjunto de funciones para ayudar a resolver enlaces a videos con "protección GK".
+Lo de protección GK dudo que exista; le he llamado así porque los primeros ejemplos vistos eran gkpluginsphp y gkpedia.
+
 Características "GK" :
 - Utiliza una cookie __cfduid
-- Utiliza meta name="google-site-verification" como texto a encriptar
-- La clave para encriptar se calcula en los js
-- Se calcula un token criptográfico en función del texto y clave
+- Calcula un token criptográfico en función de un texto y una clave
+- El texto se saca del html (por ejemplo de meta name="google-site-verification", pero puede ser más complejo)
+- La clave para encriptar se calcula en js ofuscados que carga el html
+- Se llama a otra url con una serie de parámetros, como el token, y de allí se obtienen los videos finales.
 
-A partir de aquí 2 opciones:
+Howto:
+1- descargar página
+2- extraer datos y calcular los necesarios
+3- descargar segunda página con el token calculado
+4- extraer videos
 
-a) Si la url indica que hay un /embed/
-   - se cambia /embed/ por /stream/ y se añade /token
-   - se descarga la página, dónde se pueden extraer los videos
-
-b) Sino (enlaces directos)
-   - se busca un identificador
-   - si hay una llamada a Play() en el js, el id se saca de allí
-   - sino el id puede estar en la url
-   - con el identificador y el token se llama a un php (gkpluginsphp, gkpedia)
-   - el php devuelve la lista de enlaces a los videos
+El paso 2 es con diferencia el más variable y depende mucho de cada web/servidor!
+Desofuscando los js se pueden ver los datos propios que necesita cada uno
+(el texto a encriptar, la clave a usar, la url donde hay que llamar y los parámetros)
 
-Notas:
-- Creado a partir de lo visto en pelispedia y animeyt, que utilizan este sistema.
-- Para otros canales habrá que añadir sus datos en las funciones calcular_*
-  o hacer que estas funciones puedan extraer lo necesario de los js.
+Ver ejemplos en el código de los canales animeyt y pelispedia + +Created for Alfa-addon by Alfa Developers Team 2018 ''' # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -import urlparse +import os, base64, json, hashlib, urlparse from core import httptools from core import scrapertools -# ~ from platformcode import logger +from platformcode import logger +from aadecode import decode as aadecode -def gk_play(item): +# Descarga página y captura la petición de cookie +# ----------------------------------------------- +def get_data_and_cookie(item, ck_name='__cfduid'): - itemlist = [] - - # Descargar para tratar header y data por separado - # ------------------------------------------------ headers = {'Referer': item.referer} resp = httptools.downloadpage(item.url, headers=headers, cookies=False) # ~ with open('gk_play1.html', 'w') as f: f.write(resp.data); f.close() - # Obtener cookie __cfduid - # ----------------------- - for h in resp.headers: - ck = scrapertools.find_single_match(resp.headers[h], '__cfduid=([^;]*)') - if ck: - break - if not ck: return itemlist + ck_value = '' + if ck_name != '': + for h in resp.headers: + ck = scrapertools.find_single_match(resp.headers[h], '%s=([^;]*)' % ck_name) + if ck: + ck_value = ck + break - # Extraer datos y calcular token - # ------------------------------ - gsv = scrapertools.find_single_match(resp.data, 'Vidoza' in data or '|fastplay|' in data: - if '|fastplay|' in data: - packed = scrapertools.find_single_match(data, "") - from lib import jsunpack - data = jsunpack.unpack(packed) - data = data.replace("\\'", "'") - - matches = scrapertools.find_multiple_matches(data, 'file\s*:\s*"([^"]+)"\s*,\s*label\s*:\s*"([^"]+)"') - subtitle = '' - for fil, lbl in matches: - if fil.endswith('.srt') and not fil.endswith('empty.srt'): - subtitle = fil - if not subtitle.startswith('http'): - domi = scrapertools.find_single_match(data, 'aboutlink\s*:\s*"([^"]*)') - subtitle = domi + subtitle - break - - for fil, lbl in matches: - if not fil.endswith('.srt'): - itemlist.append([lbl, fil, 0, subtitle]) + return data +# Descarga json usando una cookie concreta +# ---------------------------------------- +def get_data_json(url, post, ck_value='', referer='', ck_name='__cfduid'): + + headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': ck_name+'='+ck_value} + if referer != '': headers['referer'] = referer + + data = httptools.downloadpage(url, post=post, headers=headers, cookies=False).data + # ~ with open('gk_play3.html', 'w') as f: f.write(data); f.close() + + return data + + +# Obtiene link de una llamada javascript Play() o de la url +# --------------------------------------------------------- +def get_play_link_id(data, url): + + playparms = scrapertools.find_single_match(data, 'Play\("([^"]*)","([^"]*)","([^"]*)"') + if playparms: + link = playparms[0] + subtitle = '' if playparms[1] == '' or playparms[2] == '' else playparms[2] + playparms[1] + '.srt' else: - playparms = scrapertools.find_single_match(resp.data, 'Play\("([^"]*)","([^"]*)","([^"]*)"') - if playparms: - link = playparms[0] - subtitle = '' if playparms[1] == '' or playparms[2] == '' else playparms[2] + playparms[1] + '.srt' + subtitle = '' + link = scrapertools.find_single_match(data, 'Play\("([^"]*)"') + if not link: + link = scrapertools.find_single_match(url, 'id=([^;]*)') + + return link, subtitle + + +# Extraer enlaces a videos de datos json +# -------------------------------------- +def extraer_enlaces_json(data, referer, subtitle=''): + itemlist = 
[] + + # Ejemplos: + # {"Animeyt":[{"file":"https:\/\/storage.googleapis.com\/my-project-yt-195318.appspot.com\/slow.mp4","type":"mp4","label":"1080p"}]} + # {"link":[{"link":"http:\/\/video8.narusaku.tv\/static\/720p\/2.1208982.2039540?md5=B64FKYNbFuWvxkGcSbtz2Q&expires=1528839657","label":"720p","type":"mp4"},{"link":"http:\/\/video5.narusaku.tv\/static\/480p\/2.1208982.2039540?md5=yhLG_3VghEUSd5YlCXOTBQ&expires=1528839657","label":"480p","type":"mp4","default":true},{"link":"http:\/\/video3.narusaku.tv\/static\/360p\/2.1208982.2039540?md5=vC0ZJkxRwV1rVBdeF7D4iA&expires=1528839657","label":"360p","type":"mp4"},{"link":"http:\/\/video2.narusaku.tv\/static\/240p\/2.1208982.2039540?md5=b-y_-rgrLMW7hJwFQSD8Tw&expires=1528839657","label":"240p","type":"mp4"}]} + # {"link":"https:\/\/storage.googleapis.com\/cloudflare-caching-pelispedia.appspot.com\/cache\/16050.mp4","type":"mp4"} + # {"Harbinger":[{"Harbinger":"...","type":"...","label":"..."}], ...} + + data = data.replace('"Harbinger"', '"file"') + + # Intentar como json + # ------------------ + try: + json_data = json.loads(data) + enlaces = analizar_enlaces_json(json_data) + for enlace in enlaces: + url = enlace['link'] if 'link' in enlace else enlace['file'] + if not url.startswith('http'): url = aadecode(base64.b64decode(url)) # necesario para "Harbinger" + if not url.startswith('http'): url = decode_rijndael(url) # post-"Harbinger" en algunos casos + tit = '' + if 'type' in enlace: tit += '[%s]' % enlace['type'] + if 'label' in enlace: tit += '[%s]' % enlace['label'] + if tit == '': tit = '.mp4' + + itemlist.append([tit, corregir_url(url, referer), 0, subtitle]) + + # Sino, intentar como texto + # ------------------------- + except: + matches = scrapertools.find_multiple_matches(data, '"link"\s*:\s*"([^"]*)"\s*,\s*"label"\s*:\s*"([^"]*)"\s*,\s*"type"\s*:\s*"([^"]*)"') + if matches: + for url, lbl, typ in matches: + itemlist.append(['[%s][%s]' % (typ, lbl), corregir_url(url, referer), 0, subtitle]) else: - subtitle = '' - link = scrapertools.find_single_match(resp.data, 'Play\("([^"]*)"') - if not link: - link = scrapertools.find_single_match(item.url, 'id=([^;]*)') - - if link: - # ~ logger.info('%s %s %s' % (item.url, link, token)) - url_gk = calcular_url_gk(item.url) - - post = "link=%s&token=%s" % (link, token) - headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': '__cfduid=' + ck} - - data = httptools.downloadpage(url_gk, post=post, headers=headers, cookies=False).data - # ~ with open('gk_play3.html', 'w') as f: f.write(resp.data); f.close() - - # Extraer enlaces de la respuesta - # ------------------------------- - matches = scrapertools.find_multiple_matches(data, '"link"\s*:\s*"([^"]*)"\s*,\s*"label"\s*:\s*"([^"]*)"\s*,\s*"type"\s*:\s*"([^"]*)"') - if matches: - for url, lbl, typ in matches: - itemlist.append(['[%s][%s]' % (typ, lbl), corregir_url(url, item.referer), 0, subtitle]) - else: - url = scrapertools.find_single_match(data, '"link"\s*:\s*"([^"]*)"') - if url: - itemlist.append(['.mp4', corregir_url(url, item.referer), 0, subtitle]) + url = scrapertools.find_single_match(data, '"link"\s*:\s*"([^"]*)"') + if url: + itemlist.append(['.mp4', corregir_url(url, referer), 0, subtitle]) return itemlist -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# Función recursiva que busca videos en un diccionario +# ---------------------------------------------------- +def analizar_enlaces_json(d): + itemlist = [] + found = {} + for k, v in d.iteritems(): + if k in ['file','link','type','label'] 
and not isinstance(v, list): + found[k] = v + + if isinstance(v, list): + for l in v: + if isinstance(l, dict): itemlist += analizar_enlaces_json(l) + + if 'file' in found or 'link' in found: + itemlist.append(found) + + return itemlist + # Correcciones en las urls finales obtenidas # ------------------------------------------ @@ -149,6 +168,66 @@ def corregir_url(url, referer): return url + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +# Conversion tipo hexa que hay en el js +# ------------------------------------- +def toHex(txt): + ret = '' + for i in range(len(txt)): + ret += str(hex(ord(txt[i]))).replace('x','')[-2:] + return ret + + +# Subrutinas de encriptación +# -------------------------- + +def md5_dominio(url): # sutorimux/kubechi + h = hashlib.md5(urlparse.urlparse(url).netloc) + return h.hexdigest() + + +def transforma_gsv(gsv, valor): + llista = range(256) + a = 0 + for i in range(256): + a = (a + llista[i] + ord(gsv[i % len(gsv)]) ) % 256 + b = llista[i] + llista[i] = llista[a] + llista[a] = b + + ret = '' + a = 0; b= 0 + for i in range(len(valor)): + a = (a + 1) % 256 + b = (b + llista[a]) % 256 + c = llista[a] + llista[a] = llista[b] + llista[b] = c + ret += chr(ord(valor[i]) ^ llista[(llista[a] + llista[b]) % 256]) + + return base64.b64encode(ret) + + + +# Codificar/Decodificar con Rijndael +# ---------------------------------- + +def encode_rijndael(msg, IV, key): + import rijndael + return rijndael.cbc_encrypt(msg, IV, key) + + +def decode_rijndael(txt, preIV='b3512f4972d314da9', key='3e1a854e7d5835ab99d99a29afec8bbb'): + import rijndael + msg = base64.b64decode(txt[:-15]) + IV = preIV + txt[-15:] + deco = rijndael.cbc_decrypt(msg, IV, key) + return deco.replace(chr(0), '') + + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -159,6 +238,7 @@ def corregir_url(url, referer): # pwd: Password def generar_token(gsv, pwd): txt = obtener_cripto(pwd, gsv) + # ~ logger.info('Texto pre token %s' % txt) _0x382d28 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' @@ -182,18 +262,17 @@ def generar_token(gsv, pwd): def obtener_cripto(password, plaintext): - import os, base64, json - SALT_LENGTH = 8 - BLOCK_SIZE = 16 - KEY_SIZE = 32 - - salt = os.urandom(SALT_LENGTH) - iv = os.urandom(BLOCK_SIZE) - - paddingLength = 16 - (len(plaintext) % 16) - paddedPlaintext = plaintext+chr(paddingLength)*paddingLength + salt = os.urandom(8) + paddingLength = len(plaintext) % 16 + if paddingLength == 0: + paddedPlaintext = plaintext + else: + dif = 16 - paddingLength + paddedPlaintext = plaintext + chr(dif)*dif + kdf = evpKDF(password, salt) + iv = kdf['iv'] try: # Intentar con librería AES del sistema from Crypto.Cipher import AES @@ -207,7 +286,6 @@ def obtener_cripto(password, plaintext): def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md5"): - import hashlib target_key_size = key_size + iv_size derived_bytes = "" number_of_derived_words = 0 @@ -235,63 +313,3 @@ def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md "key": derived_bytes[0: key_size * 4], "iv": derived_bytes[key_size * 4:] } - - -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -# Valores extraídos de los js para los dominios tratados (pendiente automatizar!) 
-# Ej: https://pelispedia.video/plugins/gkpluginsphp.js?v=3.3 -# Ej: https://s2.animeyt.tv/rakuten/plugins/rakuten676.js?v=200000000 - -def calcular_sutorimux(url): - dominio = urlparse.urlparse(url).netloc - claves = { - 'pelispedia.video': 'b0a8c83650f18ccc7c87b16e3c460474', - 'load.pelispedia.vip': '4fe554b59d760c9986c903b07af8b7a4', - - 's1.animeyt.tv': '0cdf0d0302091bc22a0afdc3f13c0773', - 's2.animeyt.tv': '079c3ee3ca289af95d819d93b852ed94', - 's3.animeyt.tv': '6c21a435bce9f5926d26db567fee1241', - 's4.animeyt.tv': '38546fb4797f2f7c5b6690a5b4a47e34', - 's10.animeyt.tv': 'be88e4cc014c0ae6f9f2d1f947b3b23b', - 's.animeyt.tv': '49f911abffe682820dc5b54777713974', - 'server.animeyt.tv': '2c60637d7f7aa54225c20aea61a2b468', - 'api.animeyt.tv': '54092dea9fd2e163aaa59ad0c4351866', - } - return '' if dominio not in claves else claves[dominio] - - -def calcular_sufijo(url): - dominio = urlparse.urlparse(url).netloc - claves = { - 'pelispedia.video': '2653', - 'load.pelispedia.vip': '785446346', - - 's1.animeyt.tv': '', - 's2.animeyt.tv': '3497510', - 's3.animeyt.tv': '', - 's4.animeyt.tv': '', - 's10.animeyt.tv': '', - 's.animeyt.tv': '', - 'server.animeyt.tv': '', - 'api.animeyt.tv': '', - } - return '' if dominio not in claves else claves[dominio] - - -def calcular_url_gk(url): - dominio = urlparse.urlparse(url).netloc - claves = { - 'pelispedia.video': 'https://pelispedia.video/plugins/cloupedia.php', # plugins/gkpedia.php - 'load.pelispedia.vip': '', - - 's1.animeyt.tv': '', - 's2.animeyt.tv': 'https://s2.animeyt.tv/rakuten/plugins/gkpluginsphp.php', - 's3.animeyt.tv': '', - 's4.animeyt.tv': '', - 's10.animeyt.tv': '', - 's.animeyt.tv': '', - 'server.animeyt.tv': '', - 'api.animeyt.tv': '', - } - return '' if dominio not in claves else claves[dominio] diff --git a/plugin.video.alfa/lib/rijndael/__init__.py b/plugin.video.alfa/lib/rijndael/__init__.py new file mode 100644 index 00000000..776f8091 --- /dev/null +++ b/plugin.video.alfa/lib/rijndael/__init__.py @@ -0,0 +1,23 @@ +from rijndael import rijndael +from rijndael_cbc import zeropad, cbc + +import base64 + + +def cbc_encrypt(msg, IV, key, size=32): + + r = rijndael(key, size) + pad = zeropad(size) + cri = cbc(pad, r, IV) + encod = cri.encrypt(msg) + + return encod #.encode('hex') + + +def cbc_decrypt(msg, IV, key, size=32): + + r = rijndael(key, size) + pad = zeropad(size) + cri = cbc(pad, r, IV) + + return cri.decrypt(msg) diff --git a/plugin.video.alfa/lib/rijndael/rijndael.py b/plugin.video.alfa/lib/rijndael/rijndael.py new file mode 100644 index 00000000..d185d952 --- /dev/null +++ b/plugin.video.alfa/lib/rijndael/rijndael.py @@ -0,0 +1,361 @@ +""" +A pure python (slow) implementation of rijndael with a decent interface + +To include - + +from rijndael import rijndael + +To do a key setup - + +r = rijndael(key, block_size = 16) + +key must be a string of length 16, 24, or 32 +blocksize must be 16, 24, or 32. 
Default is 16 + +To use - + +ciphertext = r.encrypt(plaintext) +plaintext = r.decrypt(ciphertext) + +If any strings are of the wrong length a ValueError is thrown +""" + +# ported from the Java reference code by Bram Cohen, April 2001 +# this code is public domain, unless someone makes +# an intellectual property claim against the reference +# code, in which case it can be made public domain by +# deleting all the comments and renaming all the variables + +import copy +import string + +shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]], + [[0, 0], [1, 5], [2, 4], [3, 3]], + [[0, 0], [1, 7], [3, 5], [4, 4]]] + +# [keysize][block_size] +num_rounds = {16: {16: 10, 24: 12, 32: 14}, 24: {16: 12, 24: 12, 32: 14}, 32: {16: 14, 24: 14, 32: 14}} + +A = [[1, 1, 1, 1, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 1, 1, 1, 1, 1], + [1, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 1, 1, 1], + [1, 1, 1, 0, 0, 0, 1, 1], + [1, 1, 1, 1, 0, 0, 0, 1]] + +# produce log and alog tables, needed for multiplying in the +# field GF(2^m) (generator = 3) +alog = [1] +for i in range(255): + j = (alog[-1] << 1) ^ alog[-1] + if j & 0x100 != 0: + j ^= 0x11B + alog.append(j) + +log = [0] * 256 +for i in range(1, 255): + log[alog[i]] = i + +# multiply two elements of GF(2^m) +def mul(a, b): + if a == 0 or b == 0: + return 0 + return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255] + +# substitution box based on F^{-1}(x) +box = [[0] * 8 for i in range(256)] +box[1][7] = 1 +for i in range(2, 256): + j = alog[255 - log[i]] + for t in range(8): + box[i][t] = (j >> (7 - t)) & 0x01 + +B = [0, 1, 1, 0, 0, 0, 1, 1] + +# affine transform: box[i] <- B + A*box[i] +cox = [[0] * 8 for i in range(256)] +for i in range(256): + for t in range(8): + cox[i][t] = B[t] + for j in range(8): + cox[i][t] ^= A[t][j] * box[i][j] + +# S-boxes and inverse S-boxes +S = [0] * 256 +Si = [0] * 256 +for i in range(256): + S[i] = cox[i][0] << 7 + for t in range(1, 8): + S[i] ^= cox[i][t] << (7-t) + Si[S[i] & 0xFF] = i + +# T-boxes +G = [[2, 1, 1, 3], + [3, 2, 1, 1], + [1, 3, 2, 1], + [1, 1, 3, 2]] + +AA = [[0] * 8 for i in range(4)] + +for i in range(4): + for j in range(4): + AA[i][j] = G[i][j] + AA[i][i+4] = 1 + +for i in range(4): + pivot = AA[i][i] + if pivot == 0: + t = i + 1 + while AA[t][i] == 0 and t < 4: + t += 1 + assert t != 4, 'G matrix must be invertible' + for j in range(8): + AA[i][j], AA[t][j] = AA[t][j], AA[i][j] + pivot = AA[i][i] + for j in range(8): + if AA[i][j] != 0: + AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255] + for t in range(4): + if i != t: + for j in range(i+1, 8): + AA[t][j] ^= mul(AA[i][j], AA[t][i]) + AA[t][i] = 0 + +iG = [[0] * 4 for i in range(4)] + +for i in range(4): + for j in range(4): + iG[i][j] = AA[i][j + 4] + +def mul4(a, bs): + if a == 0: + return 0 + r = 0 + for b in bs: + r <<= 8 + if b != 0: + r = r | mul(a, b) + return r + +T1 = [] +T2 = [] +T3 = [] +T4 = [] +T5 = [] +T6 = [] +T7 = [] +T8 = [] +U1 = [] +U2 = [] +U3 = [] +U4 = [] + +for t in range(256): + s = S[t] + T1.append(mul4(s, G[0])) + T2.append(mul4(s, G[1])) + T3.append(mul4(s, G[2])) + T4.append(mul4(s, G[3])) + + s = Si[t] + T5.append(mul4(s, iG[0])) + T6.append(mul4(s, iG[1])) + T7.append(mul4(s, iG[2])) + T8.append(mul4(s, iG[3])) + + U1.append(mul4(t, iG[0])) + U2.append(mul4(t, iG[1])) + U3.append(mul4(t, iG[2])) + U4.append(mul4(t, iG[3])) + +# round constants +rcon = [1] +r = 1 +for t in range(1, 30): + r = mul(2, r) + rcon.append(r) + +del A +del AA +del pivot +del B +del G +del box +del log +del 
alog +del i +del j +del r +del s +del t +del mul +del mul4 +del cox +del iG + +class rijndael: + def __init__(self, key, block_size = 16): + if block_size != 16 and block_size != 24 and block_size != 32: + raise ValueError('Invalid block size: ' + str(block_size)) + if len(key) != 16 and len(key) != 24 and len(key) != 32: + raise ValueError('Invalid key size: ' + str(len(key))) + self.block_size = block_size + + ROUNDS = num_rounds[len(key)][block_size] + BC = block_size // 4 + # encryption round keys + Ke = [[0] * BC for i in range(ROUNDS + 1)] + # decryption round keys + Kd = [[0] * BC for i in range(ROUNDS + 1)] + ROUND_KEY_COUNT = (ROUNDS + 1) * BC + KC = len(key) // 4 + + # copy user material bytes into temporary ints + tk = [] + for i in range(0, KC): + tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) | + (ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3])) + + # copy values into round key arrays + t = 0 + j = 0 + while j < KC and t < ROUND_KEY_COUNT: + Ke[t // BC][t % BC] = tk[j] + Kd[ROUNDS - (t // BC)][t % BC] = tk[j] + j += 1 + t += 1 + tt = 0 + rconpointer = 0 + while t < ROUND_KEY_COUNT: + # extrapolate using phi (the round key evolution function) + tt = tk[KC - 1] + tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \ + (S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \ + (S[ tt & 0xFF] & 0xFF) << 8 ^ \ + (S[(tt >> 24) & 0xFF] & 0xFF) ^ \ + (rcon[rconpointer] & 0xFF) << 24 + rconpointer += 1 + if KC != 8: + for i in range(1, KC): + tk[i] ^= tk[i-1] + else: + for i in range(1, KC // 2): + tk[i] ^= tk[i-1] + tt = tk[KC // 2 - 1] + tk[KC // 2] ^= (S[ tt & 0xFF] & 0xFF) ^ \ + (S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ \ + (S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \ + (S[(tt >> 24) & 0xFF] & 0xFF) << 24 + for i in range(KC // 2 + 1, KC): + tk[i] ^= tk[i-1] + # copy values into round key arrays + j = 0 + while j < KC and t < ROUND_KEY_COUNT: + Ke[t // BC][t % BC] = tk[j] + Kd[ROUNDS - (t // BC)][t % BC] = tk[j] + j += 1 + t += 1 + # inverse MixColumn where needed + for r in range(1, ROUNDS): + for j in range(BC): + tt = Kd[r][j] + Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \ + U2[(tt >> 16) & 0xFF] ^ \ + U3[(tt >> 8) & 0xFF] ^ \ + U4[ tt & 0xFF] + self.Ke = Ke + self.Kd = Kd + + def encrypt(self, plaintext): + if len(plaintext) != self.block_size: + raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext))) + Ke = self.Ke + + BC = self.block_size // 4 + ROUNDS = len(Ke) - 1 + if BC == 4: + SC = 0 + elif BC == 6: + SC = 1 + else: + SC = 2 + s1 = shifts[SC][1][0] + s2 = shifts[SC][2][0] + s3 = shifts[SC][3][0] + a = [0] * BC + # temporary work array + t = [] + # plaintext to ints + key + for i in range(BC): + t.append((ord(plaintext[i * 4 ]) << 24 | + ord(plaintext[i * 4 + 1]) << 16 | + ord(plaintext[i * 4 + 2]) << 8 | + ord(plaintext[i * 4 + 3]) ) ^ Ke[0][i]) + # apply round transforms + for r in range(1, ROUNDS): + for i in range(BC): + a[i] = (T1[(t[ i ] >> 24) & 0xFF] ^ + T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^ + T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^ + T4[ t[(i + s3) % BC] & 0xFF] ) ^ Ke[r][i] + t = copy.copy(a) + # last round is special + result = [] + for i in range(BC): + tt = Ke[ROUNDS][i] + result.append((S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF) + result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF) + result.append((S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF) + result.append((S[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF) + return ''.join(map(chr, result)) + + def decrypt(self, ciphertext): + if len(ciphertext) != 
self.block_size: + raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(ciphertext))) + Kd = self.Kd + + BC = self.block_size // 4 + ROUNDS = len(Kd) - 1 + if BC == 4: + SC = 0 + elif BC == 6: + SC = 1 + else: + SC = 2 + s1 = shifts[SC][1][1] + s2 = shifts[SC][2][1] + s3 = shifts[SC][3][1] + a = [0] * BC + # temporary work array + t = [0] * BC + # ciphertext to ints + key + for i in range(BC): + t[i] = (ord(ciphertext[i * 4 ]) << 24 | + ord(ciphertext[i * 4 + 1]) << 16 | + ord(ciphertext[i * 4 + 2]) << 8 | + ord(ciphertext[i * 4 + 3]) ) ^ Kd[0][i] + # apply round transforms + for r in range(1, ROUNDS): + for i in range(BC): + a[i] = (T5[(t[ i ] >> 24) & 0xFF] ^ + T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^ + T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^ + T8[ t[(i + s3) % BC] & 0xFF] ) ^ Kd[r][i] + t = copy.copy(a) + # last round is special + result = [] + for i in range(BC): + tt = Kd[ROUNDS][i] + result.append((Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF) + result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF) + result.append((Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF) + result.append((Si[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF) + return ''.join(map(chr, result)) + +def encrypt(key, block): + return rijndael(key, len(block)).encrypt(block) + +def decrypt(key, block): + return rijndael(key, len(block)).decrypt(block) \ No newline at end of file diff --git a/plugin.video.alfa/lib/rijndael/rijndael_cbc.py b/plugin.video.alfa/lib/rijndael/rijndael_cbc.py new file mode 100644 index 00000000..ce12c68b --- /dev/null +++ b/plugin.video.alfa/lib/rijndael/rijndael_cbc.py @@ -0,0 +1,71 @@ +# https://stackoverflow.com/questions/8356689/python-equivalent-of-phps-mcrypt-rijndael-256-cbc + +class zeropad: + + def __init__(self, block_size): + assert block_size > 0 and block_size < 256 + self.block_size = block_size + + def pad(self, pt): + ptlen = len(pt) + padsize = self.block_size - ((ptlen + self.block_size - 1) % self.block_size + 1) + return pt + "\0" * padsize + + def unpad(self, ppt): + assert len(ppt) % self.block_size == 0 + offset = len(ppt) + if (offset == 0): + return '' + end = offset - self.block_size + 1 + while (offset > end): + offset -= 1; + if (ppt[offset] != "\0"): + return ppt[:offset + 1] + assert false + +class cbc: + + def __init__(self, padding, cipher, iv): + assert padding.block_size == cipher.block_size; + assert len(iv) == cipher.block_size; + self.padding = padding + self.cipher = cipher + self.iv = iv + + def encrypt(self, pt): + ppt = self.padding.pad(pt) + offset = 0 + ct = '' + v = self.iv + while (offset < len(ppt)): + block = ppt[offset:offset + self.cipher.block_size] + block = self.xorblock(block, v) + block = self.cipher.encrypt(block) + ct += block + offset += self.cipher.block_size + v = block + return ct; + + def decrypt(self, ct): + assert len(ct) % self.cipher.block_size == 0 + ppt = '' + offset = 0 + v = self.iv + while (offset < len(ct)): + block = ct[offset:offset + self.cipher.block_size] + decrypted = self.cipher.decrypt(block) + ppt += self.xorblock(decrypted, v) + offset += self.cipher.block_size + v = block + pt = self.padding.unpad(ppt) + return pt; + + def xorblock(self, b1, b2): + # sorry, not very Pythonesk + i = 0 + r = ''; + while (i < self.cipher.block_size): + r += chr(ord(b1[i]) ^ ord(b2[i])) + i += 1 + return r +
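
For reference, a small usage sketch of the new rijndael package: the key and IV below are dummy test values (any 16/24/32-char key works; the wrapper defaults to a 32-byte block, so the IV must be 32 chars), and lib/ is assumed to be on the path as it is inside the add-on.

# -*- coding: utf-8 -*-
# Sketch only: dummy key/IV, not the values used by any real site.
import rijndael   # plugin.video.alfa/lib/rijndael

key = '0123456789abcdef0123456789abcdef'   # 16, 24 or 32 chars
iv = 'b' * 32                              # must match the 32-byte block size used by the wrapper

cifrado = rijndael.cbc_encrypt('hola mundo', iv, key)    # zero-padded to 32 bytes, then CBC
claro = rijndael.cbc_decrypt(cifrado, iv, key)           # unpad strips the trailing zero bytes
print(claro)                                             # -> 'hola mundo'

# gktools.decode_rijndael() runs the same CBC decrypt on base64 data, rebuilding the IV
# as preIV (17 chars) + the last 15 chars of the encoded string.
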