From 4c949d5d89b858d405acee8974caa439b5d7f8a4 Mon Sep 17 00:00:00 2001 From: Kingbox <37674310+lopezvg@users.noreply.github.com> Date: Wed, 12 Sep 2018 11:51:44 +0200 Subject: [PATCH] =?UTF-8?q?Generictools:=20c=C3=A1lculo=20tama=C3=B1o=20.t?= =?UTF-8?q?orrent?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- plugin.video.alfa/channels/divxtotal.py | 26 +++- plugin.video.alfa/channels/elitetorrent.py | 31 +++-- plugin.video.alfa/channels/estrenosgo.py | 29 +++- plugin.video.alfa/channels/grantorrent.py | 16 ++- plugin.video.alfa/channels/mejortorrent1.py | 16 ++- plugin.video.alfa/channels/newpct1.py | 42 ++++-- plugin.video.alfa/lib/generictools.py | 141 ++++++++++++++++++-- 7 files changed, 258 insertions(+), 43 deletions(-) diff --git a/plugin.video.alfa/channels/divxtotal.py b/plugin.video.alfa/channels/divxtotal.py index 8e1ba213..1a0deaf5 100644 --- a/plugin.video.alfa/channels/divxtotal.py +++ b/plugin.video.alfa/channels/divxtotal.py @@ -519,17 +519,35 @@ def findvideos(item): item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Ahora tratamos los enlaces .torrent - for scrapedurl in matches: #leemos los torrents con la diferentes calidades + for scrapedurl in matches: #leemos los torrents con la diferentes calidades #Generamos una copia de Item para trabajar sobre ella item_local = item.clone() + #Buscamos si ya tiene tamaño, si no, los buscamos en el archivo .torrent + size = scrapertools.find_single_match(item_local.quality, '\s\[(\d+,?\d*?\s\w\s?[b|B])\]') + if not size: + size = generictools.get_torrent_size(item_local.url) #Buscamos el tamaño en el .torrent + if size: + item_local.title = re.sub(r'\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía + item_local.title = '%s [%s]' % (item_local.title, size) #Agregamos size al final del título + size = size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b') + item_local.quality = re.sub(r'\s\[\d+,?\d*?\s\w\s?[b|B]\]', '', item_local.quality) #Quitamos size de calidad, si lo traía + item_local.quality = '%s [%s]' % (item_local.quality, size) #Agregamos size al final de la calidad + #Ahora pintamos el link del Torrent item_local.url = scrapedurl if host not in item_local.url and host.replace('https', 'http') not in item_local.url : item_local.url = host + item_local.url - item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent - item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title) #Quitamos etiquetas vacías - item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Quitamos colores vacíos + item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) + + #Preparamos título y calidad, quitamos etiquetas vacías + item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title) + item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title) + item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() + item_local.quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.quality) + item_local.quality = re.sub(r'\s?\[COLOR 
\w+\]\s?\[\/COLOR\]', '', item_local.quality) + item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() + item_local.alive = "??" #Calidad del link sin verificar item_local.action = "play" #Visualizar vídeo item_local.server = "torrent" #Seridor Torrent diff --git a/plugin.video.alfa/channels/elitetorrent.py b/plugin.video.alfa/channels/elitetorrent.py index 6842c037..1a052a4a 100644 --- a/plugin.video.alfa/channels/elitetorrent.py +++ b/plugin.video.alfa/channels/elitetorrent.py @@ -171,8 +171,11 @@ def listado(item): #Limpiamos el título de la basura innecesaria title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("Sub", "").replace("sub", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "") - title = title.replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("(HDRip)", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "") + title = title.replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("(HDRip)", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "").replace("temporada", "").replace("Temporada", "").replace("capitulo", "").replace("Capitulo", "") + + title = re.sub(r'(?:\d+)?x.?\s?\d+', '', title) title = re.sub(r'\??\s?\d*?\&.*', '', title).title().strip() + item_local.from_title = title #Guardamos esta etiqueta para posible desambiguación de título if item_local.extra == "peliculas": #preparamos Item para películas @@ -190,16 +193,17 @@ def listado(item): item_local.contentType = "episode" item_local.extra = "series" epi_mult = scrapertools.find_single_match(item_local.url, r'cap.*?-\d+-al-(\d+)') - item_local.contentSeason = scrapertools.find_single_match(item_local.url, r'temp.*?-(\d+)') + item_local.contentSeason = scrapertools.find_single_match(item_local.url, r'temporada-(\d+)') item_local.contentEpisodeNumber = scrapertools.find_single_match(item_local.url, r'cap.*?-(\d+)') if not item_local.contentSeason: item_local.contentSeason = scrapertools.find_single_match(item_local.url, r'-(\d+)[x|X]\d+') if not item_local.contentEpisodeNumber: item_local.contentEpisodeNumber = scrapertools.find_single_match(item_local.url, r'-\d+[x|X](\d+)') - if item_local.contentSeason < 1: - item_local.contentSeason = 1 + if not item_local.contentSeason or item_local.contentSeason < 1: + item_local.contentSeason = 0 if item_local.contentEpisodeNumber < 1: item_local.contentEpisodeNumber = 1 + item_local.contentSerieName = title if epi_mult: title = "%sx%s al %s -" % (item_local.contentSeason, str(item_local.contentEpisodeNumber).zfill(2), str(epi_mult).zfill(2)) #Creamos un título con el rango de episodios @@ -269,11 +273,11 @@ def findvideos(item): #data = unicode(data, "utf-8", errors="replace") #Añadimos el tamaño para todos - size = scrapertools.find_single_match(item.quality, '\s\[(\d+,?\d*?\s\w[b|B]s)\]') + size = scrapertools.find_single_match(item.quality, 
'\s\[(\d+,?\d*?\s\w\s?[b|B]s)\]')
     if size:
         item.title = re.sub('\s\[\d+,?\d*?\s\w[b|B]s\]', '', item.title)           #Quitamos size de título, si lo traía
         item.title = '%s [%s]' % (item.title, size)                                #Agregamos size al final del título
-        item.quality = re.sub('\s\[\d+,?\d*?\s\w[b|B]s\]', '', item.quality)       #Quitamos size de calidad, si lo traía
+        item.quality = re.sub('\s\[\d+,?\d*?\s\w\s?[b|B]s\]', '', item.quality)    #Quitamos size de calidad, si lo traía

     patron_t = '
Formato:<\/b>&\w+;\s?([^<]+)'):
         item_local.quality = scrapertools.find_single_match(data, 'Formato:<\/b>&\w+;\s?([^<]+)
') elif "hdtv" in item_local.url.lower() or "720p" in item_local.url.lower() or "1080p" in item_local.url.lower() or "4k" in item_local.url.lower(): item_local.quality = scrapertools.find_single_match(item_local.url, '.*?_([H|7|1|4].*?)\.torrent') item_local.quality = item_local.quality.replace("_", " ") - + # Extrae el tamaño del vídeo if scrapertools.find_single_match(data, 'Tama.*?:<\/b>&\w+;\s?([^<]+B)Tama.*?:<\/b>&\w+;\s?([^<]+B)
Size:<\/strong>?\s(\d+?\.?\d*?\s\w[b|B])<\/span>') size = size.replace(".", ",") #sustituimos . por , porque Unify lo borra if not size: - size = scrapertools.find_single_match(item.quality, '\s\[(\d+,?\d*?\s\w[b|B])\]') + size = scrapertools.find_single_match(item.quality, '\s\[(\d+,?\d*?\s\w\s?[b|B])\]') + if not size: + size = generictools.get_torrent_size(item.url) #Buscamos el tamaño en el .torrent if size: item.title = re.sub(r'\s\[\d+,?\d*?\s\w[b|B]\]', '', item.title) #Quitamos size de título, si lo traía item.title = '%s [%s]' % (item.title, size) #Agregamos size al final del título size = size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b') - item.quality = re.sub(r'\s\[\d+,?\d*?\s\w[b|B]\]', '', item.quality) #Quitamos size de calidad, si lo traía + item.quality = re.sub(r'\s\[\d+,?\d*?\s\w\s?[b|B]\]', '', item.quality) #Quitamos size de calidad, si lo traía #Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) @@ -1399,8 +1401,15 @@ def findvideos(item): else: quality = item_local.quality item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (quality, str(item_local.language)) #Preparamos título de Torrent - item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip() #Quitamos etiquetas vacías - item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip() #Quitamos colores vacíos + + #Preparamos título y calidad, quitamos etiquetas vacías + item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title) + item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title) + item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() + quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', quality) + quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', quality) + quality = quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() + item_local.alive = "??" 
#Calidad del link sin verificar item_local.action = "play" #Visualizar vídeo item_local.server = "torrent" #Servidor @@ -1485,9 +1494,15 @@ def findvideos(item): item_local.action = "play" item_local.server = servidor item_local.url = enlace - item_local.title = item_local.title.replace("[]", "").strip() - item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip() - item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip() + + #Preparamos título y calidad, quitamos etiquetas vacías + item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title) + item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title) + item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() + item_local.quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.quality) + item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality) + item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() + itemlist.append(item_local.clone()) except: @@ -1582,9 +1597,16 @@ def findvideos(item): item_local.action = "play" item_local.server = servidor item_local.url = enlace - item_local.title = parte_title.replace("[]", "").strip() - item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip() - item_local.title = re.sub(r'\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title).strip() + item_local.title = parte_title.strip() + + #Preparamos título y calidad, quitamos etiquetas vacías + item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title) + item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title) + item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() + item_local.quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.quality) + item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality) + item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() + itemlist.append(item_local.clone()) except: diff --git a/plugin.video.alfa/lib/generictools.py b/plugin.video.alfa/lib/generictools.py index fa6bb57c..24d7d5c9 100644 --- a/plugin.video.alfa/lib/generictools.py +++ b/plugin.video.alfa/lib/generictools.py @@ -8,6 +8,7 @@ # ------------------------------------------------------------ import re +import os import sys import urllib import urlparse @@ -236,8 +237,7 @@ def post_tmdb_listado(item, itemlist): del item.channel_alt if item.url_alt: del item.url_alt - if item.extra2: - del item.extra2 + #Ajustamos el nombre de la categoría if not item.category_new: item.category_new = '' @@ -389,8 +389,8 @@ def post_tmdb_listado(item, itemlist): if item_local.infoLabels['episodio_titulo']: item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace(" []", "").strip() title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() - title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip() - title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip() + title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', 
title).strip() + title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', title).strip() if item.category_new == "newest": #Viene de Novedades. Marcamos el título con el nombre del canal title += ' -%s-' % scrapertools.find_single_match(item_local.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize() @@ -766,6 +766,7 @@ def post_tmdb_episodios(item, itemlist): #Si no está el título del episodio, pero sí está en "title", lo rescatamos if not item_local.infoLabels['episodio_titulo'] and item_local.infoLabels['title'].lower() != item_local.infoLabels['tvshowtitle'].lower(): item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['title'] + item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace('GB', 'G B').replace('MB', 'M B') #Preparamos el título para que sea compatible con Añadir Serie a Videoteca if "Temporada" in item_local.title: #Compatibilizamos "Temporada" con Unify @@ -792,8 +793,8 @@ def post_tmdb_episodios(item, itemlist): item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace(" []", "").strip() item_local.infoLabels['title'] = item_local.infoLabels['title'].replace(" []", "").strip() item_local.title = item_local.title.replace(" []", "").strip() - item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip() - item_local.title = re.sub(r'\s\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title).strip() + item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?-?\s?\]?\]\[\/COLOR\]', '', item_local.title).strip() + item_local.title = re.sub(r'\s?\[COLOR \w+\]-?\s?\[\/COLOR\]', '', item_local.title).strip() #Si la información de num. total de episodios de TMDB no es correcta, tratamos de calcularla if num_episodios < item_local.contentEpisodeNumber: @@ -1054,8 +1055,8 @@ def post_tmdb_findvideos(item, itemlist): title_gen = item.title #Limpiamos etiquetas vacías - title_gen = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title_gen).strip() #Quitamos etiquetas vacías - title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen).strip() #Quitamos colores vacíos + title_gen = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', title_gen).strip() #Quitamos etiquetas vacías + title_gen = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', title_gen).strip() #Quitamos colores vacíos title_gen = title_gen.replace(" []", "").strip() #Quitamos etiquetas vacías title_videoteca = title_gen #Salvamos el título para Videoteca @@ -1103,7 +1104,131 @@ def post_tmdb_findvideos(item, itemlist): return (item, itemlist) + +def get_torrent_size(url): + logger.info() + + """ + + Módulo extraido del antiguo canal ZenTorrent + + Calcula el tamaño de los archivos que contienen un .torrent. Descarga el archivo .torrent en una carpeta, + lo lee y descodifica. 
Si contiene múltiples archivos, suma el tamaño de todos ellos + + Llamada: generictools.get_torrent_size(url) + Entrada: url: url del archivo .torrent + Salida: size: str con el tamaño y tipo de medida ( MB, GB, etc) + + """ + + def convert_size(size): + import math + if (size == 0): + return '0B' + size_name = ("B", "KB", "M B", "G B", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size, 1024))) + p = math.pow(1024, i) + s = round(size / p, 2) + return '%s %s' % (s, size_name[i]) + + def decode(text): + try: + src = tokenize(text) + data = decode_item(src.next, src.next()) + for token in src: # look for more tokens + raise SyntaxError("trailing junk") + except (AttributeError, ValueError, StopIteration): + try: + data = data + except: + data = src + return data + + def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match): + i = 0 + while i < len(text): + m = match(text, i) + s = m.group(m.lastindex) + i = m.end() + if m.lastindex == 2: + yield "s" + yield text[i:i + int(s)] + i = i + int(s) + else: + yield s + + def decode_item(next, token): + if token == "i": + # integer: "i" value "e" + data = int(next()) + if next() != "e": + raise ValueError + elif token == "s": + # string: "s" value (virtual tokens) + data = next() + elif token == "l" or token == "d": + # container: "l" (or "d") values "e" + data = [] + tok = next() + while tok != "e": + data.append(decode_item(next, tok)) + tok = next() + if token == "d": + data = dict(zip(data[0::2], data[1::2])) + else: + raise ValueError + return data + + + #Móludo principal + size = "" + try: + torrents_path = config.get_videolibrary_path() + '/torrents' #path para dejar el .torrent + + if not os.path.exists(torrents_path): + os.mkdir(torrents_path) #si no está la carpeta la creamos + + urllib.URLopener.version = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36 SE 2.X MetaSr 1.0' + urllib.urlretrieve(url, torrents_path + "/generictools.torrent") #desacargamos el .torrent a la carpeta + torrent_file = open(torrents_path + "/generictools.torrent", "rb").read() #leemos el .torrent + + if "used CloudFlare" in torrent_file: #Si tiene CloudFlare, usamos este proceso + try: + urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url.strip(), + torrents_path + "/generictools.torrent") + torrent_file = open(torrents_path + "/generictools.torrent", "rb").read() + except: + torrent_file = "" + + torrent = decode(torrent_file) #decodificamos el .torrent + + #si sólo tiene un archivo, tomamos la longitud y la convertimos a una unidad legible, si no dará error + try: + sizet = torrent["info"]['length'] + size = convert_size(sizet) + except: + pass + + #si tiene múltiples archivos sumamos la longitud de todos + if not size: + check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), "'length': (\d+)}") + sizet = sum([int(i) for i in check_video]) + size = convert_size(sizet) + + except: + logger.error('ERROR al buscar el tamaño de un .Torrent: ' + url) + + try: + os.remove(torrents_path + "/generictools.torrent") #borramos el .torrent + except: + pass + + #logger.debug(url + ' / ' + size) + + return size + + def get_field_from_kodi_DB(item, from_fields='*', files='file'): logger.info() """
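
Note on the core of this patch: `generictools.get_torrent_size(url)` downloads the .torrent into `<videolibrary>/torrents`, bencode-decodes it, takes `info['length']` for single-file torrents or sums every entry in `info['files']` for multi-file ones, and formats the byte count (the "M B"/"G B" spellings with a space appear to be deliberate, so the size tag survives Kodi's Unify title handling). The snippet below is only a minimal, self-contained sketch of that idea, working on a .torrent that is already on disk; `torrent_size_from_file`, `_bdecode` and `_human_size` are hypothetical names, not part of the add-on, and it assumes Python 2 as used by the plugin.

```python
# -*- coding: utf-8 -*-
# Minimal sketch of the idea behind generictools.get_torrent_size():
# decode the bencoded .torrent and add up the payload lengths.
import math
import re


def _bdecode(text):
    """Tiny bencode decoder: integers, strings, lists and dictionaries."""
    def tokenize(text, match=re.compile(r"([idel])|(\d+):|(-?\d+)").match):
        i = 0
        while i < len(text):
            m = match(text, i)
            s = m.group(m.lastindex)
            i = m.end()
            if m.lastindex == 2:                 # string: <length>:<data>
                yield "s"
                yield text[i:i + int(s)]
                i += int(s)
            else:                                # 'i', 'l', 'd', 'e' or an integer value
                yield s

    def decode_item(next_token, token):
        if token == "i":                         # integer: i<value>e
            data = int(next_token())
            if next_token() != "e":
                raise ValueError("unterminated integer")
        elif token == "s":                       # string (virtual token pair)
            data = next_token()
        elif token in ("l", "d"):                # list / dict: values until 'e'
            data = []
            tok = next_token()
            while tok != "e":
                data.append(decode_item(next_token, tok))
                tok = next_token()
            if token == "d":
                data = dict(zip(data[0::2], data[1::2]))
        else:
            raise ValueError("unexpected token: %r" % token)
        return data

    src = tokenize(text)
    return decode_item(lambda: next(src), next(src))


def _human_size(num_bytes):
    """Bytes -> readable text; 'M B'/'G B' keep the space used by the patch."""
    if num_bytes == 0:
        return "0 B"
    units = ("B", "KB", "M B", "G B", "TB", "PB")
    i = int(math.floor(math.log(num_bytes, 1024)))
    return "%s %s" % (round(num_bytes / math.pow(1024, i), 2), units[i])


def torrent_size_from_file(path):
    """Total size of the payload described by a local .torrent file."""
    with open(path, "rb") as f:
        torrent = _bdecode(f.read())
    info = torrent["info"]
    if "length" in info:                         # single-file torrent
        total = info["length"]
    else:                                        # multi-file torrent: sum every 'length'
        total = sum(entry["length"] for entry in info["files"])
    return _human_size(total)
```

A production version would also need the download step, the CloudFlare fallback via anonymouse.org and the temporary-file cleanup that `get_torrent_size()` performs; the sketch only covers the decoding and size calculation.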
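The per-channel hunks above (divxtotal, newpct1, mejortorrent1, etc.) all repeat the same steps: look for an existing size tag in `item.quality`, fall back to `generictools.get_torrent_size(item.url)`, strip any previous tag, and append the fresh one to both title and quality. The following is a hedged sketch of that pattern with a hypothetical helper `append_size_tag()`; the real channels inline these steps instead of calling a function.

```python
# -*- coding: utf-8 -*-
# Hypothetical helper illustrating the per-channel size handling added by this patch.
import re

SIZE_TAG = r'\s\[\d+,?\d*?\s\w\s?[bB]\]'          # e.g. " [2,3 GB]" or " [850 M B]"


def append_size_tag(title, quality, url, get_torrent_size):
    """Return (title, quality) with the torrent size appended as '[... GB]'."""
    # 1) reuse a size already present in the quality string, if any
    match = re.search(r'\s\[(\d+,?\d*?\s\w\s?[bB])\]', quality)
    size = match.group(1) if match else get_torrent_size(url)
    if not size:
        return title, quality

    # 2) drop any previous size tag so it is not duplicated
    title = re.sub(SIZE_TAG, '', title)
    quality = re.sub(SIZE_TAG, '', quality)

    # 3) append the size; in quality, 'GB'/'MB' get a space ('G B', 'M B'),
    #    apparently so that Kodi's Unify does not strip the tag
    title = '%s [%s]' % (title, size)
    size = size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b')
    quality = '%s [%s]' % (quality, size)
    return title, quality
```

Usage in a channel would then look like `item_local.title, item_local.quality = append_size_tag(item_local.title, item_local.quality, item_local.url, generictools.get_torrent_size)`.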