Merge pull request #322 from Intel11/master

Actualizados
This commit is contained in:
Alfa
2018-06-27 15:55:30 -05:00
committed by GitHub
16 changed files with 161 additions and 233 deletions

View File

@@ -0,0 +1,24 @@
Esta versión de Alfa no necesita de ningún programa para instalar (tampoco kodi), es una versión independiente que solo necesita un navegador web y un equipo (en el cual será instalado) para ver el contenido desde cualquier dispositivo que cuente con un navegador web.
REQUISITOS:
Se necesita que esté instalado Python 2.x, que se puede descargar desde aquí: https://www.python.org/
COMO INSTALAR LA VERSION MEDIASERVER:
-Descargar Alfa desde el repositorio de GitHub: https://github.com/alfa-addon/addon (opción "Clone or download" - "Download ZIP")
-El archivo descargado (addon-master.zip) abrirlo e ingresar a la carpeta: addon-master
-Descomprimir la carpeta plugin.video.alfa en alguna carpeta
-Luego descomprimir la carpeta mediaserver encima de la carpeta plugin.video.alfa, reemplazando los archivos existentes.
COMO INICIAR LA VERSION MEDIASERVER
Para iniciar: python alfa.py
Y mostrará en pantalla la url a la cual se puede conectar desde cualquier dispositivo que contenga un navegador web.
Ejemplo:
http://192.168.1.10:8080

View File

@@ -1,2 +0,0 @@
Debe ejecutar primero el archivo "script.py", si no lo hizo antes.
Una vez realizado el proceso podrá ejecutar como siempre "alfa.py" para iniciar el addon.

View File

@@ -310,6 +310,23 @@ def verify_directories_created():
logger.debug("Creating %s: %s" % (path, saved_path))
filetools.mkdir(saved_path)
config_paths = [["folder_movies", "CINE"],
["folder_tvshows", "SERIES"]]
for path, default in config_paths:
saved_path = get_setting(path)
if not saved_path:
saved_path = default
set_setting(path, saved_path)
content_path = filetools.join(get_videolibrary_path(), saved_path)
if not filetools.exists(content_path):
logger.debug("Creating %s: %s" % (path, content_path))
# si se crea el directorio
filetools.mkdir(content_path)
def get_local_ip():
import socket
@@ -372,6 +389,7 @@ if not os.path.exists(get_data_path()):
TRANSLATION_FILE_PATH = os.path.join(get_runtime_path(), "resources", "language", "Spanish", "strings.po")
load_settings()
# modo adulto:
# sistema actual 0: Nunca, 1:Siempre, 2:Solo hasta que se reinicie sesión
# si es == 2 lo desactivamos.

View File

@@ -131,9 +131,12 @@ div.header {
}
div.header > div.logo {
float: left;
height: 50px;
width: 70px;
background-image: url("data:image/svg+xml;charset=US-ASCII,%3Csvg%20version%3D%221.1%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20width%3D%2270px%22%20height%3D%2250px%22%20xmlns%3Axlink%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxlink%22%20xml%3Aspace%3D%22preserve%22%3E%0A%3Crect%20fill%3D%22%2391D9E8%22%20stroke%3D%22%2301455C%22%20stroke-width%3D%221%22%20x%3D%229%22%20y%3D%2215%22%20width%3D%2240.4%22%20height%3D%2226.5%22%2F%3E%0A%3Crect%20fill%3D%22%2303BFEC%22%20stroke%3D%22%2301455C%22%20stroke-width%3D%221%22%20x%3D%226%22%20y%3D%229%22%20width%3D%2210.8%22%20height%3D%229.3%22%20rx%3D%222%22%20ry%3D%222%22%2F%3E%0A%3Crect%20fill%3D%22%2303BFEC%22%20stroke%3D%22%2301455C%22%20stroke-width%3D%221%22%20x%3D%2237%22%20y%3D%2235%22%20width%3D%2215.5%22%20height%3D%2212.4%22%20rx%3D%222%22%20ry%3D%222%22%2F%3E%0A%3Crect%20fill%3D%22%2300ABE3%22%20stroke%3D%22%2301455C%22%20stroke-width%3D%221%22%20x%3D%2233%22%20y%3D%222%22%20width%3D%2234.2%22%20height%3D%2225%22%20rx%3D%222%22%20ry%3D%222%22%2F%3E%0A%3Crect%20fill%3D%22%2303BFEC%22%20stroke%3D%22%2301455C%22%20stroke-width%3D%221%22%20x%3D%223%22%20y%3D%2227%22%20width%3D%2224.8%22%20height%3D%2217%22%20rx%3D%222%22%20ry%3D%222%22%2F%3E%0A%3C%2Fsvg%3E");
height: 45px;
width: 60px;
margin-left: 15px;
margin-top: 2px;
background-repeat: no-repeat;
background-image: url("https://github.com/alfa-addon/addon/raw/master/mediaserver/platformcode/template/logo-mediaserver.png");
}
div.header > a.settings:after {
@@ -1303,4 +1306,4 @@ ul.itemlist > li.item_list > a.item > h3.label {
padding-top: 0px;
left: 0px;
right: 0px;
}
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 5.4 KiB

After

Width:  |  Height:  |  Size: 1.1 KiB

View File

@@ -1,110 +0,0 @@
# -*- coding: utf-8 -*-
import os
import re
import shutil
from platformcode import config, logger, platformtools
def conversion():
    """Rewrite settings.xml, stripping legacy "(str, ...)" / "(bool, ...)"
    wrappers from stored values and resetting every adult-section setting
    to its default value.

    A backup of the current file is saved as settings.backup.xml before
    anything is rewritten.  The user is then prompted (looping until they
    type 's') to acknowledge that the adult password was reset.  Any
    failure is logged and reported on stdout.
    """
    logger.info()
    data = ""
    try:
        # Keep a backup of the current settings before rewriting them.
        path_settings = os.path.join(config.get_data_path(), "settings.xml")
        path_settings_backup = os.path.join(config.get_data_path(), "settings.backup.xml")
        shutil.copy(path_settings, path_settings_backup)

        begin_tag = "<settings>\n"
        end_tag = "</settings>\n"

        # The adult-section settings are not migrated: they are written
        # back with these default values instead.
        adult_data = ' <setting id="adult_aux_intro_password" value="" />\n'
        adult_data += ' <setting id="adult_aux_new_password1" value="" />\n'
        adult_data += ' <setting id="adult_aux_new_password2" value="" />\n'
        adult_data += ' <setting id="adult_mode" value="0" />\n'
        adult_data += ' <setting id="adult_password" value="0000" />\n'
        adult_data += ' <setting id="adult_request_password" value="false" />\n'

        skipped_ids = ("adult_aux_intro_password", "adult_aux_new_password1",
                       "adult_aux_new_password2", "adult_mode", "adult_password",
                       "adult_request_password", "adult_pin")

        data_aux = ""
        f = open(path_settings, "r")
        for line in f:
            matches = re.findall('<setting id="([^"]*)" value="([^"]*)', line, re.DOTALL)
            logger.info("matches %s" % matches)
            if not matches:
                # Lines without a <setting> entry (the <settings> wrapper
                # tags) are dropped; the wrapper is re-added below.
                continue
            for _id, value in matches:
                if _id in skipped_ids:
                    # Replaced by the adult_data defaults above.
                    continue
                if value:
                    logger.info(" antes value!! %s" % value)
                    if "(str, " in value:
                        if "(str, &apos;" in value:
                            value = value.replace("(str, &apos;", "")
                            value = value.replace("&apos;)", "")
                        # Fixed: the original tested the constant string
                        # "(str, '" (always truthy) instead of membership
                        # in value.
                        elif "(str, '" in value:
                            value = value.replace("(str, '", "")
                            value = value.replace("')", "")
                    elif "(bool, " in value:
                        value = value.replace("(bool, ", "")
                        if value == "True)":
                            value = "true"
                        else:
                            value = "false"
                    value = value.replace('\\\\', '\\')
                    logger.info(" despues value!! %s" % value)
                aux_line = '<setting id="%s" value="%s" />\n' % (_id, value)
                logger.info(" aux_line %s" % aux_line)
                data_aux += " " + aux_line
        f.close()

        # Reassemble the file: wrapper tag + reset adult settings +
        # migrated settings + closing tag.
        data = begin_tag + adult_data + data_aux + end_tag
        copy_file = open(path_settings, "w")
        copy_file.write(data)
        copy_file.close()

        # Ask the user to acknowledge the password reset; loop until 's'.
        import sys
        logger.info("sys ve %s" % sys.version_info)
        prompt = ("Alfa\nCorregido un error en la seccion adultos, se ha reseteado la contrasena a por "
                  "defecto, tendra que cambiarla de nuevo si lo desea.\n Escriba 's', si lo ha entendido: ")
        while True:
            if sys.version_info > (3, 0):
                value = input(prompt)
            else:
                value = raw_input(prompt)
            logger.debug("value %s" % value)
            if value.lower() == 's':
                break
            logger.info("En disclaimer clickó 'No'")
        logger.info("En disclaimer clickó 'Si'")
    except Exception as ex:
        template = "An exception of type %s occured. Arguments:\n%r"
        message = template % (type(ex).__name__, ex.args)
        logger.info(message)
        print("Alfa", "Error, en conversión")

    logger.info("Datos a guardar %s" % data)


if __name__ == "__main__":
    conversion()

View File

@@ -413,9 +413,6 @@ def episodios(item):
patron = "<li><a href='([^']+)'>[^<]+</a></li>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl in matches:
if "temporada-0" in scrapedurl:
continue
## Episodios
data = agrupa_datos(httptools.downloadpage(scrapedurl).data)
sid = scrapertools.get_match(data, "<script>var sid = '(\d+)'")
ssid = scrapertools.get_match(scrapedurl, "temporada-(\d+)")

View File

@@ -16,7 +16,7 @@ from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from core import jsontools
from channels import side_menu
THUMBNAILS = {'0': 'posters', '1': 'banners', '2': 'squares'}
@@ -245,13 +245,15 @@ def novedades(item):
list_canales, any_active = get_channels_list()
if mode=='silent' and any_active and len(list_canales[item.extra]) > 0:
side_menu.set_menu_settings(item)
aux_list=[]
for canal in list_canales[item.extra]:
if len(aux_list)<2:
aux_list.append(canal)
list_canales[item.extra]=aux_list
if config.is_xbmc():
from channels import side_menu
if mode=='silent' and any_active and len(list_canales[item.extra]) > 0:
side_menu.set_menu_settings(item)
aux_list=[]
for canal in list_canales[item.extra]:
if len(aux_list)<2:
aux_list.append(canal)
list_canales[item.extra]=aux_list
if mode == 'set_cache':
list_canales[item.extra] = list_canales[item.extra][2:]

View File

@@ -490,7 +490,7 @@ def episodios(item):
# Sin valoración:
# show = re.sub(r"\s\(\d+\.\d+\)", "", item.show)
itemlist.append(
Item(channel='plusdede', title="Añadir esta serie a la biblioteca de XBMC", url=item.url, token=token,
Item(channel='plusdede', title="Añadir esta serie a la videoteca", url=item.url, token=token,
action="add_serie_to_library", extra="episodios###", show=show))
itemlist.append(
Item(channel='plusdede', title="Descargar todos los episodios de la serie", url=item.url, token=token,

View File

@@ -494,9 +494,62 @@ def update_tvshow(item):
p_dialog.close()
def mark_content_as_watched2(item):
# Update the playcount stored in the item's .nfo file (and, on Kodi
# builds, mirror it into the Kodi video library), then refresh the UI.
# NOTE(review): this diff rendering has lost the original indentation;
# the nesting below is as displayed, not reconstructed.
logger.info()
# logger.debug("item:\n" + item.tostring('\n'))
if filetools.exists(item.nfo):
head_nfo, it = videolibrarytools.read_nfo(item.nfo)
#logger.debug(it)
# Movies: the playcount key is the nfo's base filename (unless it is
# the show-level 'tvshow' nfo, which is handled as an episode below).
if item.contentType == 'movie':
name_file = os.path.splitext(os.path.basename(item.nfo))[0]
if name_file != 'tvshow' :
it.library_playcounts.update({name_file: item.playcount})
# Episodes/lists (or a 'tvshow' nfo): key on the .strm base filename;
# presumably named like "1x01", so its first character is the season
# number — TODO confirm (breaks for seasons >= 10).
if item.contentType == 'episode' or item.contentType == 'list' or name_file == 'tvshow':
# elif item.contentType == 'episode':
name_file = os.path.splitext(os.path.basename(item.strm_path))[0]
num_season = name_file [0]
item.__setattr__('contentType', 'episode')
item.__setattr__('contentSeason', num_season)
#logger.debug(name_file)
else:
name_file = item.contentTitle
# logger.debug(name_file)
# Lazily create the playcount map on older nfo objects.
if not hasattr(it, 'library_playcounts'):
it.library_playcounts = {}
it.library_playcounts.update({name_file: item.playcount})
# If every episode of a season is marked, mark the season as well.
if item.contentType != 'movie':
it = check_season_playcount(it, item.contentSeason)
#logger.debug(it)
# Persist the changes back to item.nfo.
if filetools.write(item.nfo, head_nfo + it.tojson()):
item.infoLabels['playcount'] = item.playcount
logger.debug(item.playcount)
# if item.contentType == 'episodesss':
# Update the whole series
#new_item = item.clone(contentSeason=-1)
#mark_season_as_watched(new_item)
if config.is_xbmc():
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_content_as_watched_on_kodi(item , item.playcount)
# logger.debug(item)
platformtools.itemlist_refresh()
def mark_content_as_watched(item):
logger.info()
# logger.debug("item:\n" + item.tostring('\n'))
logger.debug("item:\n" + item.tostring('\n'))
if filetools.exists(item.nfo):
head_nfo, it = videolibrarytools.read_nfo(item.nfo)
@@ -520,10 +573,11 @@ def mark_content_as_watched(item):
if filetools.write(item.nfo, head_nfo + it.tojson()):
item.infoLabels['playcount'] = item.playcount
if item.contentType == 'tvshow':
if item.contentType == 'tvshow' and item.type != 'episode' :
# Actualizar toda la serie
new_item = item.clone(contentSeason=-1)
mark_season_as_watched(new_item)
if config.is_xbmc(): #and item.contentType == 'episode':
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_content_as_watched_on_kodi(item, item.playcount)
@@ -555,7 +609,7 @@ def mark_season_as_watched(item):
season, episode = season_episode.split("x")
if int(item.contentSeason) == -1 or int(season) == int(item.contentSeason):
name_file = os.path.splitext(os.path.basename(f))[0]
name_file = os.path.splitext(os.path.basename(i))[0]
it.library_playcounts[name_file] = item.playcount
episodios_marcados += 1
@@ -685,16 +739,19 @@ def check_season_playcount(item, season):
def check_tvshow_playcount(item, season):
logger.info()
# logger.debug(item)
if season:
temporadas_serie = 0
temporadas_vistas_serie = 0
for key, value in item.library_playcounts.iteritems():
if key == ("season %s" % season):
#if key.startswith("season %s" % season):
if key.startswith("season" ):
temporadas_serie += 1
if value > 0:
temporadas_vistas_serie += 1
#logger.debug(temporadas_serie)
if temporadas_serie == temporadas_vistas_serie:
if temporadas_serie == temporadas_vistas_serie:
item.library_playcounts.update({item.title: 1})
else:
item.library_playcounts.update({item.title: 0})

View File

@@ -22,28 +22,12 @@ def cachePage(url, post=None, headers=None, modoCache=None, timeout=None):
def downloadpage(url, post=None, headers=None, follow_redirects=True, timeout=None, header_to_get=None):
    """Compatibility wrapper around httptools.downloadpage.

    Returns the response body, or — when header_to_get is given — the
    value of that response header instead.
    """
    resp = httptools.downloadpage(url, post=post, headers=headers,
                                  follow_redirects=follow_redirects, timeout=timeout)
    if not header_to_get:
        return resp.data
    return resp.headers.get(header_to_get)
# def downloadpageWithResult(url, post=None, headers=None, follow_redirects=True, timeout=None, header_to_get=None):
# response = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=follow_redirects,
# timeout=timeout)
#
# if header_to_get:
# return response.headers.get(header_to_get)
# else:
# return response.data, response.code
# def downloadpageWithoutCookies(url):
# response = httptools.downloadpage(url, cookies=False)
# return response.data
def downloadpageGzip(url):
    """Fetch *url* (adding a Referer header) and return the response body."""
    return httptools.downloadpage(url, add_referer=True).data
@@ -60,23 +44,12 @@ def get_header_from_response(url, header_to_get="", post=None, headers=None):
return response.headers.get(header_to_get)
# def get_headers_from_response(url, post=None, headers=None):
# response = httptools.downloadpage(url, post=post, headers=headers, only_headers=True)
# return response.headers.items()
def read_body_and_headers(url, post=None, headers=None, follow_redirects=False, timeout=None):
    """Fetch *url* and return a (body, headers) tuple from the response."""
    resp = httptools.downloadpage(url, post=post, headers=headers,
                                  follow_redirects=follow_redirects, timeout=timeout)
    return resp.data, resp.headers
# def anti_cloudflare(url, host="", headers=None, post=None, location=False):
# # anti_cloudfare ya integrado en httptools por defecto
# response = httptools.downloadpage(url, post=post, headers=headers)
# return response.data
def printMatches(matches):
i = 0
for match in matches:
@@ -130,17 +103,6 @@ def unescape(text):
else:
# named entity
try:
'''
if text[1:-1] == "amp":
text = "&amp;amp;"
elif text[1:-1] == "gt":
text = "&amp;gt;"
elif text[1:-1] == "lt":
text = "&amp;lt;"
else:
print text[1:-1]
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8")
'''
import htmlentitydefs
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8")
except KeyError:
@@ -385,27 +347,6 @@ def remove_show_from_title(title, show):
return title
# def getRandom(str):
# return get_md5(str)
# def unseo(cadena):
# if cadena.upper().startswith("VER GRATIS LA PELICULA "):
# cadena = cadena[23:]
# elif cadena.upper().startswith("VER GRATIS PELICULA "):
# cadena = cadena[20:]
# elif cadena.upper().startswith("VER ONLINE LA PELICULA "):
# cadena = cadena[23:]
# elif cadena.upper().startswith("VER GRATIS "):
# cadena = cadena[11:]
# elif cadena.upper().startswith("VER ONLINE "):
# cadena = cadena[11:]
# elif cadena.upper().startswith("DESCARGA DIRECTA "):
# cadena = cadena[17:]
# return cadena
# scrapertools.get_filename_from_url(media_url)[-4:]
def get_filename_from_url(url):
import urlparse
parsed_url = urlparse.urlparse(url)
@@ -465,7 +406,10 @@ def get_season_and_episode(title):
try:
matches = re.compile(patron, re.I).search(title)
if matches:
filename = matches.group(1).lstrip('0') + "x" + matches.group(2).zfill(2)
if len(matches.group(1)) == 1:
filename = matches.group(1) + "x" + matches.group(2).zfill(2)
else:
filename = matches.group(1).lstrip('0') + "x" + matches.group(2).zfill(2)
break
except:
pass
@@ -473,27 +417,3 @@ def get_season_and_episode(title):
logger.info("'" + title + "' -> '" + filename + "'")
return filename
# def get_sha1(cadena):
# try:
# import hashlib
# devuelve = hashlib.sha1(cadena).hexdigest()
# except:
# import sha
# import binascii
# devuelve = binascii.hexlify(sha.new(cadena).digest())
#
# return devuelve
# def get_md5(cadena):
# try:
# import hashlib
# devuelve = hashlib.md5(cadena).hexdigest()
# except:
# import md5
# import binascii
# devuelve = binascii.hexlify(md5.new(cadena).digest())
#
# return devuelve

View File

@@ -52,7 +52,7 @@ def mark_auto_as_watched(item):
item.playcount = 1
sync_with_trakt = True
from channels import videolibrary
videolibrary.mark_content_as_watched(item)
videolibrary.mark_content_as_watched2(item)
break
time.sleep(30)

View File

@@ -12,6 +12,8 @@ def test_video_exists(page_url):
except:
pass
if response.code == 404:
return False, "[Rapidvideo] El archivo no existe ó ha sido borrado"
if not response.data or "urlopen error [Errno 1]" in str(response.code):
from platformcode import config
if config.is_xbmc():

View File

@@ -5,6 +5,10 @@
{
"pattern": "(vshare.io/v/[a-zA-Z0-9/-]+)",
"url": "http://\\1"
},
{
"pattern": "(vshare.eu/embed-[a-zA-Z0-9/-]+.html)",
"url": "http://\\1"
}
]
},

View File

@@ -26,22 +26,25 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
if flowplayer:
return [["FLV", flowplayer.group(1)]]
jsUnpack = jsunpack.unpack(data)
logger.debug(jsUnpack)
video_urls = []
try:
jsUnpack = jsunpack.unpack(data)
logger.debug(jsUnpack)
fields = re.search("\[([^\]]+).*?parseInt\(value\)-(\d+)", jsUnpack)
if fields:
logger.debug("Values: " + fields.group(1))
logger.debug("Substract: " + fields.group(2))
substract = int(fields.group(2))
fields = re.search("\[([^\]]+).*?parseInt\(value\)-(\d+)", jsUnpack)
if fields:
logger.debug("Values: " + fields.group(1))
logger.debug("Substract: " + fields.group(2))
substract = int(fields.group(2))
arrayResult = [chr(int(value) - substract) for value in fields.group(1).split(",")]
strResult = "".join(arrayResult)
logger.debug(strResult)
videoSources = re.findall("<source[\s]+src=[\"'](?P<url>[^\"']+)[^>]+label=[\"'](?P<label>[^\"']+)", strResult)
for url, label in videoSources:
video_urls.append([label, url])
video_urls.sort(key=lambda i: int(i[0].replace("p","")))
except:
url = scrapertools.find_single_match(data,'<source src="([^"]+)')
video_urls.append(["MP4", url])
arrayResult = [chr(int(value) - substract) for value in fields.group(1).split(",")]
strResult = "".join(arrayResult)
logger.debug(strResult)
videoSources = re.findall("<source[\s]+src=[\"'](?P<url>[^\"']+)[^>]+label=[\"'](?P<label>[^\"']+)", strResult)
for url, label in videoSources:
video_urls.append([label, url])
video_urls.sort(key=lambda i: int(i[0].replace("p","")))
return video_urls

View File

@@ -13,11 +13,14 @@ from core import filetools
from core import videolibrarytools
from platformcode import config, logger
from platformcode import platformtools
from channels import videolibrary
def update(path, p_dialog, i, t, serie, overwrite):
logger.info("Actualizando " + path)
insertados_total = 0
#logger.debug(serie)
head_nfo, it = videolibrarytools.read_nfo(path + '/tvshow.nfo')
# logger.debug("%s: %s" %(serie.contentSerieName,str(list_canales) ))
for channel, url in serie.library_urls.items():
@@ -35,6 +38,7 @@ def update(path, p_dialog, i, t, serie, overwrite):
pathchannels = filetools.join(config.get_runtime_path(), "channels", serie.channel + '.py')
logger.info("Cargando canal: " + pathchannels + " " +
serie.channel)
logger.debug(serie)
if serie.library_filter_show:
serie.show = serie.library_filter_show.get(channel, serie.contentSerieName)
@@ -46,10 +50,16 @@ def update(path, p_dialog, i, t, serie, overwrite):
if int(overwrite) == 3:
# Sobrescribir todos los archivos (tvshow.nfo, 1x01.nfo, 1x01 [canal].json, 1x01.strm, etc...)
insertados, sobreescritos, fallidos = videolibrarytools.save_tvshow(serie, itemlist)
serie= videolibrary.check_season_playcount(serie, serie.contentSeason)
if filetools.write(path + '/tvshow.nfo', head_nfo + it.tojson()):
serie.infoLabels['playcount'] = serie.playcount
else:
insertados, sobreescritos, fallidos = videolibrarytools.save_episodes(path, itemlist, serie,
silent=True,
overwrite=overwrite)
it = videolibrary.check_season_playcount(it, it.contentSeason)
if filetools.write(path + '/tvshow.nfo', head_nfo + it.tojson()):
serie.infoLabels['playcount'] = serie.playcount
insertados_total += insertados
except Exception, ex: