diff --git a/mediaserver/alfa.py b/mediaserver/alfa.py
index a7962200..e53fe499 100644
--- a/mediaserver/alfa.py
+++ b/mediaserver/alfa.py
@@ -18,6 +18,7 @@ import HTTPAndWSServer
http_port = config.get_setting("server.port")
myip = config.get_local_ip()
+version = config.get_addon_version()
def thread_name_wrap(func):
@@ -41,7 +42,7 @@ if sys.version_info < (2, 7, 11):
def show_info():
    os.system('cls' if os.name == 'nt' else 'clear')
    print ("--------------------------------------------------------------------")
-    print ("Alfa Iniciado")
+    print ("Alfa %s Iniciado" % version)
    print ("La URL para acceder es http://%s:%s" % (myip, http_port))
    print ("--------------------------------------------------------------------")
    print ("Runtime Path : " + config.get_runtime_path())
@@ -68,7 +69,7 @@ def start():
    # Da por levantado el servicio
    logger.info("--------------------------------------------------------------------")
-    logger.info("Alfa Iniciado")
+    logger.info("Alfa %s Iniciado" % version)
    logger.info("La URL para acceder es http://%s:%s" % (myip, http_port))
    logger.info("--------------------------------------------------------------------")
    logger.info("Runtime Path : " + config.get_runtime_path())
diff --git a/mediaserver/platformcode/config.py b/mediaserver/platformcode/config.py
index a37a0c41..16625238 100644
--- a/mediaserver/platformcode/config.py
+++ b/mediaserver/platformcode/config.py
@@ -14,6 +14,27 @@ settings_dic = {}
adult_setting = {}
+def get_addon_version(linea_inicio=0, total_lineas=2):
+    '''
+    Devuelve el número de versión del addon, obtenido desde el archivo addon.xml
+    '''
+    path = os.path.join(get_runtime_path(), "addon.xml")
+    f = open(path, "rb")
+    data = []
+    for x, line in enumerate(f):
+        if x < linea_inicio: continue
+        if len(data) == total_lineas: break
+        data.append(line)
+    f.close()
+    data1 = "".join(data)
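The config.py hunk is cut off at this point, before `get_addon_version`
returns (the header of the next file's diff did not survive either; the hunks
that follow belong to an anime channel scraper). Presumably the joined header
lines in `data1` are searched for the `version` attribute of the `<addon>`
tag. A minimal sketch of how the complete function might read, assuming a
standard Kodi-style addon.xml header; the regex and the empty-string fallback
are assumptions, not recovered from the patch:

    import os
    import re

    def get_addon_version(linea_inicio=0, total_lineas=2):
        # Read a slice of addon.xml's header lines and pull the version
        # attribute, e.g. <addon id="..." version="x.y.z" ...>.
        path = os.path.join(get_runtime_path(), "addon.xml")
        with open(path) as f:
            header = "".join(f.readlines()[linea_inicio:linea_inicio + total_lineas])
        match = re.search(r'version="([^"]+)"', header)
        return match.group(1) if match else ""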
+ # Últimos episodios
    matches = re.compile('.+?]+>.+?'
                         '(.*?)'
                         '(.*?)', re.DOTALL).findall(data)
    itemlist = []
-
    for url, thumbnail, str_episode, show in matches:
-
        try:
            episode = int(str_episode.replace("Episodio ", ""))
        except ValueError:
@@ -135,28 +112,21 @@ def novedades_episodios(item):
        new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, thumbnail=thumbnail,
                        fulltitle=title)
-
        itemlist.append(new_item)
-
    return itemlist


def novedades_anime(item):
    logger.info()
-
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = scrapertools.find_single_match(data, '')
    matches = re.compile('(.*?).+?'
                         '(.*?)'
                         '.+?)?', re.DOTALL).findall(data)
    itemlist = []
-
    for url, thumbnail, _type, title, plot in matches:
-
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)
-
        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)
        if _type != "Película":
@@ -165,173 +135,75 @@ def novedades_anime(item):
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title
-
        itemlist.append(new_item)
-
    return itemlist


def listado(item):
    logger.info()
-
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    url_pagination = scrapertools.find_single_match(data, '(.*?)')
    matches = re.compile('', re.DOTALL).findall(data)
-
    itemlist = []
-
    for url, thumbnail, _type, title, plot in matches:
-
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)
-
        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)
-
        if _type == "Anime":
            new_item.show = title
            new_item.context = renumbertools.context(item)
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title
-
        itemlist.append(new_item)
-
    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"
-
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))
-
    return itemlist


def episodios(item):
    logger.info()
    itemlist = []
-
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
-
-    # fix para renumbertools
-    item.show = scrapertools.find_single_match(data, '(.*?)')
-
-    matches = re.compile('href="([^"]+)">(.*?)', re.DOTALL).findall(data)
-
-    if matches:
-        for url, thumb, title in matches:
-            title = title.strip()
-            url = urlparse.urljoin(item.url, url)
-            # thumbnail = item.thumbnail
-
-            try:
-                episode = int(scrapertools.find_single_match(title, "^.+?\s(\d+)$"))
-            except ValueError:
-                season = 1
-                episode = 1
-            else:
-                season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
-
-            title = "%sx%s : %s" % (season, str(episode).zfill(2), item.title)
-
-            itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb, fulltitle=title,
-                                       fanart=item.thumbnail, contentType="episode"))
-    else:
-        # no hay thumbnail
-        matches = re.compile(']+>(.*?)<', re.DOTALL).findall(data)
-
-        for url, title in matches:
-            title = title.strip()
-            url = urlparse.urljoin(item.url, url)
-            thumb = item.thumbnail
-
-            try:
-                episode = int(scrapertools.find_single_match(title, "^.+?\s(\d+)$"))
-            except ValueError:
-                season = 1
-                episode = 1
-            else:
-                season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
-
-            title = "%sx%s : %s" % (season, str(episode).zfill(2), item.title)
-
-            itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb, fulltitle=title,
-                                       fanart=item.thumbnail, contentType="episode"))
-
+    info = eval(scrapertools.find_single_match(data, 'anime_info = (.*?);'))
+    episodes = eval(scrapertools.find_single_match(data, 'var episodes = (.*?);'))
+    for episode in episodes:
+        url = '%s/ver/%s/%s-%s' % (HOST, episode[1], info[2], episode[0])
+        title = '1x%s Episodio %s' % (episode[0], episode[0])
+        itemlist.append(item.clone(title=title, url=url, action='findvideos', show=info[1]))
+    itemlist = itemlist[::-1]
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca",
                             action="add_serie_to_library", extra="episodios"))
-
    return itemlist


def findvideos(item):
    logger.info()
-
    itemlist = []
-
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)
-
    list_videos = scrapertools.find_multiple_matches(data, 'video\[\d\]\s=\s\'
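Two notes on the tail of this diff. First, the rewritten `episodios()` feeds
JavaScript literals scraped from the page (`anime_info = [...]` and
`var episodes = [[...], ...]`) straight into `eval()`, which executes whatever
the site serves. A stricter sketch under the same assumptions the hunk already
makes about those variables (array shapes inferred from the index accesses
above; `ast.literal_eval` accepts literals only):

    import ast

    # Same extraction as the hunk, parsed instead of executed. Assumed shapes,
    # per the indexing above: info[1] = show title, info[2] = URL slug, and
    # each episodes entry = [episode_number, episode_id].
    info = ast.literal_eval(scrapertools.find_single_match(data, 'anime_info = (.*?);'))
    episodes = ast.literal_eval(scrapertools.find_single_match(data, 'var episodes = (.*?);'))
    for number, episode_id in episodes:
        url = '%s/ver/%s/%s-%s' % (HOST, episode_id, info[2], number)

Second, the `findvideos()` hunk is truncated mid-pattern: `video\[\d\]\s=\s\'`
suggests the page assigns each player as `video[0] = '<iframe ...>';`, but the
rest of the regex is cut off here.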