Merge pull request #316 from Intel11/master

Updated
Authored by Alfa on 2018-06-20 15:16:42 -05:00, committed by GitHub
6 changed files with 70 additions and 50 deletions

View File

@@ -35,6 +35,8 @@ def login(pagina):
dom = pagina.split(".")[0]
user = config.get_setting("%suser" %dom, "kbagi")
password = config.get_setting("%spassword" %dom, "kbagi")
if "kbagi" in pagina:
pagina = "k-bagi.com"
if not user:
return False, "Para ver los enlaces de %s es necesario registrarse en %s" %(dom, pagina)
data = httptools.downloadpage("http://%s" % pagina).data
@@ -65,14 +67,14 @@ def mainlist(item):
if not logueado:
itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
else:
item.extra = "http://kbagi.com"
item.extra = "http://k-bagi.com"
itemlist.append(item.clone(title="kbagi", action="", text_color=color2))
itemlist.append(
item.clone(title=" Búsqueda", action="search", url="http://kbagi.com/action/SearchFiles"))
item.clone(title=" Búsqueda", action="search", url="http://k-bagi.com/action/SearchFiles"))
itemlist.append(item.clone(title=" Colecciones", action="colecciones",
url="http://kbagi.com/action/home/MoreNewestCollections?pageNumber=1"))
url="http://k-bagi.com/action/home/MoreNewestCollections?pageNumber=1"))
itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro",
url="http://kbagi.com/action/SearchFiles"))
url="http://k-bagi.com/action/SearchFiles"))
itemlist.append(item.clone(title=" Mi cuenta", action="cuenta"))
logueado, error_message = login("diskokosmiko.mx")
if not logueado:
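
Note on the two added lines in login(): the settings prefix is still derived from the name the function is called with ("kbagi"), so stored credentials keep working, while the actual HTTP request goes to the renamed host. A minimal standalone sketch of just that step (normalize_domain is a hypothetical helper, not part of the channel; the real login() also reads credentials and downloads the page):

def normalize_domain(pagina):
    dom = pagina.split(".")[0]  # settings prefix, taken from the original name
    if "kbagi" in pagina:
        pagina = "k-bagi.com"   # requests now target the renamed host
    return dom, pagina

assert normalize_domain("kbagi.com") == ("kbagi", "k-bagi.com")
assert normalize_domain("diskokosmiko.mx") == ("diskokosmiko", "diskokosmiko.mx")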

View File

@@ -1,4 +1,4 @@
- # -*- coding: utf-8 -*-
+ # -*- coding: utf-8 -*-
import re
import urlparse
@@ -17,33 +17,33 @@ def mainlist(item):
itemlist = list()
- itemlist.append(
- Item(channel=item.channel, action="lista", title="Top Películas", url=urlparse.urljoin(host, "top")))
- itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host))
+ #itemlist.append(
+ #    Item(channel=item.channel, action="lista", title="Top Películas", url=urlparse.urljoin(host, "top")))
+ #itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host))
itemlist.append(Item(channel=item.channel, action="explorar", title="Género", url=urlparse.urljoin(host, "genero")))
- itemlist.append(Item(channel=item.channel, action="explorar", title="Listado Alfabético",
- url=urlparse.urljoin(host, "alfabetico")))
# itemlist.append(Item(channel=item.channel, action="explorar", title="Listado por año", url=urlparse.urljoin(host, "año")))
- itemlist.append(Item(channel=item.channel, action="lista", title="Otras Películas (No Bollywood)",
- url=urlparse.urljoin(host, "estrenos")))
- itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "buscar-")))
+ #itemlist.append(Item(channel=item.channel, action="explorar", title="Listado Alfabético",
+ #                     url=urlparse.urljoin(host, "alfabetico")))
+ itemlist.append(Item(channel=item.channel, action="explorar", title="Listado por Año", url=urlparse.urljoin(host, "genero")))
+ #itemlist.append(Item(channel=item.channel, action="lista", title="Otras Películas (No Bollywood)",
+ #                     url=urlparse.urljoin(host, "estrenos")))
+ #itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "buscar-")))
return itemlist
def explorar(item):
logger.info()
itemlist = list()
- url1 = item.title
- data = httptools.downloadpage(host).data
+ urltitle = item.title
+ data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
- if 'Género' in url1:
- patron = '<div class="d">.+?<h3>Pel.+?neros<\/h3>(.+?)<\/h3>'
- if 'Listado Alfabético' in url1:
- patron = '<\/li><\/ul>.+?<h3>Pel.+?tico<\/h3>(.+?)<\/h3>'
- if 'Año' in url1:
- patron = '<ul class="anio"><li>(.+?)<\/ul>'
+ if 'Género' in urltitle:
+ patron = "var accion = '<div .+?>(.+?)<\/div>'"
+ #if 'Listado Alfabético' in urltitle:
+ #    patron = '<\/li><\/ul>.+?<h3>Pel.+?tico<\/h3>(.+?)<\/h3>'
+ if 'Año' in urltitle:
+ patron = "var anho = '<div .+?>(.+?)<\/div>'"
data_explorar = scrapertools.find_single_match(data, patron)
- patron_explorar = '<a href="([^"]+)">([^"]+)<\/a>'
+ patron_explorar = '<li class=".+?"><a class=".+?" href="(.+?)">(.+?)<\/a><\/li>'
matches = scrapertools.find_multiple_matches(data_explorar, patron_explorar)
for scrapedurl, scrapedtitle in matches:
if 'Acci' in scrapedtitle:
@@ -56,7 +56,9 @@ def explorar(item):
scrapedtitle = 'Histórico'
if 'lico Guerra' in scrapedtitle:
scrapedtitle = 'Bélico Guerra'
- if 'Ciencia' in scrapedtitle:
+ if 'Biogra' in scrapedtitle:
+ scrapedtitle = 'Biografía'
+ if 'Ficcion' in scrapedtitle:
scrapedtitle = 'Ciencia Ficción'
itemlist.append(item.clone(action='lista', title=scrapedtitle, url=scrapedurl))
return itemlist
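
The rewritten explorar() now pulls the genre and year menus out of JavaScript variables instead of static markup, in two passes: find_single_match isolates the block, then find_multiple_matches extracts (url, title) pairs. A standalone sketch with plain re against a made-up fragment (the real page markup may well differ):

import re

# Hypothetical fragment shaped like the JavaScript variable the new pattern targets.
data = ("var accion = '<div class=\"generos\">"
        "<li class=\"i\"><a class=\"g\" href=\"/genero/accion\">Accion</a></li>"
        "<li class=\"i\"><a class=\"g\" href=\"/genero/drama\">Drama</a></li>"
        "</div>'")

# Pass 1: isolate the block, as scrapertools.find_single_match would.
bloque = re.search(r"var accion = '<div .+?>(.+?)<\/div>'", data).group(1)

# Pass 2: (url, title) pairs, as scrapertools.find_multiple_matches would.
matches = re.findall(r'<li class=".+?"><a class=".+?" href="(.+?)">(.+?)<\/a><\/li>', bloque)
assert matches == [('/genero/accion', 'Accion'), ('/genero/drama', 'Drama')]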
@@ -78,17 +80,22 @@ def lista(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data) # Eliminamos tabuladores, dobles espacios saltos de linea, etc...
- data_mov= scrapertools.find_single_match(data,'<div id="cuerpo"><div class="iz">(.+)<ul class="pag">')
- patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"' # scrapedurl, scrapedthumbnail, scrapedtitle
+ data_mov= scrapertools.find_single_match(data,'<div class="lista-anime">(.+?)<section class="paginacion">')
+ patron = "<figure class='figure-peliculas'>" #generico
+ patron += " <a href='(.+?)' .+?>.+?" #scrapedurl
+ patron += "<img .+? src=(.+?) alt.+?> " #scrapedthumbnail
+ patron += "<p>(.+?)<\/p>.+?" #scrapedplot
+ patron += "<p class='.+?anho'>(.+?)" #scrapedyear
+ patron += "<\/p>.+?<h2>(.+?)<\/h2>" #scrapedtitle
matches = scrapertools.find_multiple_matches(data_mov, patron)
- for scrapedurl, scrapedthumbnail, scrapedtitle in matches: # scrapedthumbnail, scrapedtitle in matches:
- itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action="findvideos",
+ for scrapedurl, scrapedthumbnail, scrapedplot, scrapedyear, scrapedtitle in matches:
+ if '"' in scrapedthumbnail:
+ scrapedthumbnail=scrapedthumbnail.replace('"','')
+ itemlist.append(item.clone(title=scrapedtitle+' ['+scrapedyear+']', url=scrapedurl, plot=scrapedplot, thumbnail=scrapedthumbnail, action="opcion",
show=scrapedtitle))
# Paginacion
patron_pag = '<a href="([^"]+)" title="Siguiente .+?">'
paginasig = scrapertools.find_single_match(data, patron_pag)
logger.info("algoooosadf "+paginasig)
next_page_url = host + paginasig
if paginasig != "":
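
The new listing pattern captures five groups per movie card (url, thumbnail, plot, year, title) instead of the previous three. A standalone sketch against a made-up card (real markup may differ; note the single-quoted attributes and unquoted src the pattern expects, which is why the diff also strips stray double quotes from the thumbnail):

import re

# Hypothetical card, shaped to what the concatenated pattern expects.
data = ("<figure class='figure-peliculas'> <a href='/pelicula/ejemplo' title='ver'> "
        "<img class='poster' src=/img/ejemplo.jpg alt='Ejemplo'> "
        "<p>Sinopsis breve.</p> <p class='p-anho'>2018</p> <h2>Ejemplo</h2>")

patron = (r"<figure class='figure-peliculas'>"
          r" <a href='(.+?)' .+?>.+?"
          r"<img .+? src=(.+?) alt.+?> "
          r"<p>(.+?)<\/p>.+?"
          r"<p class='.+?anho'>(.+?)"
          r"<\/p>.+?<h2>(.+?)<\/h2>")

url, thumb, plot, anho, titulo = re.findall(patron, data)[0]
assert (url, anho, titulo) == ('/pelicula/ejemplo', '2018', 'Ejemplo')
assert thumb == '/img/ejemplo.jpg' and plot == 'Sinopsis breve.'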
@@ -97,23 +104,35 @@ def lista(item):
thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'))
return itemlist
- def findvideos(item):
+ def opcion(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
- itemlist.extend(servertools.find_video_items(data=data))
- logger.info("holaa" + data)
- patron_show = '<strong>Ver Pel.+?a([^<]+) online<\/strong>'
- show = scrapertools.find_single_match(data, patron_show)
- for videoitem in itemlist:
- videoitem.channel = item.channel
- if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentChannel!='videolibrary':
- itemlist.append(
- Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
- action="add_pelicula_to_library", extra="findvideos", contentTitle=show))
+ logger.info("inflos"+data)
+ patron = '<\/div> <\/div> <a href="(.+?)" class="a-play-cartelera"'
+ scrapedurl = scrapertools.find_single_match(data, patron)
+ #for scrapedurl in match:
+ itemlist.append(item.clone(url=host+scrapedurl, action="findvideos"))
return itemlist
+ # #def findvideos(item):
+ # logger.info()
+ # itemlist = []
+ # data = httptools.downloadpage(item.url).data
+ # data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+ # itemlist.extend(servertools.find_video_items(data=data))
+ # patron_show = '<strong>Ver Pel.+?a([^<]+) online<\/strong>'
+ # show = scrapertools.find_single_match(data, patron_show)
+ # for videoitem in itemlist:
+ # videoitem.channel = item.channel
+ # if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentChannel!='videolibrary':
+ # itemlist.append(
+ # Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
+ # action="add_pelicula_to_library", extra="findvideos", contentTitle=show))
+ # return itemlist
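
With this restructuring, lista() hands each result to the new opcion() step, which grabs the single play-button link from the detail page and re-dispatches to findvideos with host + scrapedurl. A standalone sketch of that extraction (made-up fragment; the pattern relies on the earlier re.sub having collapsed runs of whitespace to single spaces):

import re

# Hypothetical detail-page fragment after whitespace normalization.
data = '</div> </div> <a href="/ver/ejemplo-2018" class="a-play-cartelera">'

patron = r'<\/div> <\/div> <a href="(.+?)" class="a-play-cartelera"'
scrapedurl = re.search(patron, data).group(1)
assert scrapedurl == "/ver/ejemplo-2018"
# The channel then queues item.clone(url=host + scrapedurl, action="findvideos").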

View File

@@ -38,7 +38,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="opciones", title="Opciones",
thumbnail=get_thumb("search.png")))
itemlist.append(Item(channel="tvmoviedb", action="mainlist", title="Busquèda alternativa",
itemlist.append(Item(channel="tvmoviedb", action="mainlist", title="Búsqueda alternativa",
thumbnail=get_thumb("search.png")))
saved_searches_list = get_saved_searches()

View File

@@ -524,8 +524,7 @@ def mark_content_as_watched(item):
# Actualizar toda la serie
new_item = item.clone(contentSeason=-1)
mark_season_as_watched(new_item)
- if config.is_xbmc() and item.contentType == 'episode':
+ if config.is_xbmc(): #and item.contentType == 'episode':
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_content_as_watched_on_kodi(item, item.playcount)
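
Relaxing this guard means the Kodi watched state is now pushed for every content type, not just episodes, so movies marked in Alfa get mirrored too. A self-contained illustration of the behavioral difference (Item and the flags here are stand-ins, not Alfa's real classes):

class Item(object):  # stand-in for Alfa's Item
    def __init__(self, contentType, playcount=1):
        self.contentType = contentType
        self.playcount = playcount

def should_sync_to_kodi(item, old_guard, is_xbmc=True):
    if old_guard:
        return is_xbmc and item.contentType == 'episode'
    return is_xbmc  # new behavior: any content type

assert should_sync_to_kodi(Item('movie'), old_guard=True) is False   # movies were skipped before
assert should_sync_to_kodi(Item('movie'), old_guard=False) is True   # synced now
assert should_sync_to_kodi(Item('episode'), old_guard=True) is True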

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "((?:kbagi.com|diskokosmiko.mx)/[^\\s'\"]+)",
"pattern": "((?:k-bagi.com|diskokosmiko.mx)/[^\\s'\"]+)",
"url": "http://\\1"
}
]
@@ -40,4 +40,4 @@
],
"thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
"version": 1
- }
+ }
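
After JSON unescaping, the updated pattern matches bare k-bagi.com / diskokosmiko.mx links, and the "url" template prepends the scheme. A quick standalone check with made-up text:

import re

pattern = r"((?:k-bagi.com|diskokosmiko.mx)/[^\s'\"]+)"  # as decoded from the JSON above
text = "mirror en k-bagi.com/action/SearchFiles y en diskokosmiko.mx/f/abc123"
urls = ["http://" + m for m in re.findall(pattern, text)]
assert urls == ["http://k-bagi.com/action/SearchFiles", "http://diskokosmiko.mx/f/abc123"]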

View File

@@ -10,7 +10,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
domain = "diskokosmiko.mx"
if "kbagi.com" in page_url:
if "k-bagi.com" in page_url:
domain = "kbagi.com"
logueado, error_message = kbagi.login(domain)
if not logueado:
@@ -28,8 +28,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls = []
data = httptools.downloadpage(page_url).data
host = "http://kbagi.com"
host_string = "kbagi"
host = "http://k-bagi.com"
host_string = "k-bagi"
if "diskokosmiko.mx" in page_url:
host = "http://diskokosmiko.mx"
host_string = "diskokosmiko"