diff --git a/plugin.video.alfa/channels/LIKUOO.py b/plugin.video.alfa/channels/LIKUOO.py
index 20925e3c..2ca05d84 100644
--- a/plugin.video.alfa/channels/LIKUOO.py
+++ b/plugin.video.alfa/channels/LIKUOO.py
@@ -7,8 +7,6 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
-from core import tmdb
-from core import jsontools
host = 'http://www.likuoo.video'
@@ -16,7 +14,8 @@ host = 'http://www.likuoo.video'
def mainlist(item):
logger.info()
itemlist = []
- itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host))
+ itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host))
+ itemlist.append( Item(channel=item.channel, title="Pornstar" , action="categorias", url=host + "/pornstars/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/all-channels/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -27,7 +26,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search/?s=%s" % texto
try:
- return peliculas(item)
+ return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -46,16 +45,24 @@ def categorias(item):
scrapedplot = ""
scrapedthumbnail = "https:" + scrapedthumbnail
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
- itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
+ itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
+ thumbnail=scrapedthumbnail, plot=scrapedplot) )
+ next_page = scrapertools.find_single_match(data,'<a href="([^"]+)">...»</a>')
+ if next_page!="":
+ next_page = urlparse.urljoin(item.url,next_page)
+ itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
-def peliculas(item):
+def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
- patron = '
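
[Note: the diff is truncated at the 'patron' regex in lista(). As a rough illustration only, a listing loop in an alfa channel usually continues along the lines sketched below; the regex, the field order, and the "play" action are assumptions for illustration, not the actual code from this commit.]

    # Sketch only: this 'patron' is a hypothetical regex, not the one from this commit
    patron = '<a href="([^"]+)" title="([^"]+)".*?<img src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedurl = urlparse.urljoin(item.url, scrapedurl)
        # action="play" is an assumption; alfa channels commonly route video items this way
        itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle,
                             url=scrapedurl, thumbnail=scrapedthumbnail))
    # Pagination would mirror the pattern added to categorias() above
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">...»</a>')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist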