yaske: added series section
@@ -1,6 +1,8 @@
 # -*- coding: utf-8 -*-
 
 import re
+import urllib
+import unicodedata
 
 from core import channeltools
 from core import httptools
@@ -11,7 +13,11 @@ from core.item import Item
 from platformcode import config, logger
 
 idiomas1 = {"/es.png":"CAST","/en_es.png":"VOSE","/la.png":"LAT","/en.png":"ENG"}
-HOST = 'http://www.yaske.ro'
+domain = "yaske.ro"
+HOST = "http://www." + domain
+HOST_MOVIES = "http://peliculas." + domain + "/now_playing/"
+HOST_TVSHOWS = "http://series." + domain + "/popular/"
+HOST_TVSHOWS_TPL = "http://series." + domain + "/tpl"
 parameters = channeltools.get_channel_parameters('yaske')
 fanart_host = parameters['fanart']
 thumbnail_host = parameters['thumbnail']
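Every section host is now derived from the single `domain` constant, so a future domain move touches one line. A minimal sketch of the pattern (the assertion only illustrates the composed value):

    # All subdomain hosts are built from `domain`; change it once and
    # every section URL follows.
    domain = "yaske.ro"
    HOST_TVSHOWS = "http://series." + domain + "/popular/"
    assert HOST_TVSHOWS == "http://series.yaske.ro/popular/"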
@@ -26,38 +32,140 @@ def mainlist(item):
     item.fanart = fanart_host
     thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"
 
-    itemlist.append(item.clone(title="Novedades", action="peliculas", text_bold=True, viewcontent='movies',
-                               url=HOST,
+    itemlist.append(item.clone(title="Peliculas", text_bold=True, viewcontent='movies',
                                thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
-    itemlist.append(item.clone(title="Estrenos", action="peliculas", text_bold=True,
+    itemlist.append(item.clone(title=" Novedades", action="peliculas", viewcontent='movies',
+                               url=HOST_MOVIES,
+                               thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
+    itemlist.append(item.clone(title=" Estrenos", action="peliculas",
                                url=HOST + "/premiere", thumbnail=thumbnail % 'estrenos'))
-    itemlist.append(item.clone(title="Género", action="menu_buscar_contenido", text_bold=True, thumbnail=thumbnail % 'generos', viewmode="thumbnails",
+    itemlist.append(item.clone(title=" Género", action="menu_buscar_contenido", thumbnail=thumbnail % 'generos', viewmode="thumbnails",
                                url=HOST
                                ))
+    itemlist.append(item.clone(title=" Buscar película", action="search", thumbnail=thumbnail % 'buscar',
+                               type="movie"))
 
-    itemlist.append(item.clone(title="", folder=False))
-    itemlist.append(item.clone(title="Buscar por título", action="search", thumbnail=thumbnail % 'buscar'))
+    itemlist.append(item.clone(title="Series", text_bold=True, viewcontent='movies',
+                               thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
+    itemlist.append(item.clone(title=" Novedades", action="series", viewcontent='movies',
+                               url=HOST_TVSHOWS,
+                               thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
+    itemlist.append(item.clone(title=" Buscar serie", action="search", thumbnail=thumbnail % 'buscar',
+                               type="tvshow"))
 
     return itemlist
 
 
+def series(item):
+    logger.info()
+    itemlist = []
+    url_p = scrapertools.find_single_match(item.url, '(.*?).page=')
+    page = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
+    if not page:
+        page = 1
+        url_p = item.url
+    else:
+        page = int(page) + 1
+    if "search" in item.url:
+        url_p += "&page=%s" % page
+    else:
+        url_p += "?page=%s" % page
+    data = httptools.downloadpage(url_p).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron = '(?s)class="post-item-image btn-play-item".*?'
+    patron += 'href="(http://series[^"]+)">.*?'
+    patron += '<img data-original="([^"]+)".*?'
+    patron += 'glyphicon-play-circle"></i>([^<]+).*?'
+    patron += 'glyphicon-calendar"></i>([^<]+).*?'
+    patron += 'text-muted f-14">(.*?)</h3'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedthumbnail, scrapedepisodes, year, scrapedtitle in matches:
+        scrapedepisodes = scrapedepisodes.strip()
+        year = year.strip()
+        contentTitle = scrapertools.htmlclean(scrapedtitle.strip())
+        title = "%s (%s)" % (contentTitle, scrapedepisodes)
+        if "series" in scrapedurl:
+            itemlist.append(Item(channel=item.channel, action="temporadas", title=title, url=scrapedurl,
+                                 thumbnail=scrapedthumbnail, show=contentTitle,
+                                 infoLabels={"year": year}, text_color=color1))
+    # Fetch the basic data for every entry using multiple threads
+    tmdb.set_infoLabels(itemlist, True)
+
+    # Add pagination if needed
+    patron_next_page = 'href="([^"]+)">\s*&raquo;'
+    matches_next_page = scrapertools.find_single_match(data, patron_next_page)
+    if matches_next_page and len(itemlist) > 0:
+        itemlist.append(
+            Item(channel=item.channel, action="series", title=">> Página siguiente", thumbnail=thumbnail_host,
+                 url=url_p, folder=True, text_color=color3, text_bold=True))
+    return itemlist
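For reference, the pagination rule used by `series()` (and by `peliculas()` below) can be isolated in a small sketch; the URLs are illustrative and no network access is involved:

    # Sketch of the page-bump logic: strip any existing "page=" marker,
    # increment the counter, and join with "&" for search URLs, "?" otherwise.
    import re

    def next_page_url(url):
        base = re.search(r'(.*?).page=', url)
        page = re.search(r'page=([0-9]+)', url)
        if not page:
            base, page = url, 1
        else:
            base, page = base.group(1), int(page.group(1)) + 1
        sep = "&" if "search" in url else "?"
        return "%s%spage=%s" % (base, sep, page)

    print(next_page_url("http://series.yaske.ro/popular/"))         # ...?page=1
    print(next_page_url("http://series.yaske.ro/popular/?page=1"))  # ...?page=2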
+
+
+def temporadas(item):
+    logger.info()
+    itemlist = []
+    post = []
+    data = httptools.downloadpage(item.url).data
+    patron = 'media-object" src="([^"]+).*?'
+    patron += 'media-heading">([^<]+).*?'
+    patron += '<code>(.*?)</div>'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedthumbnail, scrapedtitle, scrapedcapitulos in matches:
+        id = scrapertools.find_single_match(item.url, "yaske.ro/([0-9]+)")
+        season = scrapertools.find_single_match(scrapedtitle, "[0-9]+")
+        title = scrapedtitle + " (%s)" % scrapedcapitulos.replace("</code>", "").replace("\n", "")
+        post = {"data[season]": season, "data[id]": id, "name": "list_episodes", "both": "0", "type": "template"}
+        post = urllib.urlencode(post)
+        item.infoLabels["season"] = season
+        itemlist.append(item.clone(action="capitulos",
+                                   post=post,
+                                   title=title,
+                                   url=HOST_TVSHOWS_TPL
+                                   ))
+    tmdb.set_infoLabels(itemlist)
+    return itemlist
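Episode lists come from the `/tpl` endpoint via a form-encoded POST that selects the `list_episodes` template; a sketch of the payload, with made-up id/season values:

    import urllib  # Python 2; use urllib.parse.urlencode on Python 3

    post = {"data[season]": "1", "data[id]": "1396", "name": "list_episodes",
            "both": "0", "type": "template"}
    print(urllib.urlencode(post))
    # e.g. data%5Bseason%5D=1&data%5Bid%5D=1396&name=list_episodes&both=0&type=template
    # (key order is arbitrary for a dict)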
+
+
+def capitulos(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url, post=item.post).data
+    data = data.replace("<wbr>", "")
+    patron = 'href=."([^"]+).*?'
+    patron += 'media-heading.">([^<]+).*?'
+    patron += 'fecha de emisi.*?: ([^<]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedtitle, scrapeddate in matches:
+        scrapedtitle = scrapedtitle + " (%s)" % scrapeddate
+        episode = scrapertools.find_single_match(scrapedurl, "capitulo-([0-9]+)")
+        query = item.show + " " + str(item.infoLabels["season"]) + "x" + episode.rjust(2, "0")
+        item.infoLabels["episode"] = episode
+        itemlist.append(item.clone(action="findvideos",
+                                   title=scrapedtitle.decode("unicode-escape"),
+                                   query=query.replace(" ", "+"),
+                                   url=scrapedurl.replace("\\", "")
+                                   ))
+    tmdb.set_infoLabels(itemlist)
+    return itemlist
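Each episode link is turned into a `Show SxEE` search query with a zero-padded episode number; a quick sketch with illustrative values:

    show, season, episode = "Some Show", 1, "5"
    query = show + " " + str(season) + "x" + episode.rjust(2, "0")
    print(query.replace(" ", "+"))  # Some+Show+1x05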
+
+
 def search(item, texto):
     logger.info()
     itemlist = []
 
     try:
         item.url = HOST + "/search/?query=" + texto.replace(' ', '+')
         item.extra = ""
-        itemlist.extend(peliculas(item))
+        if item.type == "movie":
+            itemlist.extend(peliculas(item))
+        else:
+            itemlist.extend(series(item))
         if itemlist[-1].title == ">> Página siguiente":
             item_pag = itemlist[-1]
             itemlist = sorted(itemlist[:-1], key=lambda Item: Item.contentTitle)
             itemlist.append(item_pag)
         else:
             itemlist = sorted(itemlist, key=lambda Item: Item.contentTitle)
 
         return itemlist
 
     except:
         import sys
         for line in sys.exc_info():
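`search()` sorts results by title while keeping the pagination entry pinned last; the trick in isolation (a minimal stand-in class replaces the real `core.item.Item`):

    class FakeItem(object):  # stand-in for core.item.Item
        def __init__(self, title, contentTitle=""):
            self.title, self.contentTitle = title, contentTitle

    results = [FakeItem("Zeta", "Zeta"), FakeItem("Alfa", "Alfa"),
               FakeItem(">> Página siguiente")]
    if results[-1].title == ">> Página siguiente":
        item_pag = results.pop()
        results = sorted(results, key=lambda i: i.contentTitle)
        results.append(item_pag)
    print([i.title for i in results])  # ['Alfa', 'Zeta', '>> Página siguiente']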
@@ -77,7 +185,6 @@ def newest(categoria):
             item.url = HOST + "/genre/27/"
         else:
             return []
-
         itemlist = peliculas(item)
         if itemlist[-1].title == ">> Página siguiente":
             itemlist.pop()
@@ -95,8 +202,18 @@ def newest(categoria):
 def peliculas(item):
     logger.info()
     itemlist = []
 
-    data = httptools.downloadpage(item.url).data
+    url_p = scrapertools.find_single_match(item.url, '(.*?).page=')
+    page = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
+    if not page:
+        page = 1
+        url_p = item.url
+    else:
+        page = int(page) + 1
+    if "search" in item.url:
+        url_p += "&page=%s" % page
+    else:
+        url_p += "?page=%s" % page
+    data = httptools.downloadpage(url_p).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
     patron = '(?s)class="post-item-image btn-play-item".*?'
     patron += 'href="([^"]+)">.*?'
@@ -105,12 +222,8 @@ def peliculas(item):
     patron += 'post(.*?)</div.*?'
     patron += 'text-muted f-14">(.*?)</h3'
     matches = scrapertools.find_multiple_matches(data, patron)
-    patron_next_page = 'href="([^"]+)">\s*&raquo;'
-    matches_next_page = scrapertools.find_single_match(data, patron_next_page)
-    if len(matches_next_page) > 0:
-        url_next_page = item.url + matches_next_page
-
     for scrapedurl, scrapedthumbnail, year, idiomas, scrapedtitle in matches:
+        query = scrapertools.find_single_match(scrapedurl, 'yaske.ro/[0-9]+/(.*?)/').replace("-", "+")
         year = year.strip()
         patronidiomas = '<img src="([^"]+)"'
         matchesidiomas = scrapertools.find_multiple_matches(idiomas, patronidiomas)
@@ -125,27 +238,26 @@ def peliculas(item):
         title = "%s %s" % (contentTitle, idiomas_disponibles)
         itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
                              thumbnail=scrapedthumbnail, contentTitle=contentTitle,
-                             infoLabels={"year": year}, text_color=color1))
+                             infoLabels={"year": year}, text_color=color1, query=query))
     # Fetch the basic data for every entry using multiple threads
     tmdb.set_infoLabels(itemlist)
 
     # Add pagination if needed
-    if matches_next_page:
+    patron_next_page = 'href="([^"]+)">\s*&raquo;'
+    matches_next_page = scrapertools.find_single_match(data, patron_next_page)
+    if matches_next_page and len(itemlist) > 0:
         itemlist.append(
             Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
-                 url=url_next_page, folder=True, text_color=color3, text_bold=True))
-
+                 url=url_p, folder=True, text_color=color3, text_bold=True))
     return itemlist
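The new `query` field passed on to `findvideos()` is carved out of the movie URL slug; a sketch of the extraction with an illustrative URL:

    import re

    scrapedurl = "http://www.yaske.ro/12345/some-movie-title/"
    query = re.search(r'yaske.ro/[0-9]+/(.*?)/', scrapedurl).group(1).replace("-", "+")
    print(query)  # some+movie+title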
 
 
 def menu_buscar_contenido(item):
     logger.info(item)
     itemlist = []
 
     data = httptools.downloadpage(item.url).data
     patron = 'Generos.*?</ul>'
     data = scrapertools.find_single_match(data, patron)
     # Extract the entries
     patron = 'href="([^"]+)">([^<]+)'
     matches = scrapertools.find_multiple_matches(data, patron)
     for scrapedurl, scrapedtitle in matches:
@@ -159,11 +271,7 @@ def menu_buscar_contenido(item):
                                    folder=True,
                                    viewmode="movie_with_plot"
                                    ))
 
-    if item.extra in ['genre', 'audio', 'year']:
-        return sorted(itemlist, key=lambda i: i.title.lower(), reverse=item.extra == 'year')
-    else:
-        return itemlist
+    return itemlist
 
 
 def findvideos(item):
@@ -171,11 +279,10 @@ def findvideos(item):
     itemlist = []
     sublist = []
     data = httptools.downloadpage(item.url).data
-    mtmdb = scrapertools.find_single_match(item.url, 'yaske.ro/([0-9]+)')
     patron = '(?s)id="online".*?server="([^"]+)"'
     mserver = scrapertools.find_single_match(data, patron)
-    url_m = "http://olimpo.link/?tmdb=%s&server=%s" % (mtmdb, mserver)
-    patron = '/\?tmdb=[^"]+.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
+    url_m = "http://olimpo.link/?q=%s&server=%s" % (item.query, mserver)
+    patron = 'class="favicon.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
    patron += '\[([^\]]+)\].*?\[([^\]]+)\]'
     data = httptools.downloadpage(url_m).data
     matches = scrapertools.find_multiple_matches(data, patron)
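`findvideos()` now queries the olimpo.link resolver by title query instead of TMDB id; the request URL it builds, sketched with illustrative values:

    query = "some+movie+title"  # item.query, already URL-ready
    mserver = "3"               # scraped from the page's server="" attribute
    url_m = "http://olimpo.link/?q=%s&server=%s" % (query, mserver)
    print(url_m)  # http://olimpo.link/?q=some+movie+title&server=3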