# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
host = 'https://yonkis.to'
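

# Build the channel's main menu: alphabetical listing, most viewed series,
# latest added episodes and search.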
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, action="alfabetico", title="Listado alfabetico", url=host))
itemlist.append(Item(channel=item.channel, action="mas_vistas", title="Series más vistas",
url=host + "/series-mas-vistas"))
itemlist.append(Item(channel=item.channel, action="ultimos", title="Últimos episodios añadidos",
url=host))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + "/buscar/serie"))
return itemlist
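

# Build the 0-9 and A-Z index of the series list pages.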
def alfabetico(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, action="series", title="0-9", url=host + "/lista-de-series/0-9"))
for letra in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
itemlist.append(Item(channel=item.channel, action="series", title=letra, url=host+"/lista-de-series/"+letra))
return itemlist
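

# Scrape the "most viewed series" page and return one item per series.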
def mas_vistas(item):
logger.info()
data = httptools.downloadpage(item.url).data
    # The original regex for the most-viewed list was lost; the pattern below is an
    # approximate reconstruction that captures the title, url and thumbnail of each series.
    matches = re.compile('<a title="([^"]+)" href="([^"]+)"[^<]*<img[^>]+src="([^"]+)"', re.S).findall(data)
itemlist = []
for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
scrapedurl = urlparse.urljoin(item.url, scrapedurl)
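        # Ask for a bigger thumbnail by swapping the size segment of the image url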
scrapedthumbnail = urlparse.urljoin(item.url, scrapedthumbnail.replace("/90/", "/150/"))
itemlist.append(
Item(channel=item.channel, action="episodios", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, show=scrapedtitle, fanart=item.fanart))
return itemlist
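

# Search the site for series matching the given text (called by the global search).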
def search(item, texto):
logger.info()
itemlist = []
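    # The search endpoint expects a POST with the keyword and the content type (serie)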
post = "keyword=%s&search_type=serie" % texto
data = httptools.downloadpage(item.url, post=post).data
    try:
        # The original regexes were lost; the patterns below are approximate
        # reconstructions of the results list and of the "next page" link.
        patron = '<ul class="search-list">(.*?)</ul>'
        bloque = scrapertools.find_single_match(data, patron)
        matches = re.compile('title="([^"]+)" href="([^"]+)"', re.S).findall(bloque)
        for title, url in matches:
            itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title,
                                 url=urlparse.urljoin(item.url, url), thumbnail=item.thumbnail, show=title))
        # Pagination: if there is a next page, show the link at the top and at the bottom
        matches = re.compile('<a href="([^"]+)" rel="next"', re.S).findall(data)
        paginador = None
        if len(matches) > 0:
            paginador = Item(channel=item.channel, action="series", title=">> Página siguiente",
                             url=urlparse.urljoin(item.url, matches[0]), thumbnail=item.thumbnail, show=item.show)
        if paginador and len(itemlist) > 0:
            itemlist.insert(0, paginador)
            itemlist.append(paginador)
        return itemlist
    # Catch any exception so that a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
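

# List the episodes of the selected series.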
def episodios(item):
logger.info()
itemlist = []
    # Download the page and collapse whitespace so the regex can match on a single line
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    # The original HTML markers in this regex were lost; this is an approximate
    # reconstruction keeping the three capture groups (url, title and thumbnail alt text).
    pattern = '<a href="(.*?)"[^>]*>(.*?)</a>.*?<img[^>]+alt="([^"]+)" />'