# -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
host = 'http://mispelisyseries.com/'
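
# NOTE (illustrative, not part of the original channel): the scrapers below
# re-encode every downloaded page with the Python 2 built-in `unicode`. A
# minimal sketch of that idiom as a reusable helper, assuming Python 2:
def _recode(raw):
    # Decode iso-8859-1 bytes, replacing invalid sequences, then re-encode
    # as utf-8 so downstream string handling is uniform.
    return unicode(raw, "iso-8859-1", errors="replace").encode("utf-8")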


def mainlist(item):
    logger.info()
    itemlist = []

    thumb_pelis = get_thumb("channels_movie.png")
    thumb_series = get_thumb("channels_tvshow.png")
    thumb_search = get_thumb("search.png")

    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
                         extra="peliculas", thumbnail=thumb_pelis))
    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
                         thumbnail=thumb_series))
    itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios",
                         thumbnail=thumb_series))
    itemlist.append(
        Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search))

    return itemlist
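
# Illustrative note (not in the original file): the platform dispatches on
# Item.action, so selecting the "Películas" entry above re-enters this module
# through submenu(item) with item.extra == "peliculas". A hypothetical
# dispatcher would look like:
#
#     def run(item):
#         return globals()[item.action](item)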


def submenu(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("'", "\"").replace("/series\"", "/series/\"")  # compatibility with mispelisy.series.com

    # The HTML literals inside this pattern were lost when the file was
    # extracted; the tag and class names below are placeholder reconstructions,
    # not the site's verified markup.
    patron = '<div class="menu">.*?<ul>(.*?)</ul>'
    if "pelisyseries.com" in host and item.extra == "varios":  # compatibility with mispelisy.series.com
        # Hard-coded fallback anchor; the original href was lost in extraction,
        # so the URL below is an assumption built from `host`.
        data = '<a href="%svarios/">Documentales</a>' % host
    else:
        data = scrapertools.get_match(data, patron)

    patron = '<.*?href="([^"]+)".*?>([^>]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.strip()
        url = scrapedurl
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
        itemlist.append(
            Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))

    if item.extra == "peliculas":
        itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K",
                             url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
        itemlist.append(Item(channel=item.channel, action="alfabeto", title="Películas 4K [A-Z]",
                             url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))

    return itemlist


def alfabeto(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    # Both patterns below lost their HTML literals in extraction; the tag and
    # class names are placeholder reconstructions.
    patron = '<ul class="alfabeto">(.*?)</ul>'
    data = scrapertools.get_match(data, patron)

    patron = '<a href="([^"]+)"[^>]+>([^>]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.upper()
        url = scrapedurl
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))

    return itemlist


def listado(item):
    logger.info()
    itemlist = []
    url_next_page = ''

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    # data = httptools.downloadpage(item.url).data
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    if item.modo != 'next':
        # Placeholder reconstruction: the pattern's HTML literals were lost in
        # extraction; the "pelilist" class name is taken from the extra= value
        # used throughout this channel.
        patron = '<ul class="pelilist">(.*?)</ul>'
        fichas = scrapertools.get_match(data, patron)
        page_extra = item.extra
    else:
        fichas = data
        page_extra = item.extra

    # Five capture groups are required below: url, title, thumbnail, alternate
    # title and quality. The HTML fragments are placeholder reconstructions
    # (the originals were lost in extraction).
    patron = '<a href="([^"]+)"[^>]*title="([^"]+)"[^>]*>.*?'  # url + title
    patron += '<img src="([^"]+)"[^>]*>.*?'                    # thumbnail
    patron += '<h2[^>]*>(.*?)?<\/h2>.*?'                       # alternate title
    patron += '<span[^>]*>([^<].*?)?<'                         # quality
    # logger.debug("patron: " + patron + " / fichas: " + fichas)
    matches = re.compile(patron, re.DOTALL).findall(fichas)
    # Pagination: results are served in chunks of 30 (see the sketch after
    # this function)
    if item.next_page != 'b':
        if len(matches) > 30:
            url_next_page = item.url
            matches = matches[:30]
            next_page = 'b'
            modo = 'continue'
    else:
        matches = matches[30:]
        next_page = 'a'
        # The pattern's HTML was lost in extraction; it must capture the href
        # of the site's "Next" link (attributes are a placeholder
        # reconstruction).
        patron_next_page = '<a href="([^"]+)"[^>]*>Next<\/a>'
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        modo = 'continue'
        if len(matches_next_page) > 0:
            url_next_page = matches_next_page[0]
            modo = 'next'
    for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
        url = scrapedurl
        # Repair mojibake (the "Ã±" source literal is an assumption; the
        # original was corrupted in extraction) and re-space squashed keywords
        title = scrapedtitle.replace("�", "ñ").replace("Ã±", "ñ")
        title = title.replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing")
        title = title.replace("Eng", " Eng").replace("Calidad", "")
        title_alt = title_alt.replace("�", "ñ").replace("Ã±", "ñ")
        title_alt = title_alt.replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing")
        title_alt = title_alt.replace("Eng", " Eng").replace("Calidad", "")
        thumbnail = scrapedthumbnail
        action = "findvideos"
        extra = ""
        context = "movie"
        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')

        if ".com/serie" in url and "/miniseries" not in url:
            action = "episodios"
            extra = "serie"
            context = "tvshow"
            title = scrapertools.find_single_match(title, '([^-]+)')
            title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace(
                "Ver en linea ", "", 1).strip()
        else:
            title = title.replace("Descargar torrent ", "", 1).replace("Descarga Gratis ", "", 1)
            title = title.replace("Descargar Estreno ", "", 1).replace("Pelicula en latino ", "", 1)
            title = title.replace("Descargar Pelicula ", "", 1).replace("Descargar", "", 1)
            title = title.replace("Descarga", "", 1).replace("Bajar", "", 1).strip()
            if title.endswith("gratis"): title = title[:-7]
            if title.endswith("torrent"): title = title[:-8]
            if title.endswith("en HD"): title = title[:-6]

        if title == "":
            title = title_alt
        context_title = title_alt
        show = title_alt
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
if calidad:
title = title + ' [' + calidad + "]"
if not 'array' in title:
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
extra = extra, show = context_title, contentTitle=context_title, contentType=context, quality=calidad,
context=["buscar_trailer"], infoLabels= {'year':year}))
logger.debug("url: " + url + " / title: " + title + " / contxt title: " + context_title + " / context: " + context + " / calidad: " + calidad+ " / year: " + year)
tmdb.set_infoLabels(itemlist, True)
if url_next_page:
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
url=url_next_page, next_page=next_page, folder=True,
text_color='yellow', text_bold=True, modo = modo, plot = extra,
extra = page_extra))
return itemlist
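
# Illustrative sketch (not part of the original channel) of the pagination
# scheme used by listado() above: a result page is split into chunks of 30,
# next_page 'b' means "serve the remainder of the same URL", and modo 'next'
# means "fetch the site's own Next link".
def _paginate_demo(matches, next_page):
    # First pass: show up to 30 results and flag 'b' so the same page is
    # re-parsed for the remainder on the next call.
    if next_page != 'b':
        if len(matches) > 30:
            return matches[:30], 'b'
        return matches, ''
    # Second pass: show the rest and go back to 'a' (a fresh page).
    return matches[30:], 'a'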


def listado_busqueda(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    # Mojibake repair table; the left-hand literal was corrupted in extraction,
    # so "Ã±" is an assumption (utf-8 "ñ" read back as iso-8859-1).
    list_chars = [["Ã±", "ñ"]]
    for el in list_chars:
        data = re.sub(r"%s" % el[0], el[1], data)

    try:
        get, post = scrapertools.find_single_match(data, '