# -*- coding: utf-8 -*-
import re
import urlparse

from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Películas", action="menupeliculas",
                         url="http://www.bajui2.com/descargas/categoria/2/peliculas", fanart=item.fanart))
    itemlist.append(Item(channel=item.channel, title="Series", action="menuseries", fanart=item.fanart))
    itemlist.append(Item(channel=item.channel, title="Documentales", action="menudocumentales", fanart=item.fanart))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", fanart=item.fanart))
    return itemlist


def menupeliculas(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Películas - Novedades", action="peliculas",
                         url=item.url, fanart=item.fanart, viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, title="Películas - A-Z", action="peliculas",
                         url=item.url + "/orden:nombre", fanart=item.fanart, viewmode="movie_with_plot"))

    # Scrape the genre submenu. Both HTML patterns below are assumed
    # reconstructions; the original snippets were lost, but the (url, title)
    # capture groups imply a block of plain <a> links.
    data = scrapertools.cache_page(item.url)
    data = scrapertools.get_match(data, '<ul class="subcategorias">(.*?)</ul>')  # assumed delimiter
    patron = '<a href="([^"]+)"[^>]*>([^<]+)</a>'  # assumed link pattern
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, title in matches:
        scrapedurl = urlparse.urljoin(item.url, url)
        itemlist.append(Item(channel=item.channel, title="Películas en " + title, action="peliculas",
                             url=scrapedurl, fanart=item.fanart, viewmode="movie_with_plot"))

    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="", fanart=item.fanart))
    return itemlist


def menuseries(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Series - Novedades", action="peliculas",
                         url="http://www.bajui2.com/descargas/categoria/3/series",
                         fanart=item.fanart, viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, title="Series - A-Z", action="peliculas",
                         url="http://www.bajui2.com/descargas/categoria/3/series/orden:nombre",
                         fanart=item.fanart, viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, title="Series - HD", action="peliculas",
                         url="http://www.bajui2.com/descargas/subcategoria/11/hd/orden:nombre",
                         fanart=item.fanart, viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="", fanart=item.fanart))
    return itemlist


def menudocumentales(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Documentales - Novedades", action="peliculas",
                         url="http://www.bajui2.com/descargas/categoria/7/docus-y-tv",
                         fanart=item.fanart, viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, title="Documentales - A-Z", action="peliculas",
                         url="http://www.bajui2.com/descargas/categoria/7/docus-y-tv/orden:nombre",
                         fanart=item.fanart, viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="", fanart=item.fanart))
    return itemlist


# Because this function is named "search", the launcher prompts the user for
# a search string and passes it in as the "texto" parameter.
def search(item, texto, categoria=""):
    logger.info(item.url + " search " + texto)
    itemlist = []
    url = item.url
    texto = texto.replace(" ", "+")
    logger.info("categoria: " + categoria + " url: " + url)
    try:
        item.url = "http://www.bajui2.com/descargas/busqueda/%s"
        item.url = item.url % texto
        itemlist.extend(peliculas(item))
        return itemlist
    # Catch the exception so a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
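
# Minimal sketch of the calling convention assumed above (hypothetical
# values; the real launcher collects the text from the user):
#
#   item = Item(channel="bajui2", action="search", url="")
#   results = search(item, "the matrix")
#   # -> fetches http://www.bajui2.com/descargas/busqueda/the+matrix and
#   #    returns the Items parsed by peliculas()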
logger.error("%s" % line) return [] def peliculas(item, paginacion=True): logger.info() url = item.url # Descarga la página data = scrapertools.cache_page(url) patron = '
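
    # Pager: with paginacion=True the next page is offered as a browsable
    # ">> Página siguiente" entry; with paginacion=False the function
    # recurses into every following page and returns one flattened list.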
    # The original "next page" pattern was lost; a plain anchor capture is assumed.
    patron = '<a href="([^"]+)"[^>]*>Siguiente</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedurl = urlparse.urljoin("http://www.bajui2.com/", matches[0])
        pagitem = Item(channel=item.channel, action="peliculas", title=">> Página siguiente",
                       url=scrapedurl, fanart=item.fanart, viewmode="movie_with_plot")
        if not paginacion:
            itemlist.extend(peliculas(pagitem))
        else:
            itemlist.append(pagitem)
    return itemlist
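
# Flow note: the launcher dispatches on each returned Item's "action"
# string, so the listing entries built above (action="enlaces") are
# resolved by enlaces() further down when the user selects them.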
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("AÑO[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("Año[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("DURACIÓN[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("Duración[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("PAIS[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("PAÍS[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("Pais[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("País[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("DIRECTOR[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("DIRECCIÓN[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("Dirección[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("REPARTO[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("Reparto[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("Interpretación[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("GUIÓN[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("Guión[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("MÚSICA[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("Música[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("FOTOGRAFÍA[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("Fotografía[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("PRODUCTORA[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("Producción[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("Montaje[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("Vestuario[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("GÉNERO[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("GENERO[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("Genero[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("Género[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("PREMIOS[^<]+
    ", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("SINOPSIS", re.DOTALL).sub("", scrapedplot) scrapedplot = re.compile("Sinopsis", re.DOTALL).sub("", scrapedplot) scrapedplot = scrapertools.htmlclean(scrapedplot) return scrapedplot def enlaces(item): logger.info() itemlist = [] data = scrapertools.cache_page(item.url) try: item.plot = scrapertools.get_match(data, '(.*?)') item.plot = clean_plot(item.plot) except: pass try: item.thumbnail = scrapertools.get_match(data, '
    '''
    Sample of the links block this function parses (tags stripped in extraction):
    Enlaces de: jerobien
    Actualizado: Hace 8 minutos
    uploaded.com bitshare.com freakshare.com letitbit.net turbobit.net rapidgator.net clz.to
    '''
    # Assumed reconstruction of the per-upload pattern. It captured the
    # uploader thumbnail, name, update date, the two ids behind the
    # "Mostrar enlaces" toggle, and the block of server icons; only the
    # fragments "]+>Mostrar enlaces", "[^<]+" and "(.*?)" survived.
    patron = '<img src="([^"]+)"[^>]*>.*?Enlaces de: ([^<]+)<.*?Actualizado: ([^<]+)<'  # assumed
    patron += '.*?enlaces_([0-9]+)_([0-9]+)[^>]+>Mostrar enlaces[^<]+'  # assumed
    patron += '(.*?)</div>'  # assumed
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    logger.debug("matches=" + repr(matches))
    for thumbnail, usuario, fecha, id, id2, servidores in matches:
        # bitshare.com freakshare.com rapidgator.net turbobit.net
        # muchshare.net letitbit.net shareflare.net Otros
        patronservidores = ''  # assumed placeholder; the original pattern is lost