# -*- coding: utf-8 -*-
# Kodi channel scraper for divxatope1.com (Python 2 codebase: urlparse,
# urllib.urlencode and unicode() are used throughout).
#
# NOTE(review): this file was recovered from a whitespace-mangled copy.
# Several regex pattern literals lost their embedded HTML markup during
# that mangling; the surviving fragments are kept verbatim and flagged
# with "corrupted" notes below. TODO: restore the full patterns from
# upstream history before relying on the scraping functions.

import re
import urllib
import urlparse

from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger


def mainlist(item):
    """Build the channel's root menu: movies, series and search entries."""
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, action="menu", title="Películas",
                         url="http://www.divxatope1.com/", extra="Peliculas", folder=True))
    itemlist.append(Item(channel=item.channel, action="menu", title="Series",
                         url="http://www.divxatope1.com", extra="Series", folder=True))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar..."))
    return itemlist


def menu(item):
    """Scrape the section menu (item.extra is "Peliculas" or "Series")."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # logger.info("data="+data)
    data = scrapertools.find_single_match(data, item.extra + "")
    # logger.info("data="+data)
    # NOTE(review): corrupted pattern — the HTML tags inside this literal were
    # stripped when the file was mangled; only this fragment survives.
    patron = "]+>([^<]+)"
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = ""
        plot = ""
        itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url,
                             thumbnail=thumbnail, plot=plot, folder=True))
        # Every category except the catch-all one also gets an A-Z browser.
        if title != "Todas las Peliculas":
            itemlist.append(Item(channel=item.channel, action="alfabetico", title=title + " [A-Z]",
                                 url=url, thumbnail=thumbnail, plot=plot, folder=True))
    if item.extra == "Peliculas":
        # The 4K section is not listed in the scraped menu; add it manually.
        title = "4k UltraHD"
        url = "http://divxatope1.com/peliculas-hd/4kultrahd/"
        thumbnail = ""
        plot = ""
        itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url,
                             thumbnail=thumbnail, plot=plot, folder=True))
        itemlist.append(Item(channel=item.channel, action="alfabetico", title=title + " [A-Z]",
                             url=url, thumbnail=thumbnail, plot=plot, folder=True))
    return itemlist


def search(item, texto):
    """Search the site for `texto`; called by the global search framework."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = "http://www.divxatope1.com/buscar/descargas"
    item.extra = urllib.urlencode({'q': texto})
    try:
        itemlist = lista(item)
        # This site sometimes returns duplicated entries; keep only the first
        # occurrence of each URL. (Fixed: the previous code removed items from
        # the list while iterating it, which silently skips elements.)
        seen = {}
        deduped = []
        for i in itemlist:
            if i.url not in seen:
                seen[i.url] = i
                deduped.append(i)
        return deduped
    # Catch everything so a failing channel does not break the global search.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def newest(categoria):
    """Return the latest entries for the "novedades" (what's new) section."""
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = "http://www.divxatope1.com/peliculas"
        elif categoria == 'series':
            item.url = "http://www.divxatope1.com/series"
        else:
            return []
        itemlist = lista(item)
        # Drop the pagination pseudo-item if present (guarded against an
        # empty result list, which previously raised IndexError).
        if itemlist and itemlist[-1].title == ">> Página siguiente":
            itemlist.pop()
        # This site sometimes returns duplicated entries; keep only the first
        # occurrence of each URL. (Fixed: the previous code removed items from
        # the list while iterating it, which silently skips elements.)
        seen = {}
        deduped = []
        for i in itemlist:
            if i.url not in seen:
                seen[i.url] = i
                deduped.append(i)
        itemlist = deduped
    # Catch everything so a failing channel does not break the novedades view.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist


def alfabetico(item):
    """Scrape the A-Z index links for the current category."""
    logger.info()
    itemlist = []
    # NOTE(review): the alternation "()" at the end of this regex looks like a
    # remnant of a stripped HTML-comment pattern — confirm against upstream.
    data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    # NOTE(review): corrupted pattern — original extraction pattern was lost.
    patron = ''
    data = scrapertools.get_match(data, patron)
    # NOTE(review): corrupted pattern — only this fragment survives.
    patron = ']+>([^>]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.upper()
        url = scrapedurl
        itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url))
    return itemlist


def lista(item):
    """List the entries of a category page (also used by search/newest)."""
    logger.info()
    itemlist = []
    # Download the page (item.extra carries the POST payload for searches).
    data = httptools.downloadpage(item.url, post=item.extra).data
    # logger.info("data="+data)
    # NOTE(review): the source file is TRUNCATED at this point — the pattern
    # literal was cut off at "'(?:" and the rest of this function is missing.
    # The string is closed minimally so the module stays importable; restore
    # the full body from upstream history.
    bloque = scrapertools.find_single_match(data, '(?:')
    return itemlist