# -*- coding: utf-8 -*-
# Channel scraper for newpct.com (pelisalacarta/Kodi channel, Python 2 era
# code: `urlparse` and `urllib.urlencode` are the Python 2 APIs).
#
# NOTE(review): this file was recovered from a whitespace-flattened paste;
# several regex literals below were visibly truncated by it (marked inline).
import re
import urllib
import urlparse

from core import scrapertools
from core.item import Item
from platformcode import logger


def mainlist(item):
    """Build the channel's root menu."""
    logger.info()
    itemlist = []
    # NOTE(review): action="submenu" is referenced here but no submenu()
    # function is visible in this copy of the file -- confirm it exists.
    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas"))
    itemlist.append(Item(channel=item.channel, action="submenu", title="Series"))
    itemlist.append(Item(channel=item.channel, action="listado", title="Anime",
                         url="http://www.newpct.com/anime/",
                         viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, action="listado", title="Documentales",
                         url="http://www.newpct.com/documentales/",
                         viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
    return itemlist


def search(item, texto):
    """Global-search entry point: build the search URL and delegate to buscador()."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = "http://www.newpct.com/buscar-descargas/%s" % (texto)
    try:
        return buscador(item)
    # The exception is swallowed on purpose so one failing channel does not
    # abort the global search.  Fixed: `except Exception` instead of a bare
    # `except`, so KeyboardInterrupt/SystemExit still propagate.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def buscador(item):
    """Scrape a newpct listing/search page and emit the ">> next page" item.

    The page embeds a javascript ``orderCategory`` helper whose ajax call
    carries all pagination parameters; this function scrapes those parameters
    and rebuilds the ajax URL so the next page can be requested directly.
    """
    logger.info()
    itemlist = []

    # Download the page and collapse whitespace so the regexes below can work
    # on a single line.
    data = scrapertools.cache_page(item.url)
    # NOTE(review): the last alternative of this pattern shows as a bare space
    # in this copy -- it was probably "&nbsp;" before the paste mangled the
    # HTML entity.  Kept byte-for-byte as found.
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)

    # Sample of the scraped markup (kept from the original source; its HTML
    # tags were eaten when the file was flattened):
    #   14-09-14 Malefica 3D SBS [BluRay 1080p][DTS 5.1-AC3 5.1 Castellano DTS 5.1-Ingles+Subs][ES-EN]10.9 GB
    #   Descargar Peliculas Castellano » Películas RIP La Pequeña Venecia [DVDrip][AC3 5.1 Español Castellano][2012]
    #   La Pequeña Venecia
    #   Peliculas Castellano
    #   Calidad: DVDRIP AC3 5.1
    #   Tamaño: 1.1 GB
    #   Idioma : Español Castellano
    #   DESCARGAR
    #
    # Sample of the embedded pagination javascript:
    #   var page = $(this).attr('data'); var dataString = 'page='+page;
    #   $.ajax({ type: "GET",
    #            url: 'http://www.newpct.com/include.inc/ajax.php/orderCategory.php',
    #            data: parametros,
    #            success: function(data) {
    #                // Cargamos finalmente el contenido deseado
    #                $('#content-category').fadeIn(1000).html(data); } });

    # On follow-up pages the orderCategory() block travels in item.extra so it
    # does not have to be re-scraped.
    if item.extra != "":
        bloque = item.extra
    else:
        bloque = scrapertools.get_match(data, "function orderCategory(.*?)\}\)\;")
    logger.info("bloque=" + bloque)

    # NOTE(review): the three patterns below show as "]+> >> " in this copy --
    # their opening HTML tag was eaten by the paste.  Kept as found; restore
    # from the original channel file before shipping.
    param_type = scrapertools.get_match(data, "]+> >> ")
    logger.info("param_type=" + param_type)
    param_leter = scrapertools.get_match(data, "]+> >> ")
    logger.info("param_leter=" + param_leter)
    param_pag = scrapertools.get_match(data, "]+> >> ")
    logger.info("param_pag=" + param_pag)

    param_total = scrapertools.get_match(bloque, '"total"\s*\:\s*\'([^\']+)')
    # Fixed: the original logged this value under the label "param_sql".
    logger.info("param_total=" + param_total)
    param_sql = scrapertools.get_match(bloque, '"sql"\s*\:\s*\'([^\']+)')
    logger.info("param_sql=" + param_sql)
    param_tot = scrapertools.get_match(bloque, "\"tot\"\s*\:\s*'([^']*)'")
    logger.info("param_tot=" + param_tot)
    param_ban = scrapertools.get_match(bloque, "\"ban\"\s*\:\s*'([^']+)'")
    logger.info("param_ban=" + param_ban)
    param_cate = scrapertools.get_match(bloque, "\"cate\"\s*\:\s*'([^']+)'")
    logger.info("param_cate=" + param_cate)

    base_url = scrapertools.get_match(bloque, "url\s*\:\s*'([^']+)'")
    # Fixed: the original used re.sub("../..", ...) where every "." is a regex
    # wildcard matching any character; a plain string replace of the literal
    # "../.." prefix is what was intended.
    base_url = base_url.replace("../..", "http://www.newpct.com", 1)
    logger.info("base_url=" + base_url)

    # The resulting URL looks like:
    #   http://www.newpct.com/include.inc/ajax.php/orderCategory.php?type=todo&leter=&sql=SELECT+...&pag=3&tot=&ban=3&cate=1225
    url_next_page = base_url + "?" + urllib.urlencode(
        {"total": param_total, "type": param_type, "leter": param_leter,
         "sql": param_sql, "pag": param_pag, "tot": param_tot,
         "ban": param_ban, "cate": param_cate})
    logger.info("url_next_page=" + url_next_page)

    # Serie listings keep their category tag so the next page renders the same.
    if item.category == "serie":
        itemlist.append(Item(channel=item.channel, action="listado",
                             title=">> Página siguiente", url=url_next_page,
                             extra=bloque, category="serie",
                             viewmode="movie_with_plot"))
    else:
        itemlist.append(Item(channel=item.channel, action="listado",
                             title=">> Página siguiente", url=url_next_page,
                             extra=bloque, viewmode="movie_with_plot"))
    return itemlist


def listado(item):
    """Paginated category listing.

    NOTE(review): the original ``listado`` body appears to have been lost when
    this file was flattened; mainlist() and the ">> Página siguiente" items
    both dispatch to action="listado", so delegate to buscador(), which holds
    the surviving pagination logic for these pages.  Restore the real body
    from the original channel file when available.
    """
    return buscador(item)


def series(item):
    """Build the A-Z letter index menu for the Series section."""
    logger.info()
    itemlist = []

    data = scrapertools.cache_page(item.url)
    # NOTE(review): the opening of this pattern was eaten by the paste that
    # flattened the file -- it originally anchored on the letter-index markup.
    patron = '(.*?)<\/div>'
    data = re.compile(patron, re.DOTALL | re.M).findall(data)
    if not data:
        # Index block not found (site layout changed?): return an empty menu
        # instead of crashing with IndexError.
        return itemlist

    patron = 'id="([^"]+)".*?>([^"]+)<\/a>'
    matches = re.compile(patron, re.DOTALL | re.M).findall(data[0])

    # The ajax URL is identical for every letter except for the "leter" value,
    # substituted into the "%s" placeholder.  Plain .replace() is used instead
    # of %-formatting because the query string is full of percent-escapes
    # (%09, %2C, %3D, ...).  Hoisted out of the loop (loop-invariant).
    url_base = "http://www.newpct.com/include.inc/ajax.php/orderCategory.php?total=9&type=letter&leter=%s&sql=+%09%09SELECT++t.torrentID%2C++%09%09%09%09t.torrentCategoryID%2C++%09%09%09%09t.torrentCategoryIDR%2C++%09%09%09%09t.torrentImageID%2C++%09%09%09%09t.torrentName%2C++%09%09%09%09t.guid%2C++%09%09%09%09t.torrentShortName%2C+%09%09%09%09t.torrentLanguage%2C+%09%09%09%09t.torrentSize%2C+%09%09%09%09t.calidad+as+calidad_%2C+%09%09%09%09t.torrentDescription%2C+%09%09%09%09t.torrentViews%2C+%09%09%09%09t.rating%2C+%09%09%09%09t.n_votos%2C+%09%09%09%09t.vistas_hoy%2C+%09%09%09%09t.vistas_ayer%2C+%09%09%09%09t.vistas_semana%2C+%09%09%09%09t.vistas_mes%2C+%09%09%09%09t.imagen+FROM+torrentsFiles+as+t++%09%09LEFT+JOIN+torrentsCategories+as+tc+ON+(t.torrentCategoryID+%3D+tc.categoryID)++%09%09INNER+JOIN++%09%09(+%09%09%09SELECT+torrentID+%09%09%09FROM+torrentsFiles++%09%09%09WHERE++torrentCategoryIDR+%3D+1469+%09%09%09ORDER+BY+torrentID+DESC+%09%09)t1+ON+t1.torrentID+%3D+t.torrentID+WHERE+(t.torrentStatus+%3D+1+OR+t.torrentStatus+%3D+2)+AND+t.home_active+%3D+0++AND+tc.categoryIDR+%3D+1469+GROUP+BY+t.torrentCategoryID+ORDER+BY+t.torrentID+DESC+LIMIT+0%2C+50&pag=&tot=&ban=3&cate=1469"
    for letter_id, scrapedtitle in matches:  # renamed: `id` shadowed a builtin
        # "todo" ("all") gets its own entry elsewhere; skip it here.
        if letter_id != "todo":
            itemlist.append(Item(channel=item.channel, action="listaseries",
                                 title=scrapedtitle,
                                 url=url_base.replace("%s", letter_id),
                                 folder=True))
    return itemlist


def listaseries(item):
    """List the series found for one letter of the A-Z index."""
    logger.info()
    itemlist = []
    data = scrapertools.downloadpageGzip(item.url)
    # NOTE(review): this pattern has only ONE capture group in this copy of
    # the file, yet three values are unpacked per match below -- the href/src
    # groups were almost certainly eaten by the paste.  Kept byte-for-byte;
    # restore from the original channel file before shipping.
    patron = ".*?([^']+)<\/h3>"
    matches = re.compile(patron, re.DOTALL | re.M).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        itemlist.append(Item(channel=item.channel, action="episodios",
                             title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, folder=True))
    return itemlist
episodios(item): logger.info() itemlist = [] data = scrapertools.cache_page(item.url) patron = "