@@ -4,349 +4,99 @@ import re
import urllib
import urlparse

from core import servertools
from core import scrapertools
from core.item import Item
from platformcode import logger
from core import httptools

Host = 'http://www.tvsinpagar.com'

def mainlist(item):
    logger.info()

    itemlist = []
    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas"))
    itemlist.append(Item(channel=item.channel, action="submenu", title="Series"))
    itemlist.append(Item(channel=item.channel, action="listado", title="Anime", url="http://www.newpct.com/anime/",
                         viewmode="movie_with_plot"))
    itemlist.append(
        Item(channel=item.channel, action="listado", title="Documentales", url="http://www.newpct.com/documentales/",
             viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=Host + "/peliculas/"))
    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=Host + "/series/"))
    # itemlist.append(Item(channel=item.channel, action="listado", title="Anime", url=Host+"/anime/",
    #                      viewmode="movie_with_plot"))
    # itemlist.append(
    #     Item(channel=item.channel, action="listado", title="Documentales", url=Host+"/documentales/",
    #          viewmode="movie_with_plot"))
    # itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
    return itemlist

def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")

    item.url = "http://www.newpct.com/buscar-descargas/%s" % (texto)
    try:
        return buscador(item)
    # Catch the exception so one failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

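
# Hedged sketch, not part of the channel: how a global searcher is assumed to
# consume search() above. "channels" and "query" are illustrative names; the
# point is the contract the comment in search() relies on, i.e. a channel that
# raises must still contribute [] so the loop over channels keeps going.
def _global_search_demo(channels, query):
    results = []
    for ch in channels:
        # each ch is assumed to be a channel module exposing search(item, texto)
        results.extend(ch.search(Item(channel=getattr(ch, "__name__", "")), query))
    return results
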
def buscador(item):
    logger.info()
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    # <td class="center" style="border-bottom:solid 1px cyan;">14-09-14</td><td style="border-bottom:solid 1px cyan;"><strong><a href="http://www.newpct.com/descargar-pelicula/malefica-3d-sbs/" title="Más información sobre Malefica 3D SBS [BluRay 1080p][DTS 5.1-AC3 5.1 Castellano DTS 5.1-Ingles+Subs][ES-EN]"> <span class="searchTerm">Malefica</span> 3D SBS [BluRay 1080p][DTS 5.1-AC3 5.1 Castellano DTS 5.1-Ingles+Subs][ES-EN]</a></strong></td><td class="center" style="border-bottom:solid 1px cyan;">10.9 GB</td><td style="border-bottom:solid 1px cyan;"><a href="http://tumejorserie.com/descargar/index.php?link=torrents/059784.torrent" title="Descargar Malefica 3D SBS [BluRay 1080p][DTS 5.1-AC3 5.1 Castellano DTS 5.1-Ingles+Subs][ES-EN]"><img src="http://newpct.com/v2/imagenes//buttons/download.png"

    patron = '<td class="center" style="border-bottom:solid 1px cyan;">([^<]+)</td>.*?'  # createdate
    patron += '<td class="center" style="border-bottom:solid 1px cyan;">([^<]+)</td>.*?'  # info
    patron += '<a href="([^"]+)" '  # url
    patron += 'title="Descargar([^"]+)">'  # title
    patron += '<img src="([^"]+)"'  # thumbnail

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedcreatedate, scrapedinfo, scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedtitle = scrapedtitle + "(Tamaño:" + scrapedinfo + "--" + scrapedcreatedate + ")"
        itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="play", server="torrent",
                             thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, folder=True))

    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = "play"
        videoitem.folder = False

    return itemlist

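
# Illustration only (never called): the buscador() patron applied to a minimal
# hand-made row shaped like the sample documented inside buscador(). All values
# here are invented; only the tag structure mirrors the real listing.
def _buscador_patron_demo():
    row = ('<td class="center" style="border-bottom:solid 1px cyan;">14-09-14</td>'
           '<td class="center" style="border-bottom:solid 1px cyan;">10.9 GB</td>'
           '<td style="border-bottom:solid 1px cyan;">'
           '<a href="http://example.com/059784.torrent" title="Descargar Malefica 3D SBS">'
           '<img src="http://example.com/download.png"')
    patron = '<td class="center" style="border-bottom:solid 1px cyan;">([^<]+)</td>.*?'
    patron += '<td class="center" style="border-bottom:solid 1px cyan;">([^<]+)</td>.*?'
    patron += '<a href="([^"]+)" '
    patron += 'title="Descargar([^"]+)">'
    patron += '<img src="([^"]+)"'
    return re.compile(patron, re.DOTALL).findall(row)
    # -> [('14-09-14', '10.9 GB', 'http://example.com/059784.torrent',
    #      ' Malefica 3D SBS', 'http://example.com/download.png')]
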
def submenu(item):
    logger.info()
    itemlist = []

    if item.title == "Películas":
        itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas DVDRIP-BRRIP Castellano",
                             url="http://www.newpct.com/peliculas-castellano/peliculas-rip/",
                             viewmode="movie_with_plot"))
        itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas Latino",
                             url="http://www.newpct.com/peliculas-latino/", viewmode="movie_with_plot"))
        itemlist.append(Item(channel=item.channel, action="listado", title="Estrenos de Cine Castellano",
                             url="http://www.newpct.com/peliculas-castellano/estrenos-de-cine/",
                             viewmode="movie_with_plot"))
        itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas Alta Definicion HD",
                             url="http://www.newpct.com/cine-alta-definicion-hd/", viewmode="movie_with_plot"))
        itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas en 3D HD",
                             url="http://www.newpct.com/peliculas-en-3d-hd/", viewmode="movie_with_plot"))
        itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas DVDFULL",
                             url="http://www.newpct.com/peliculas-castellano/peliculas-dvd/",
                             viewmode="movie_with_plot"))
        itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas V.O.Subtituladas",
                             url="http://www.newpct.com/peliculas-vo/", viewmode="movie_with_plot"))
    else:
        itemlist.append(
            Item(channel=item.channel, action="listado", title="HDTV Castellano", url="http://www.newpct.com/series/",
                 category="serie", viewmode="movie_with_plot"))
        itemlist.append(Item(channel=item.channel, action="listado", title="Miniseries Castellano",
                             url="http://www.newpct.com/miniseries-es/", viewmode="movie_with_plot"))
        itemlist.append(Item(channel=item.channel, action="listado", title="Series TV - V.O.S.E",
                             url="http://www.newpct.com/series-vo/", category="serie", viewmode="movie_with_plot"))
        itemlist.append(Item(channel=item.channel, action="listado", title="Últimos Capítulos HD",
                             url="http://www.newpct.com/series-alta-definicion-hd/", category="serie",
                             viewmode="movie_with_plot"))
        itemlist.append(Item(channel=item.channel, action="series", title="Series HD [A-Z]",
                             url="http://www.newpct.com/index.php?l=torrentListByCategory&subcategory_s=1469&more=listar",
                             category="serie"))
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<li><a href="' + item.url + '"><i.+?<ul>(.+?)<\/ul>'  # Filter by url
    data_cat = scrapertools.find_single_match(data, patron)
    patron_cat = '<li><a href="(.+?)" title="(.+?)".+?<\/a><\/li>'
    matches = scrapertools.find_multiple_matches(data_cat, patron_cat)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="listado"))
    return itemlist

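
# Illustration only (never called): the two-step menu scrape used at the end of
# submenu(), run on a synthetic fragment. Assumes the usual behaviour of these
# helpers: find_single_match returns the first capture group (or "") and
# find_multiple_matches wraps re.findall.
def _submenu_scrape_demo():
    html = ('<li><a href="http://www.tvsinpagar.com/peliculas/"><i></i></a><ul>'
            '<li><a href="http://www.tvsinpagar.com/peliculas/accion/" title="Accion">x</a></li>'
            '</ul>')
    patron = '<li><a href="http://www.tvsinpagar.com/peliculas/"><i.+?<ul>(.+?)<\/ul>'
    data_cat = scrapertools.find_single_match(html, patron)
    return scrapertools.find_multiple_matches(data_cat, '<li><a href="(.+?)" title="(.+?)".+?<\/a><\/li>')
    # -> [('http://www.tvsinpagar.com/peliculas/accion/', 'Accion')]
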
def listado(item):
    logger.info()
    itemlist = []
    data = scrapertools.cache_page(item.url)

    '''
    <li>
        <a href='http://www.newpct.com/descargar-pelicula/la-pequena-venecia/'>
            <div class='boxgrid captionb'>
                <img src='http://images.newpct.com/banco_de_imagenes/destacados/038707/la-pequeña-venecia--dvdrip--ac3-5-1-español-castellano--2012-.jpg' alt='Descargar Peliculas Castellano » Películas RIP La Pequeña Venecia [DVDrip][AC3 5.1 Español Castellano][2012]' />
                <div class='cover boxcaption'>
                    <h3>La Pequeña Venecia </h3>
                    <p>Peliculas Castellano<br/>
                    Calidad: DVDRIP AC3 5.1<br>
                    Tamaño: 1.1 GB<br>
                    Idioma : Español Castellano
                    </p>
                </div>
            </div>
        </a>
        <div id='bot-desc'>
            <div id='tinfo'>
                <a class='youtube' href='#' rel='gx9EKDC0UFQ' title='Ver Trailer' alt='Ver Trailer'>
                <img style='width:25px;' src='http://www.newpct.com/images.inc/images/playm2.gif'></a>
            </div>
            <div id='tdescargar' ><a class='atdescargar' href='http://www.newpct.com/descargar-pelicula/la-pequena-venecia/'>DESCARGAR</a></div>
        </div>
    </li>
    '''
    patron = "<li[^<]+"
    patron += "<a href='([^']+)'[^<]+"
    patron += "<div class='boxgrid captionb'[^<]+"
    patron += "<img src='([^']+)'[^<]+"
    patron += "<div class='cover boxcaption'[^<]+"
    patron += '<h3>([^<]+)</h3>(.*?)</div>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches:
        title = scrapedtitle.strip()
        title = unicode(title, "iso-8859-1", errors="replace").encode("utf-8")

        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        plot = scrapertools.htmlclean(scrapedplot).strip()
        plot = unicode(plot, "iso-8859-1", errors="replace").encode("utf-8")

        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        if item.category == "serie":
            itemlist.append(
                Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot))
        else:
            itemlist.append(
                Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
                     contentTitle=title))

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_data = '<ul class="pelilist">(.+?)</ul>'
    data_listado = scrapertools.find_single_match(data, patron_data)
    patron_listado = '<li><a href="(.+?)" title=".+?"><img src="(.+?)".+?><h2'
    if 'Serie' in item.title:
        patron_listado += '.+?>'
    else:
        patron_listado += '>'
    patron_listado += '(.+?)<\/h2><span>(.+?)<\/span><\/a><\/li>'
    matches = scrapertools.find_multiple_matches(data_listado, patron_listado)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedquality in matches:
        if 'Serie' in item.title:
            action = "episodios"
        else:
            action = "findvideos"
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action=action,
                                   quality=scrapedquality, show=scrapedtitle))
    # Next page
    '''
    GET /include.inc/ajax.php/orderCategory.php?type=todo&leter=&sql=SELECT+DISTINCT+++%09%09%09%09%09%09torrentID%2C+++%09%09%09%09%09%09torrentCategoryID%2C+++%09%09%09%09%09%09torrentCategoryIDR%2C+++%09%09%09%09%09%09torrentImageID%2C+++%09%09%09%09%09%09torrentName%2C+++%09%09%09%09%09%09guid%2C+++%09%09%09%09%09%09torrentShortName%2C++%09%09%09%09%09%09torrentLanguage%2C++%09%09%09%09%09%09torrentSize%2C++%09%09%09%09%09%09calidad+as+calidad_%2C++%09%09%09%09%09%09torrentDescription%2C++%09%09%09%09%09%09torrentViews%2C++%09%09%09%09%09%09rating%2C++%09%09%09%09%09%09n_votos%2C++%09%09%09%09%09%09vistas_hoy%2C++%09%09%09%09%09%09vistas_ayer%2C++%09%09%09%09%09%09vistas_semana%2C++%09%09%09%09%09%09vistas_mes++%09%09%09%09++FROM+torrentsFiles+as+t+WHERE++(torrentStatus+%3D+1+OR+torrentStatus+%3D+2)++AND+(torrentCategoryID+IN+(1537%2C+758%2C+1105%2C+760%2C+1225))++++ORDER+BY+torrentDateAdded++DESC++LIMIT+0%2C+50&pag=3&tot=&ban=3&cate=1225 HTTP/1.1
    Host: www.newpct.com
    User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:19.0) Gecko/20100101 Firefox/19.0
    Accept: */*
    Accept-Language: es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3
    Accept-Encoding: gzip, deflate
    X-Requested-With: XMLHttpRequest
    Referer: http://www.newpct.com/peliculas-castellano/peliculas-rip/
    Cookie: adbooth_popunder=5%7CSat%2C%2009%20Mar%202013%2018%3A23%3A22%20GMT
    Connection: keep-alive
    '''
    '''
    function orderCategory(type,leter,pag,other)
    {
        if(leter=='buscar')
        {
            leter = document.getElementById('word').value;
        }
        if(type=='todo')
        {
            document.getElementById('todo').className = "active_todo";
        }
        if(type=='letter')
        {
            switch(leter)
            {
                case '09':
                    document.getElementById('09').className = "active_num";
                    break;
                default:
                    document.getElementById(leter).className = "active_a";
                    break;
            }
        }

        var parametros = {
            "type" : type,
            "leter" : leter,
            "sql" : "SELECT DISTINCT torrentID, torrentCategoryID, torrentCategoryIDR, torrentImageID, torrentName, guid, torrentShortName, torrentLanguage, torrentSize, calidad as calidad_, torrentDescription, torrentViews, rating, n_votos, vistas_hoy, vistas_ayer, vistas_semana, vistas_mes FROM torrentsFiles as t WHERE (torrentStatus = 1 OR torrentStatus = 2) AND (torrentCategoryID IN (1537, 758, 1105, 760, 1225)) ORDER BY torrentDateAdded DESC LIMIT 0, 50",
            "pag" : pag,
            "tot" : '',
            "ban" : '3',
            "other": other,
            "cate" : '1225'
        };
        //alert(type+leter);

        $('#content-category').html('<div style="margin:100px auto;width:100px;height:100px;"><img src="http://www.newpct.com/images.inc/images/ajax-loader.gif"/></div>');
        var page = $(this).attr('data');
        var dataString = 'page='+page;

        $.ajax({
            type: "GET",
            url: 'http://www.newpct.com/include.inc/ajax.php/orderCategory.php',
            data: parametros,
            success: function(data) {
                //Cargamos finalmente el contenido deseado
                $('#content-category').fadeIn(1000).html(data);
            }
        });
    }
    '''
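
    # The block below re-creates the AJAX request documented above without
    # running any JavaScript: it scrapes the onclick="orderCategory(...)"
    # arguments and the literal "sql"/"tot"/"ban"/"cate" values out of the
    # inlined script, then re-encodes them into the orderCategory.php query
    # string that becomes the ">> Página siguiente" URL.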
    if item.extra != "":
        bloque = item.extra
    else:
        bloque = scrapertools.get_match(data, "function orderCategory(.*?)\}\)\;")
    logger.info("bloque=" + bloque)
    param_type = scrapertools.get_match(data, "<a href='javascript:;' onclick=\"orderCategory\('([^']+)'[^>]+> >> </a>")
    logger.info("param_type=" + param_type)
    param_leter = scrapertools.get_match(data,
                                         "<a href='javascript:;' onclick=\"orderCategory\('[^']+','([^']*)'[^>]+> >> </a>")
    logger.info("param_leter=" + param_leter)
    param_pag = scrapertools.get_match(data,
                                       "<a href='javascript:;' onclick=\"orderCategory\('[^']+','[^']*','([^']+)'[^>]+> >> </a>")
    logger.info("param_pag=" + param_pag)
    param_total = scrapertools.get_match(bloque, '"total"\s*\:\s*\'([^\']+)')
    logger.info("param_total=" + param_total)
    param_sql = scrapertools.get_match(bloque, '"sql"\s*\:\s*\'([^\']+)')
    logger.info("param_sql=" + param_sql)
    param_tot = scrapertools.get_match(bloque, "\"tot\"\s*\:\s*'([^']*)'")
    logger.info("param_tot=" + param_tot)
    param_ban = scrapertools.get_match(bloque, "\"ban\"\s*\:\s*'([^']+)'")
    logger.info("param_ban=" + param_ban)
    param_cate = scrapertools.get_match(bloque, "\"cate\"\s*\:\s*'([^']+)'")
    logger.info("param_cate=" + param_cate)
    base_url = scrapertools.get_match(bloque, "url\s*\:\s*'([^']+)'")
    # Literal replacement of the relative prefix (re.sub would treat the dots as wildcards)
    base_url = base_url.replace("../..", "http://www.newpct.com", 1)
    logger.info("base_url=" + base_url)
    # http://www.newpct.com/include.inc/ajax.php/orderCategory.php?type=todo&leter=&sql=SELECT+DISTINCT+++%09%09%09%09%09%09torrentID%2C+++%09%09%09%09%09%09torrentCategoryID%2C+++%09%09%09%09%09%09torrentCategoryIDR%2C+++%09%09%09%09%09%09torrentImageID%2C+++%09%09%09%09%09%09torrentName%2C+++%09%09%09%09%09%09guid%2C+++%09%09%09%09%09%09torrentShortName%2C++%09%09%09%09%09%09torrentLanguage%2C++%09%09%09%09%09%09torrentSize%2C++%09%09%09%09%09%09calidad+as+calidad_%2C++%09%09%09%09%09%09torrentDescription%2C++%09%09%09%09%09%09torrentViews%2C++%09%09%09%09%09%09rating%2C++%09%09%09%09%09%09n_votos%2C++%09%09%09%09%09%09vistas_hoy%2C++%09%09%09%09%09%09vistas_ayer%2C++%09%09%09%09%09%09vistas_semana%2C++%09%09%09%09%09%09vistas_mes++%09%09%09%09++FROM+torrentsFiles+as+t+WHERE++(torrentStatus+%3D+1+OR+torrentStatus+%3D+2)++AND+(torrentCategoryID+IN+(1537%2C+758%2C+1105%2C+760%2C+1225))++++ORDER+BY+torrentDateAdded++DESC++LIMIT+0%2C+50&pag=3&tot=&ban=3&cate=1225
    url_next_page = base_url + "?" + urllib.urlencode(
        {"total": param_total, "type": param_type, "leter": param_leter, "sql": param_sql, "pag": param_pag,
         "tot": param_tot, "ban": param_ban, "cate": param_cate})
    logger.info("url_next_page=" + url_next_page)
    if item.category == "serie":
        itemlist.append(
            Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url_next_page, extra=bloque,
                 category="serie", viewmode="movie_with_plot"))
    else:
        itemlist.append(
            Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url_next_page, extra=bloque,
                 viewmode="movie_with_plot"))

    patron_pag = '<ul class="pagination"><li><a class="current" href=".+?">.+?<\/a>.+?<a href="(.+?)">'
    siguiente = scrapertools.find_single_match(data, patron_pag)
    itemlist.append(
        Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=siguiente, action="listado"))
    return itemlist

def series(item):
    logger.info()
    itemlist = []
    # List the A-Z series menu
    data = scrapertools.cache_page(item.url)
    patron = '<div id="content-abc">(.*?)<\/div>'
    data = re.compile(patron, re.DOTALL | re.M).findall(data)
    patron = 'id="([^"]+)".*?>([^"]+)<\/a>'
    matches = re.compile(patron, re.DOTALL | re.M).findall(data[0])
    for id, scrapedtitle in matches:
        url_base = "http://www.newpct.com/include.inc/ajax.php/orderCategory.php?total=9&type=letter&leter=%s&sql=+%09%09SELECT++t.torrentID%2C++%09%09%09%09t.torrentCategoryID%2C++%09%09%09%09t.torrentCategoryIDR%2C++%09%09%09%09t.torrentImageID%2C++%09%09%09%09t.torrentName%2C++%09%09%09%09t.guid%2C++%09%09%09%09t.torrentShortName%2C+%09%09%09%09t.torrentLanguage%2C+%09%09%09%09t.torrentSize%2C+%09%09%09%09t.calidad+as+calidad_%2C+%09%09%09%09t.torrentDescription%2C+%09%09%09%09t.torrentViews%2C+%09%09%09%09t.rating%2C+%09%09%09%09t.n_votos%2C+%09%09%09%09t.vistas_hoy%2C+%09%09%09%09t.vistas_ayer%2C+%09%09%09%09t.vistas_semana%2C+%09%09%09%09t.vistas_mes%2C+%09%09%09%09t.imagen+FROM+torrentsFiles+as+t++%09%09LEFT+JOIN+torrentsCategories+as+tc+ON+(t.torrentCategoryID+%3D+tc.categoryID)++%09%09INNER+JOIN++%09%09(+%09%09%09SELECT+torrentID+%09%09%09FROM+torrentsFiles++%09%09%09WHERE++torrentCategoryIDR+%3D+1469+%09%09%09ORDER+BY+torrentID+DESC+%09%09)t1+ON+t1.torrentID+%3D+t.torrentID+WHERE+(t.torrentStatus+%3D+1+OR+t.torrentStatus+%3D+2)+AND+t.home_active+%3D+0++AND+tc.categoryIDR+%3D+1469+GROUP+BY+t.torrentCategoryID+ORDER+BY+t.torrentID+DESC+LIMIT+0%2C+50&pag=&tot=&ban=3&cate=1469"
        scrapedurl = url_base.replace("%s", id)
        if id != "todo": itemlist.append(
            Item(channel=item.channel, action="listaseries", title=scrapedtitle, url=scrapedurl, folder=True))

    return itemlist

def listaseries(item):
    logger.info()
    itemlist = []

    data = scrapertools.downloadpageGzip(item.url)
    patron = "<li[^<]+<a href='([^']+)'>.*?<img src='([^']+)'.*?<h3>([^']+)<\/h3>"
    matches = re.compile(patron, re.DOTALL | re.M).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, folder=True))
    return itemlist

def episodios(item):
    logger.info()
    itemlist = []

    data = scrapertools.cache_page(item.url)
    patron = "<ul style='display:none;'.*?>(.*?)<\/ul>"
    data = re.compile(patron, re.DOTALL | re.M).findall(data)
    patron = "<a href='([^']+)'.*?title='([^']+)'"
    for index in range(len(data)):
        matches = re.compile(patron, re.DOTALL | re.M).findall(data[index])
        for scrapedurl, scrapedtitle in matches:
            itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
                                 thumbnail=item.thumbnail, folder=True))

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_data = '<ul class="buscar-list">(.+?)</ul>'
    data_listado = scrapertools.find_single_match(data, patron_data)
    patron = '<img src="(.+?)" alt=".+?">.+?<div class=".+?">.+?<a href=(.+?)" title=".+?">.+?>Serie.+?>(.+?)<'
    matches = scrapertools.find_multiple_matches(data_listado, patron)
    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        if " al " in scrapedtitle:
            # action="episodios"
            titulo = scrapedurl.split('http')
            scrapedurl = "http" + titulo[1]
            itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                                       action="findvideos", show=scrapedtitle))
    return itemlist

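
# Note (illustration): in these listings a title containing " al " (e.g.
# "Cap. 101 al 105") marks a multi-episode pack, which is why episodios() sends
# it straight to findvideos. The href is rebuilt from the first "http" onwards
# because the loose pattern ('<a href=(.+?)"') also captures the opening quote.
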
def findvideos(item):
    logger.info()
    itemlist = []
    new_item = []
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    url = scrapertools.find_single_match(data, 'location.href = "([^"]+)"')
    new_item.append(Item(url=url, title="Torrent", server="torrent", action="play"))
    itemlist.extend(new_item)
    for it in itemlist:
        it.channel = item.channel

    data = scrapertools.cache_page(item.url)

    # <span id='content-torrent'> <a href='http://tumejorjuego.com/descargar/index.php?link=descargar/torrent/58591/el-tour-de-los-muppets-bluray-screener-espanol-castellano-line-2014.html' rel='nofollow' id='58591' title='el-tour-de-los-muppets-bluray-screener-espanol-castellano-line-2014' class='external-url' target='_blank'>
    torrent_url = scrapertools.find_single_match(data, "<span id='content-torrent'[^<]+<a href='([^']+)'")
    if torrent_url != "":
        itemlist.append(Item(channel=item.channel, action="play", title="Torrent", url=torrent_url, server="torrent"))

    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.title = "[" + videoitem.server + "]"

    return itemlist

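
# Note, based on how it is used above (buscador and findvideos): the
# servertools.find_video_items(data=...) helper scans raw HTML for URLs that
# match any known server pattern and returns ready-made Items with server and
# url already filled in; the caller only stamps its own channel/action on them.
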
@@ -1,470 +1,102 @@
# -*- coding: utf-8 -*-

import re
import urllib
import urlparse

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger

Host = 'http://descargas2020.com'
host = 'http://newpct1.com/'

def mainlist(item):
    logger.info()

    itemlist = []

    thumb_pelis = get_thumb("channels_movie.png")
    thumb_series = get_thumb("channels_tvshow.png")
    thumb_search = get_thumb("search.png")

    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
                         extra="peliculas", thumbnail=thumb_pelis))
    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
                         thumbnail=thumb_series))
    itemlist.append(
        Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=Host + "/peliculas/"))
    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=Host + "/series/"))
    # itemlist.append(Item(channel=item.channel, action="listado", title="Anime", url=Host+"/anime/",
    #                      viewmode="movie_with_plot"))
    # itemlist.append(
    #     Item(channel=item.channel, action="listado", title="Documentales", url=Host+"/documentales/",
    #          viewmode="movie_with_plot"))
    # itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
    return itemlist

def submenu(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    patron = '<li><a href="http://(?:www.)?newpct1.com/' + item.extra + '/">.*?<ul>(.*?)</ul>'
    data = scrapertools.get_match(data, patron)

    patron = '<a href="([^"]+)".*?>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<li><a href="' + item.url + '"><i.+?<ul>(.+?)<\/ul>'  # Filter by url
    data_cat = scrapertools.find_single_match(data, patron)
    patron_cat = '<li><a href="(.+?)" title="(.+?)".+?<\/a><\/li>'
    matches = scrapertools.find_multiple_matches(data_cat, patron_cat)
    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.strip()
        url = scrapedurl

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
        itemlist.append(
            Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))

    return itemlist

def alfabeto(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    patron = '<ul class="alfabeto">(.*?)</ul>'
    data = scrapertools.get_match(data, patron)

    patron = '<a href="([^"]+)"[^>]+>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.upper()
        url = scrapedurl

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))

        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="listado"))
    return itemlist

def listado(item):
    logger.info()
    itemlist = []
    url_next_page = ''

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    # logger.debug(data)
    logger.debug('item.modo: %s' % item.modo)
    logger.debug('item.extra: %s' % item.extra)

    if item.modo != 'next' or item.modo == '':
        logger.debug('item.title: %s' % item.title)
        patron = '<ul class="' + item.extra + '">(.*?)</ul>'
        logger.debug("patron=" + patron)
        fichas = scrapertools.get_match(data, patron)
        page_extra = item.extra
        data = httptools.downloadpage(item.url).data
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
        patron_data = '<ul class="pelilist">(.+?)</ul>'
        data_listado = scrapertools.find_single_match(data, patron_data)
        patron_listado = '<li><a href="(.+?)" title=".+?"><img src="(.+?)".+?><h2'
        if 'Serie' in item.title:
            patron_listado += '.+?>'
    else:
        fichas = data
        page_extra = item.extra

    patron = '<a href="([^"]+).*?'  # url
    patron += 'title="([^"]+).*?'  # title
    patron += '<img src="([^"]+)"[^>]+>.*?'  # thumbnail
    patron += '<span>([^<].*?)<'  # quality

    matches = re.compile(patron, re.DOTALL).findall(fichas)
    logger.debug('item.next_page: %s' % item.next_page)

    # Pagination
    if item.next_page != 'b':
        if len(matches) > 30:
            url_next_page = item.url
            matches = matches[:30]
            next_page = 'b'
            modo = 'continue'
    else:
        matches = matches[30:]
        next_page = 'a'
        patron_next_page = '<a href="([^"]+)">Next<\/a>'
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        modo = 'continue'
        if len(matches_next_page) > 0:
            url_next_page = matches_next_page[0]
            modo = 'next'
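
    # Half-page pagination as implemented above (note, not new logic): the site
    # returns more than 30 cards per page, so the channel serves them in two
    # halves. First pass (next_page != 'b'): keep matches[:30] and point the
    # ">> Página siguiente" item back at the same URL with next_page='b'.
    # Second pass: keep matches[30:] and scrape the real "Next" link instead.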
    for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
        url = scrapedurl
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        action = "findvideos"
        extra = ""
        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
        if "1.com/series" in url:
            action = "episodios"
            extra = "serie"

        title = scrapertools.find_single_match(title, '([^-]+)')
        title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "",
                                                                                               1).strip()

        patron_listado += '>'
        patron_listado += '(.+?)<\/h2><span>(.+?)<\/span><\/a><\/li>'
        matches = scrapertools.find_multiple_matches(data_listado, patron_listado)
        for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedquality in matches:
            if 'Serie' in item.title:
                action = "episodios"
            else:
                title = title.replace("Descargar", "", 1).strip()
                if title.endswith("gratis"): title = title[:-7]

            show = title
            if item.extra != "buscar-list":
                title = title + ' ' + calidad

            context = ""
            context_title = scrapertools.find_single_match(url, "http://(?:www.)?newpct1.com/(.*?)/(.*?)/")
            if context_title:
                try:
                    context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
                                                                                                              "tvshow")
                    context_title = context_title[1].replace("-", " ")
                    if re.search('\d{4}', context_title[-4:]):
                        context_title = context_title[:-4]
                    elif re.search('\(\d{4}\)', context_title[-6:]):
                        context_title = context_title[:-6]

                except:
                    context_title = show
            logger.debug('contxt title: %s' % context_title)
            logger.debug('year: %s' % year)

            logger.debug('context: %s' % context)
            if not 'array' in title:
                itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
                                     extra=extra,
                                     show=context_title, contentTitle=context_title, contentType=context,
                                     context=["buscar_trailer"], infoLabels={'year': year}))

    tmdb.set_infoLabels(itemlist, True)

    if url_next_page:
        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                             url=url_next_page, next_page=next_page, folder=True,
                             text_color='yellow', text_bold=True, modo=modo, plot=extra,
                             extra=page_extra))
        action = "findvideos"
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action=action,
                                   quality=scrapedquality, show=scrapedtitle))
    # Next page
    patron_pag = '<ul class="pagination"><li><a class="current" href=".+?">.+?<\/a>.+?<a href="(.+?)">'
    siguiente = scrapertools.find_single_match(data, patron_pag)
    itemlist.append(
        Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=siguiente, action="listado"))
    return itemlist

def listado2(item):
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    list_chars = [["ñ", "ñ"]]  # maps a common mojibake sequence back to "ñ" (kept as in the original)

    for el in list_chars:
        data = re.sub(r"%s" % el[0], el[1], data)

    try:
        get, post = scrapertools.find_single_match(data, '<ul class="pagination">.*?<a class="current" href.*?'
                                                         '<a\s*href="([^"]+)"(?:\s*onClick=".*?\'([^"]+)\'.*?")')
    except:
        post = False

    if post:
        if "pg" in item.post:
            item.post = re.sub(r"pg=(\d+)", "pg=%s" % post, item.post)
        else:
            item.post += "&pg=%s" % post

    pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
    data = scrapertools.get_match(data, pattern)
    pattern = '<li><a href="(?P<url>[^"]+)".*?<img src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'

    matches = re.compile(pattern, re.DOTALL).findall(data)

    for url, thumb, title in matches:
        # fix encoding for title
        real_title = scrapertools.find_single_match(title, r'font color.*?font.*?><b>(.*?)<\/b><\/font>')
        title = scrapertools.htmlclean(title)
        title = title.replace("�", "ñ")

        # skip anything that is not a video
        if "/juego/" in url or "/varios/" in url:
            continue

        if ".com/series" in url:
            show = real_title
            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"], contentSerieName=show))
        else:
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"]))

    if post:
        itemlist.append(item.clone(channel=item.channel, action="listado2", title=">> Página siguiente",
                                   thumbnail=get_thumb("next.png")))

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_data = '<ul class="buscar-list">(.+?)</ul>'
    data_listado = scrapertools.find_single_match(data, patron_data)
    patron = '<img src="(.+?)" alt=".+?">.+?<div class=".+?">.+?<a href=(.+?)" title=".+?">.+?>Serie.+?>(.+?)<'
    matches = scrapertools.find_multiple_matches(data_listado, patron)
    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        if " al " in scrapedtitle:
            # action="episodios"
            titulo = scrapedurl.split('http')
            scrapedurl = "http" + titulo[1]
            itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                                       action="findvideos", show=scrapedtitle))
    return itemlist

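
# Illustration only (never called): how listado2() advances its POST payload.
# With post="q=matrix" and scraped page numbers "2" then "3", the first pass
# appends "&pg=2" and later passes rewrite the pg= value in place.
def _next_page_post_demo():
    post = "q=matrix"
    for scraped_pg in ("2", "3"):
        if "pg" in post:
            post = re.sub(r"pg=(\d+)", "pg=%s" % scraped_pg, post)
        else:
            post += "&pg=%s" % scraped_pg
    return post  # -> "q=matrix&pg=3"
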
def findvideos(item):
    logger.info()
    itemlist = []
    new_item = []
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    url = scrapertools.find_single_match(data, 'location.href = "([^"]+)"')
    new_item.append(Item(url=url, title="Torrent", server="torrent", action="play"))
    itemlist.extend(new_item)
    for it in itemlist:
        it.channel = item.channel

    ## Any of these three options is valid
    # item.url = item.url.replace("1.com/","1.com/ver-online/")
    # item.url = item.url.replace("1.com/","1.com/descarga-directa/")
    item.url = item.url.replace("1.com/", "1.com/descarga-torrent/")

    # Download the page
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)</strong>[^<]+</h1>")
    title += scrapertools.find_single_match(data, "<h1><strong>[^<]+</strong>([^<]+)</h1>")
    caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')

    # <a href="http://tumejorjuego.com/download/index.php?link=descargar-torrent/058310_yo-frankenstein-blurayrip-ac3-51.html" title="Descargar torrent de Yo Frankenstein " class="btn-torrent" target="_blank">Descarga tu Archivo torrent!</a>

    patron = 'openTorrent.*?"title=".*?" class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'

    # scraped torrent link
    url = scrapertools.find_single_match(data, patron)
    if url != "":
        itemlist.append(
            Item(channel=item.channel, action="play", server="torrent", title=title + " [torrent]", fulltitle=title,
                 url=url, thumbnail=caratula, plot=item.plot, folder=False))

    logger.debug("matar %s" % data)
    # scraped watch/download video links (single link and multiple links)
    data = data.replace("'", '"')
    data = data.replace(
        'javascript:;" onClick="popup("http://www.newpct1.com/pct1/library/include/ajax/get_modallinks.php?links=', "")
    data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "")
    data = data.replace("$!", "#!")

    patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
    patron_ver = '<div id="tab3"[^>]+>.*?</ul>'

    match_ver = scrapertools.find_single_match(data, patron_ver)
    match_descargar = scrapertools.find_single_match(data, patron_descargar)
    patron = '<div class="box1"><img src="([^"]+)".*?'  # logo
    patron += '<div class="box2">([^<]+)</div>'  # server
    patron += '<div class="box3">([^<]+)</div>'  # language
    patron += '<div class="box4">([^<]+)</div>'  # quality
    patron += '<div class="box5"><a href="([^"]+)".*?'  # link
    patron += '<div class="box6">([^<]+)</div>'  # title

    enlaces_ver = re.compile(patron, re.DOTALL).findall(match_ver)
    enlaces_descargar = re.compile(patron, re.DOTALL).findall(match_descargar)

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        servidor = servidor.replace("streamin", "streaminto")
        titulo = titulo + " [" + servidor + "]"
        mostrar_server = True
        if config.get_setting("hidepremium"):
            mostrar_server = servertools.is_server_enabled(servidor)
        if mostrar_server:
            try:
                devuelve = servertools.findvideosbyserver(enlace, servidor)
                if devuelve:
                    enlace = devuelve[0][1]
                    itemlist.append(
                        Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
                             fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
            except:
                pass

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        servidor = servidor.replace("uploaded", "uploadedto")
        partes = enlace.split(" ")
        p = 1
        for enlace in partes:
            parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]"
            p += 1
            mostrar_server = True
            if config.get_setting("hidepremium"):
                mostrar_server = servertools.is_server_enabled(servidor)
            if mostrar_server:
                try:
                    devuelve = servertools.findvideosbyserver(enlace, servidor)
                    if devuelve:
                        enlace = devuelve[0][1]
                        itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
                                             title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo,
                                             plot=item.plot, folder=False))
                except:
                    pass
    return itemlist

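
# Illustration only (never called): the download tab can pack several links in
# one space-separated field; findvideos() above numbers each part as
# "(part/total) [server]". The values below are invented.
def _multipart_title_demo():
    titulo, servidor = "Pelicula", "uploadedto"
    partes = "http://example.com/1 http://example.com/2".split(" ")
    titles = []
    p = 1
    for enlace in partes:
        titles.append(titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]")
        p += 1
    return titles  # -> ['Pelicula (1/2) [uploadedto]', 'Pelicula (2/2) [uploadedto]']
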
def episodios(item):
    logger.info()
    itemlist = []
    infoLabels = item.infoLabels
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    pattern = '<ul class="%s">(.*?)</ul>' % "pagination"  # item.pattern
    pagination = scrapertools.find_single_match(data, pattern)
    if pagination:
        pattern = '<li><a href="([^"]+)">Last<\/a>'
        full_url = scrapertools.find_single_match(pagination, pattern)
        url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)')
        list_pages = [item.url]
        for x in range(2, int(last_page) + 1):
            response = httptools.downloadpage('%s%s' % (url, x))
            if response.sucess:  # sic: httptools exposes the flag with this spelling
                list_pages.append("%s%s" % (url, x))
    else:
        list_pages = [item.url]

    for index, page in enumerate(list_pages):
        logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
        data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

        pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
        data = scrapertools.get_match(data, pattern)

        pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
        matches = re.compile(pattern, re.DOTALL).findall(data)
        for url, thumb, info in matches:

            if "<span" in info:  # new style
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)\s*[\]]</span>"
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]

                if match["episode2"]:
                    multi = True
                    title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                        str(match["episode2"]).zfill(2), match["lang"],
                                                        match["quality"])
                else:
                    multi = False
                    title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                     match["lang"], match["quality"])

            else:  # old style
                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                          "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"

                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]
                # logger.debug("data %s" % match)

                str_lang = ""
                if match["lang"] is not None:
                    str_lang = "[%s]" % match["lang"]

                if match["season2"] and match["episode2"]:
                    multi = True
                    if match["season"] == match["season2"]:
                        title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"],
                                                          match["episode2"], str_lang, match["quality"])
                    else:
                        title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"],
                                                             match["season2"], match["episode2"], str_lang,
                                                             match["quality"])
                else:
                    title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang,
                                                   match["quality"])
                    multi = False

            season = match['season']
            episode = match['episode']
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 quality=item.quality, multi=multi, contentSeason=season,
                                 contentEpisodeNumber=episode, infoLabels=infoLabels))

    # order list
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if len(itemlist) > 1:
        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios"))

    return itemlist

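
# Illustration only (never called): the "old style" pattern from episodios()
# applied to a made-up title; "[Cap.101_102]" encodes season 1, episodes 01-02.
# Note the trailing language group is optional and lazy, so it stays None here,
# which is exactly the case the str_lang guard above handles.
def _oldstyle_cap_demo():
    info = "[HDTV][Cap.101_102][Español Castellano]"
    pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
              "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
    return [m.groupdict() for m in re.finditer(pattern, info)][0]
    # -> {'quality': 'HDTV', 'season': '1', 'episode': '01',
    #     'season2': '1', 'episode2': '02', 'lang': None}
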
def search(item, texto):
    logger.info("search:" + texto)
    # texto = texto.replace(" ", "+")

    try:
        item.post = "q=%s" % texto
        item.pattern = "buscar-list"
        itemlist = listado2(item)

        return itemlist

    # Catch the exception so one failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        item.extra = 'pelilist'
        if categoria == 'torrent':
            item.url = host + 'peliculas/'

            itemlist = listado(item)
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()
            item.url = host + 'series/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

    # Catch the exception so one failing channel does not break the "news" listing
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist