Removed
seriecanal: almost the entire site is now "donors"-only; tusfiles, vidspot: servers no longer work
@@ -1,7 +1,7 @@
 {
     "id": "seriecanal",
     "name": "Seriecanal",
-    "active": true,
+    "active": false,
     "adult": false,
     "language": ["cast"],
     "thumbnail": "http://i.imgur.com/EwMK8Yd.png",
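Deactivating the channel (rather than deleting its JSON) keeps the entry and its settings on disk while hiding it from the menus. A minimal sketch of how a loader can honour the flag — the file name and helper are illustrative assumptions, not code from this repository:

import json

def is_channel_active(path):
    # Load the channel descriptor and check its "active" flag.
    with open(path) as f:
        return json.load(f).get("active", False)

# After this commit: is_channel_active("seriecanal.json") -> False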
@@ -4,12 +4,14 @@ import re
 import urllib
 import urlparse
 
+from core import httptools
 from core import scrapertools
 from core import servertools
+from core import tmdb
 from platformcode import config, logger
 
 __modo_grafico__ = config.get_setting('modo_grafico', "seriecanal")
-__perfil__ = config.get_setting('perfil', "descargasmix")
+__perfil__ = config.get_setting('perfil', "seriecanal")
 
 # Fijar perfil de color
 perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
@@ -17,23 +19,21 @@ perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
           ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
 color1, color2, color3 = perfil[__perfil__]
 
-URL_BASE = "http://www.seriecanal.com/"
+host = "https://www.seriecanal.com/"
 
 
 def login():
     logger.info()
-    data = scrapertools.downloadpage(URL_BASE)
+    data = httptools.downloadpage(host).data
     if "Cerrar Sesion" in data:
         return True, ""
 
     usuario = config.get_setting("user", "seriecanal")
     password = config.get_setting("password", "seriecanal")
     if usuario == "" or password == "":
         return False, 'Regístrate en www.seriecanal.com e introduce tus datos en "Configurar Canal"'
     else:
         post = urllib.urlencode({'username': usuario, 'password': password})
-        data = scrapertools.downloadpage("http://www.seriecanal.com/index.php?page=member&do=login&tarea=acceder",
-                                         post=post)
+        data = httptools.downloadpage(host + "index.php?page=member&do=login&tarea=acceder", post=post).data
         if "Bienvenid@, se ha identificado correctamente en nuestro sistema" in data:
             return True, ""
         else:
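The login() rewrite is part of the commit-wide switch from scrapertools.downloadpage, which returned the page body as a string, to httptools.downloadpage, which returns a response object whose body lives in .data. A hedged sketch of the POST pattern the new code uses (the helper name is mine; the httptools call and its arguments come straight from the diff):

import urllib

from core import httptools

def post_form(url, fields):
    # urlencode the form fields and POST them; the HTML body is in .data.
    post = urllib.urlencode(fields)
    return httptools.downloadpage(url, post=post).data

# login() then just inspects the returned page, e.g.:
#   data = post_form(host + "index.php?page=member&do=login&tarea=acceder",
#                    {'username': usuario, 'password': password})
#   logged_in = "Bienvenid@" in data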
@@ -44,18 +44,15 @@ def mainlist(item):
     logger.info()
     itemlist = []
     item.text_color = color1
 
     result, message = login()
     if result:
-        itemlist.append(item.clone(action="series", title="Últimos episodios", url=URL_BASE))
+        itemlist.append(item.clone(action="series", title="Últimos episodios", url=host))
         itemlist.append(item.clone(action="genero", title="Series por género"))
         itemlist.append(item.clone(action="alfabetico", title="Series por orden alfabético"))
         itemlist.append(item.clone(action="search", title="Buscar..."))
     else:
         itemlist.append(item.clone(action="", title=message, text_color="red"))
 
     itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
 
     return itemlist
 
 
@@ -68,7 +65,7 @@ def configuracion(item):
 
 def search(item, texto):
     logger.info()
-    item.url = "http://www.seriecanal.com/index.php?page=portada&do=category&method=post&category_id=0&order=" \
+    item.url = host + "index.php?page=portada&do=category&method=post&category_id=0&order=" \
                "C_Create&view=thumb&pgs=1&p2=1"
     try:
         post = "keyserie=" + texto
@@ -85,27 +82,24 @@ def search(item, texto):
 def genero(item):
     logger.info()
     itemlist = []
-    data = scrapertools.downloadpage(URL_BASE)
+    data = httptools.downloadpage(host).data
     data = scrapertools.find_single_match(data, '<ul class="tag-cloud">(.*?)</ul>')
 
     matches = scrapertools.find_multiple_matches(data, '<a.*?href="([^"]+)">([^"]+)</a>')
     for scrapedurl, scrapedtitle in matches:
         scrapedtitle = scrapedtitle.capitalize()
-        url = urlparse.urljoin(URL_BASE, scrapedurl)
+        url = urlparse.urljoin(host, scrapedurl)
         itemlist.append(item.clone(action="series", title=scrapedtitle, url=url))
 
     return itemlist
 
 
 def alfabetico(item):
     logger.info()
     itemlist = []
-    data = scrapertools.downloadpage(URL_BASE)
+    data = httptools.downloadpage(host).data
     data = scrapertools.find_single_match(data, '<ul class="pagination pagination-sm" style="margin:5px 0;">(.*?)</ul>')
 
     matches = scrapertools.find_multiple_matches(data, '<a.*?href="([^"]+)">([^"]+)</a>')
     for scrapedurl, scrapedtitle in matches:
-        url = urlparse.urljoin(URL_BASE, scrapedurl)
+        url = urlparse.urljoin(host, scrapedurl)
         itemlist.append(item.clone(action="series", title=scrapedtitle, url=url))
     return itemlist
 
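genero() and alfabetico() share a two-step scrape: find_single_match isolates one HTML block, find_multiple_matches pulls (href, title) pairs out of it, and urlparse.urljoin resolves the relative links against host. The same steps with the standard library, as a self-contained illustration (the scrapertools wrappers behave like these re calls):

import re
import urlparse

host = "https://www.seriecanal.com/"
html = '<ul class="tag-cloud"><a href="index.php?genre=drama">drama</a></ul>'

# Step 1: isolate the block of interest.
block = re.search('<ul class="tag-cloud">(.*?)</ul>', html, re.DOTALL).group(1)
# Step 2: extract every link/title pair inside it and resolve the URL.
for scrapedurl, scrapedtitle in re.findall('<a.*?href="([^"]+)">([^"]+)</a>', block):
    print urlparse.urljoin(host, scrapedurl), scrapedtitle.capitalize()
    # -> https://www.seriecanal.com/index.php?genre=drama Drama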
@@ -115,45 +109,38 @@ def series(item):
     itemlist = []
     item.infoLabels = {}
     item.text_color = color2
 
     if item.extra != "":
-        data = scrapertools.downloadpage(item.url, post=item.extra)
+        data = httptools.downloadpage(item.url, post=item.extra).data
     else:
-        data = scrapertools.downloadpage(item.url)
+        data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
 
     patron = '<div class="item-inner" style="margin: 0 20px 0px 0\;"><img src="([^"]+)".*?' \
              'href="([^"]+)" title="Click para Acceder a la Ficha(?:\|([^"]+)|)".*?' \
             '<strong>([^"]+)</strong></a>.*?<strong>([^"]+)</strong></p>.*?' \
             '<p class="text-warning".*?\;">(.*?)</p>'
 
     matches = scrapertools.find_multiple_matches(data, patron)
 
     for scrapedthumbnail, scrapedurl, scrapedplot, scrapedtitle, scrapedtemp, scrapedepi in matches:
         title = scrapedtitle + " - " + scrapedtemp + " - " + scrapedepi
-        url = urlparse.urljoin(URL_BASE, scrapedurl)
-        temporada = scrapertools.find_single_match(scrapedtemp, "(\d+)")
-        new_item = item.clone()
-        new_item.contentType = "tvshow"
+        url = urlparse.urljoin(host, scrapedurl)
+        temporada = scrapertools.find_single_match(scrapedtemp, "\d+")
+        episode = scrapertools.find_single_match(scrapedepi, "\d+")
+        #item.contentType = "tvshow"
         if temporada != "":
-            new_item.infoLabels['season'] = temporada
-            new_item.contentType = "season"
-
-        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + scrapedthumbnail + "]")
-        itemlist.append(new_item.clone(action="findvideos", title=title, fulltitle=scrapedtitle, url=url,
-                                       thumbnail=scrapedthumbnail, plot=scrapedplot, contentTitle=scrapedtitle,
-                                       context=["buscar_trailer"], show=scrapedtitle))
-
-    try:
-        from core import tmdb
-        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-    except:
-        pass
+            item.infoLabels['season'] = temporada
+            #item.contentType = "season"
+        if episode != "":
+            item.infoLabels['episode'] = episode
+            #item.contentType = "episode"
+        itemlist.append(item.clone(action="findvideos", title=title, url=url,
+                                   contentSerieName=scrapedtitle,
+                                   context=["buscar_trailer"]))
+    tmdb.set_infoLabels(itemlist)
     # Extra marca siguiente página
     next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" (?:onclick="return false;" |)title='
                                                      '"Página Siguiente"')
     if next_page != "/":
-        url = urlparse.urljoin(URL_BASE, next_page)
+        url = urlparse.urljoin(host, next_page)
         itemlist.append(item.clone(action="series", title=">> Siguiente", url=url, text_color=color3))
 
     return itemlist
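series() now records the season and episode hints on item.infoLabels inside the loop and resolves metadata once with tmdb.set_infoLabels(itemlist), instead of the old per-function try/except around set_infoLabels_itemlist. A small illustration of why the hints matter (plain dicts stand in for the project's Item objects; the lookup call is the one in the diff):

# Each item carries the numbers TMDb needs to match the right record.
itemlist = [
    {"contentSerieName": "Example Show", "infoLabels": {"season": "1", "episode": "3"}},
    {"contentSerieName": "Example Show", "infoLabels": {"season": "2", "episode": "1"}},
]
# tmdb.set_infoLabels(itemlist) walks the list once and fills plot, artwork
# and ratings for every (show, season, episode) triple it can resolve.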
@@ -163,10 +150,8 @@ def findvideos(item):
     logger.info()
     itemlist = []
     item.text_color = color3
 
-    data = scrapertools.downloadpage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = scrapertools.decodeHtmlentities(data)
 
     # Busca en la seccion descarga/torrent
     data_download = scrapertools.find_single_match(data, '<th>Episodio - Enlaces de Descarga</th>(.*?)</table>')
     patron = '<p class="item_name".*?<a href="([^"]+)".*?>([^"]+)</a>'
@@ -178,18 +163,15 @@ def findvideos(item):
         else:
             scrapedtitle = "[Torrent] " + scrapedepi
         scrapedtitle = scrapertools.htmlclean(scrapedtitle)
 
         new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)")
-        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "]")
         itemlist.append(new_item.clone(action="play", title=scrapedtitle, url=scrapedurl, server="torrent",
                                        contentType="episode"))
 
     # Busca en la seccion online
     data_online = scrapertools.find_single_match(data, "<th>Enlaces de Visionado Online</th>(.*?)</table>")
     patron = '<a href="([^"]+)\\n.*?src="([^"]+)".*?' \
              'title="Enlace de Visionado Online">([^"]+)</a>'
     matches = scrapertools.find_multiple_matches(data_online, patron)
 
     for scrapedurl, scrapedthumb, scrapedtitle in matches:
         # Deshecha enlaces de trailers
         scrapedtitle = scrapertools.htmlclean(scrapedtitle)
@@ -200,7 +182,6 @@ def findvideos(item):
 
         new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)")
         itemlist.append(new_item.clone(action="play", title=title, url=scrapedurl, contentType="episode"))
 
     # Comprueba si hay otras temporadas
     if not "No hay disponible ninguna Temporada adicional" in data:
         data_temp = scrapertools.find_single_match(data, '<div class="panel panel-success">(.*?)</table>')
@@ -210,7 +191,7 @@ def findvideos(item):
         matches = scrapertools.find_multiple_matches(data_temp, patron)
         for scrapedurl, scrapedtitle in matches:
             new_item = item.clone()
-            url = urlparse.urljoin(URL_BASE, scrapedurl)
+            url = urlparse.urljoin(host, scrapedurl)
             scrapedtitle = scrapedtitle.capitalize()
             temporada = scrapertools.find_single_match(scrapedtitle, "Temporada (\d+)")
             if temporada != "":
@@ -218,13 +199,7 @@ def findvideos(item):
                 new_item.infoLabels['episode'] = ""
                 itemlist.append(new_item.clone(action="findvideos", title=scrapedtitle, url=url, text_color="red",
                                                contentType="season"))
-
-    try:
-        from core import tmdb
-        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-    except:
-        pass
-
+    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
     new_item = item.clone()
     if config.is_xbmc():
         new_item.contextual = True
@@ -236,7 +211,6 @@ def findvideos(item):
 def play(item):
     logger.info()
     itemlist = []
-
     if item.extra == "torrent":
         itemlist.append(item.clone())
     else:
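play() hands torrent links straight back (the torrent client needs the raw URL), while everything else goes through the server connectors. The usual dispatch in this codebase looks roughly like the sketch below — servertools.find_video_items is the conventional resolver helper, but since the hunk is truncated, treat the exact call as an assumption:

from core import servertools

def resolve_non_torrent(item):
    # Let every registered server connector inspect the URL and
    # return playable items for the ones that recognise it.
    return servertools.find_video_items(data=item.url)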
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from core import httptools
-from core import scrapertools
-from platformcode import logger
-
-
-def test_video_exists(page_url):
-    logger.info("(page_url='%s')" % page_url)
-
-    if "tusfiles.net" in page_url:
-        data = httptools.downloadpage(page_url).data
-
-        if "File Not Found" in data:
-            return False, "[Tusfiles] El archivo no existe o ha sido borrado"
-        if "download is no longer available" in data:
-            return False, "[Tusfiles] El archivo ya no está disponible"
-
-    return True, ""
-
-
-def get_video_url(page_url, premium=False, user="", password="", video_password=""):
-    logger.info("page_url='%s'" % page_url)
-
-    # Saca el código del vídeo
-    data = httptools.downloadpage(page_url).data.replace("\\", "")
-    video_urls = []
-
-    if "tusfiles.org" in page_url:
-        matches = scrapertools.find_multiple_matches(data,
-                                                     '"label"\s*:\s*(.*?),"type"\s*:\s*"([^"]+)","file"\s*:\s*"([^"]+)"')
-        for calidad, tipo, video_url in matches:
-            tipo = tipo.replace("video/", "")
-            video_urls.append([".%s %sp [tusfiles]" % (tipo, calidad), video_url])
-
-        video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0].rsplit(" ")[1]))
-    else:
-        matches = scrapertools.find_multiple_matches(data, '<source src="([^"]+)" type="([^"]+)"')
-        for video_url, tipo in matches:
-            tipo = tipo.replace("video/", "")
-            video_urls.append([".%s [tusfiles]" % tipo, video_url])
-
-        id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
-        rand = scrapertools.find_single_match(data, 'name="rand" value="([^"]+)"')
-        if id and rand:
-            post = "op=download2&id=%s&rand=%s&referer=&method_free=&method_premium=" % (id, rand)
-            location = httptools.downloadpage(page_url, post, follow_redirects=False, only_headers=True).headers.get(
-                "location")
-            if location:
-                ext = location[-4:]
-                video_urls.append(["%s [tusfiles]" % ext, location])
-
-    return video_urls
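The deleted tusfiles connector also documents the contract every server module implements: test_video_exists(page_url) returns (exists, error_message), and get_video_url(...) returns a list of [label, direct_url] pairs (tusfiles sorted them by quality). A stub of that contract for reference — the names follow the deleted file, the bodies are placeholders:

# -*- coding: utf-8 -*-

from platformcode import logger


def test_video_exists(page_url):
    # (True, "") when playable; (False, user-facing message) otherwise.
    logger.info("(page_url='%s')" % page_url)
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    # Each entry: [label shown in the quality menu, playable URL].
    logger.info("page_url='%s'" % page_url)
    return [[".mp4 [example]", "http://example.com/video.mp4"]]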
@@ -1,73 +0,0 @@
-{
-    "active": true,
-    "find_videos": {
-        "ignore_urls": [
-            "http://vidspot.net/embed-theme.html",
-            "http://vidspot.net/embed-jquery.html",
-            "http://vidspot.net/embed-s.html",
-            "http://vidspot.net/embed-images.html",
-            "http://vidspot.net/embed-faq.html",
-            "http://vidspot.net/embed-embed.html",
-            "http://vidspot.net/embed-ri.html",
-            "http://vidspot.net/embed-d.html",
-            "http://vidspot.net/embed-css.html",
-            "http://vidspot.net/embed-js.html",
-            "http://vidspot.net/embed-player.html",
-            "http://vidspot.net/embed-cgi.html",
-            "http://vidspot.net/embed-i.html",
-            "http://vidspot.net/images",
-            "http://vidspot.net/theme",
-            "http://vidspot.net/xupload",
-            "http://vidspot.net/s",
-            "http://vidspot.net/js",
-            "http://vidspot.net/jquery",
-            "http://vidspot.net/login",
-            "http://vidspot.net/make",
-            "http://vidspot.net/i",
-            "http://vidspot.net/faq",
-            "http://vidspot.net/tos",
-            "http://vidspot.net/premium",
-            "http://vidspot.net/checkfiles",
-            "http://vidspot.net/privacy",
-            "http://vidspot.net/refund",
-            "http://vidspot.net/links",
-            "http://vidspot.net/contact"
-        ],
-        "patterns": [
-            {
-                "pattern": "vidspot.(?:net/|php\\?id=)(?:embed-)?([a-z0-9]+)",
-                "url": "http://vidspot.net/\\1"
-            }
-        ]
-    },
-    "free": true,
-    "id": "vidspot",
-    "name": "vidspot",
-    "settings": [
-        {
-            "default": false,
-            "enabled": true,
-            "id": "black_list",
-            "label": "@60654",
-            "type": "bool",
-            "visible": true
-        },
-        {
-            "default": 0,
-            "enabled": true,
-            "id": "favorites_servers_list",
-            "label": "@60655",
-            "lvalues": [
-                "No",
-                "1",
-                "2",
-                "3",
-                "4",
-                "5"
-            ],
-            "type": "list",
-            "visible": false
-        }
-    ],
-    "thumbnail": "server_vidspot.png"
-}
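The find_videos block was how the core spotted vidspot links inside scraped pages: each pattern's first capture group is the video id, "url" rebuilds the canonical link from it, and ignore_urls weeds out the site's static pages that the regex would otherwise match. Applying the deleted pattern with plain re, as an illustration (the core's matcher adds ignore-list filtering and caching on top):

import re

pattern = r"vidspot.(?:net/|php\?id=)(?:embed-)?([a-z0-9]+)"
page = '<iframe src="http://vidspot.net/embed-a1b2c3d4"></iframe>'
for video_id in re.findall(pattern, page):
    print "http://vidspot.net/" + video_id  # -> http://vidspot.net/a1b2c3d4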
@@ -1,57 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from core import scrapertools
-from platformcode import logger
-
-
-def test_video_exists(page_url):
-    logger.info("(page_url='%s')" % page_url)
-
-    # No existe / borrado: http://vidspot.net/8jcgbrzhujri
-    data = scrapertools.cache_page("http://anonymouse.org/cgi-bin/anon-www.cgi/" + page_url)
-    if "File Not Found" in data or "Archivo no encontrado" in data or '<b class="err">Deleted' in data \
-            or '<b class="err">Removed' in data or '<font class="err">No such' in data:
-        return False, "No existe o ha sido borrado de vidspot"
-
-    return True, ""
-
-
-def get_video_url(page_url, premium=False, user="", password="", video_password=""):
-    logger.info("url=%s" % page_url)
-
-    # Normaliza la URL
-    videoid = scrapertools.get_match(page_url, "http://vidspot.net/([a-z0-9A-Z]+)")
-    page_url = "http://vidspot.net/embed-%s-728x400.html" % videoid
-    data = scrapertools.cachePage(page_url)
-    if "Access denied" in data:
-        geobloqueo = True
-    else:
-        geobloqueo = False
-
-    if geobloqueo:
-        url = "http://www.videoproxy.co/hide.php"
-        post = "go=%s" % page_url
-        location = scrapertools.get_header_from_response(url, post=post, header_to_get="location")
-        url = "http://www.videoproxy.co/%s" % location
-        data = scrapertools.cachePage(url)
-
-    # Extrae la URL
-    media_url = scrapertools.find_single_match(data, '"file" : "([^"]+)",')
-
-    video_urls = []
-
-    if media_url != "":
-        if geobloqueo:
-            url = "http://www.videoproxy.co/hide.php"
-            post = "go=%s" % media_url
-            location = scrapertools.get_header_from_response(url, post=post, header_to_get="location")
-            media_url = "http://www.videoproxy.co/%s&direct=false" % location
-        else:
-            media_url += "&direct=false"
-
-        video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [vidspot]", media_url])
-
-    for video_url in video_urls:
-        logger.info("%s - %s" % (video_url[0], video_url[1]))
-
-    return video_urls