Eliminados y actualizados
pelispekes, streamixcloud: eliminados (las webs ya no funcionan); clipwatching, dailymotion, thevid: actualizados
This commit is contained in:
@@ -1,12 +0,0 @@
|
||||
{
|
||||
"id": "pelispekes",
|
||||
"name": "PelisPekes",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["lat"],
|
||||
"thumbnail": "pelispekes.png",
|
||||
"banner": "pelispekes.png",
|
||||
"categories": [
|
||||
"movie"
|
||||
]
|
||||
}
|
||||
@@ -1,63 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re
|
||||
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
|
||||
|
||||
def mainlist(item):
    """List the movies found on a pelispekes index page.

    Scrapes every poster card from ``item.url`` (defaulting to the site
    root on the first call), emits one "findvideos" item per movie and,
    when the page links a next page, a pagination item that re-enters
    this same action.
    """
    logger.info()
    itemlist = []

    # First entry arrives without a URL: start at the site's front page.
    if item.url == "":
        item.url = "http://www.pelispekes.com/"

    data = scrapertools.cachePage(item.url)

    # One match per poster card, capturing (url, title, thumbnail).
    patron = ('<div class="poster-media-card"[^<]+'
              '<a href="([^"]+)" title="([^"]+)"[^<]+'
              '<div class="poster"[^<]+'
              '<div class="title"[^<]+'
              '<span[^<]+</span[^<]+'
              '</div[^<]+'
              '<span class="rating"[^<]+'
              '<i[^<]+</i><span[^<]+</span[^<]+'
              '</span[^<]+'
              '<div class="poster-image-container"[^<]+'
              '<img width="\d+" height="\d+" src="([^"]+)"')
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, title, thumbnail in matches:
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
                 fanart=thumbnail, plot="", contentTitle=title, contentThumbnail=thumbnail))

    # Extrae la pagina siguiente: the chevron link, when present.
    next_page_url = scrapertools.find_single_match(
        data, '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right')
    if next_page_url != "":
        itemlist.append(Item(channel=item.channel, action="mainlist", title=">> Página siguiente",
                             url=next_page_url, viewmode="movie"))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Collect playable server links from a single movie page.

    Rewrites the site's internal ``tune.php?nt=`` player references to
    their ``netu.tv`` URL form, stores the cleaned synopsis on the item,
    and hands the page over to servertools for link extraction.
    """
    logger.info("item=" + item.tostring())
    data = scrapertools.cachePage(item.url)
    # Expose the embedded netu.tv ids under their canonical URL form.
    data = data.replace("www.pelispekes.com/player/tune.php?nt=", "netu.tv/watch_video.php?v=")

    raw_plot = scrapertools.find_single_match(data, '<h2>Sinopsis</h2>(.*?)<div')
    item.plot = scrapertools.htmlclean(raw_plot).strip()
    item.contentPlot = item.plot
    logger.info("plot=" + item.plot)

    return servertools.find_video_items(item=item, data=data)
|
||||
@@ -22,7 +22,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
|
||||
video_urls = []
|
||||
videos = scrapertools.find_multiple_matches(unpacked, 'file:"([^"]+).*?label:"([^"]+)')
|
||||
for video, label in videos:
|
||||
video_urls.append([label + " [clipwatching]", video])
|
||||
logger.info("Url: %s" % videos)
|
||||
if ".jpg" not in video:
|
||||
video_urls.append([label + " [clipwatching]", video])
|
||||
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
|
||||
return video_urls
|
||||
|
||||
@@ -8,6 +8,8 @@ from platformcode import logger
|
||||
def test_video_exists(page_url):
    """Report whether the Dailymotion page still serves the video.

    Returns ``(True, "")`` when the page looks alive, or ``(False, msg)``
    when the response body carries the rejection banner or the server
    answers 404.
    """
    logger.info("(page_url='%s')" % page_url)
    response = httptools.downloadpage(page_url)
    removed = "Contenido rechazado" in response.data or response.code == 404
    if removed:
        return False, "[Dailymotion] El archivo no existe o ha sido borrado"
    return True, ""
|
||||
|
||||
@@ -1,42 +0,0 @@
|
||||
{
|
||||
"active": true,
|
||||
"find_videos": {
|
||||
"ignore_urls": [],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "streamix.cloud/(?:embed-|)([A-z0-9]+)",
|
||||
"url": "http://streamix.cloud/embed-\\1.html"
|
||||
}
|
||||
]
|
||||
},
|
||||
"free": true,
|
||||
"id": "streamixcloud",
|
||||
"name": "streamixcloud",
|
||||
"settings": [
|
||||
{
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"id": "black_list",
|
||||
"label": "@60654",
|
||||
"type": "bool",
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"id": "favorites_servers_list",
|
||||
"label": "@60655",
|
||||
"lvalues": [
|
||||
"No",
|
||||
"1",
|
||||
"2",
|
||||
"3",
|
||||
"4",
|
||||
"5"
|
||||
],
|
||||
"type": "list",
|
||||
"visible": false
|
||||
}
|
||||
],
|
||||
"thumbnail": "http://i.imgur.com/NuD85Py.png?1"
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from lib import jsunpack
|
||||
from platformcode import logger
|
||||
|
||||
|
||||
def test_video_exists(page_url):
    """Report whether streamix.cloud still serves the video.

    Scans the page body for the host's known failure banners and returns
    ``(False, msg)`` on the first hit, ``(True, "")`` otherwise.
    """
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    # Known failure markers paired with the message shown to the user.
    failures = (
        ("Not Found", "[streamixcloud] El archivo no existe o ha sido borrado"),
        ("File was deleted", "[streamixcloud] El archivo no existe o ha sido borrado"),
        ("Video is processing", "[streamixcloud] El video se está procesando, inténtelo mas tarde"),
    )
    for marker, message in failures:
        if marker in data:
            return False, message
    return True, ""
|
||||
|
||||
|
||||
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Extract the direct media URLs from a streamix.cloud embed page.

    Downloads the page, unpacks the p.a.c.k.e.r.-obfuscated player
    script and returns a list of ``[label, url]`` pairs, one per
    ``file:"..."`` entry found in the unpacked source.

    Returns an empty list when the page no longer carries the packed
    script or it yields no file entries (dead link / layout change);
    the original code crashed here (``media_url[0]`` on an empty match
    list, or unpacking an empty string).
    """
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    video_urls = []
    patron = "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script"
    packed = scrapertools.find_single_match(data, patron)
    if not packed:
        # No obfuscated player on the page: nothing to unpack.
        return video_urls
    data = jsunpack.unpack(packed)
    media_url = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",')
    if not media_url:
        # Unpacked script exposes no file entries: treat as no video.
        return video_urls
    # Label every entry with the extension of the first file found.
    ext = scrapertools.get_filename_from_url(media_url[0])[-4:]
    for url in media_url:
        video_urls.append(["%s [streamixcloud]" % ext, url])
    return video_urls
|
||||
@@ -11,6 +11,8 @@ def test_video_exists(page_url):
|
||||
data = httptools.downloadpage(page_url).data
|
||||
if "Video not found..." in data:
|
||||
return False, config.get_localized_string(70292) % "Thevid"
|
||||
if "Video removed for inactivity..." in data:
|
||||
return False, "[Thevid] El video ha sido removido por inactividad"
|
||||
return True, ""
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user