@@ -236,31 +236,17 @@ def episodiosxtemp(item):
def findvideos(item):
    logger.info()
    itemlist = []
    lang = []
    data = httptools.downloadpage(item.url).data
    video_items = servertools.find_video_items(item)
    # Strip encoded quotes, whitespace and <br> tags so the unquoted-attribute regexes below can match
    data = re.sub(r'&quot;|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    language_items = scrapertools.find_single_match(data,
                                                    '<ul class=tabs-sidebar-ul>(.+?)<\/ul>')
    matches = scrapertools.find_multiple_matches(language_items,
                                                 '<li><a href=#ts(.+?)><span>(.+?)<\/span><\/a><\/li>')
    for idl, scrapedlang in matches:
        if int(idl) < 5 and int(idl) != 1:
            lang.append(scrapedlang)
    i = 0
    logger.info(lang)
    for videoitem in video_items:
        videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
        #videoitem.language = scrapertools.find_single_match(data, '<span class="f-info-title">Idioma:<\/span>\s*<span '
        #                                                          'class="f-info-text">(.*?)<\/span>')
        if i < len(lang):
            videoitem.language = lang[i]
        else:
            videoitem.language = lang[len(lang) - 1]
        videoitem.language = scrapertools.find_single_match(data, '<span class=f-info-title>Idioma:<\/span>\s*<span '
                                                                  'class=f-info-text>(.*?)<\/span>')

        videoitem.title = item.contentSerieName + ' (' + videoitem.server + ') (' + videoitem.language + ')'
        videoitem.quality = 'default'
        videoitem.context = item.context
        i = i + 1
        itemlist.append(videoitem)

    # Required by FilterTools
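For context, the scrapertools helpers used throughout these hunks are thin wrappers over re; a minimal sketch of the behaviour assumed here (the real implementations live in core/scrapertools.py and may differ in detail):

import re

def find_single_match(data, patron):
    # First capture group of the first match, or "" when nothing matches
    match = re.search(patron, data, re.DOTALL)
    return match.group(1) if match else ""

def find_multiple_matches(data, patron):
    # Every match; tuples when the pattern defines several capture groups
    return re.findall(patron, data, re.DOTALL)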
@@ -158,14 +158,13 @@ def youtube_search(item):
    titulo = urllib.quote(titulo)
    titulo = titulo.replace("%20", "+")
    data = scrapertools.downloadpage("https://www.youtube.com/results?sp=EgIQAQ%253D%253D&q=" + titulo)

    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # Superseded markup-based pattern and loop (results are now scraped from YouTube's embedded JSON):
    # patron = '<span class="yt-thumb-simple">.*?(?:src="https://i.ytimg.com/|data-thumb="https://i.ytimg.com/)([^"]+)"'
    #          '.*?<h3 class="yt-lockup-title ">.*?<a href="([^"]+)".*?title="([^"]+)".*?'
    #          '</a><span class="accessible-description".*?>.*?(\d+:\d+)'
    patron = """"thumbnails":\[\{"url":"(https://i.ytimg.com/vi[^"]+).*?"""
    patron += """simpleText":"([^"]+).*?"""
    patron += """simpleText":"[^"]+.*?simpleText":"([^"]+).*?"""
    patron += """url":"([^"]+)"""
    matches = scrapertools.find_multiple_matches(data, patron)
    # for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedduration in matches:
    #     scrapedthumbnail = urlparse.urljoin("https://i.ytimg.com/", scrapedthumbnail)
    for scrapedthumbnail, scrapedtitle, scrapedduration, scrapedurl in matches:
        scrapedtitle = scrapedtitle.decode("utf-8")
        scrapedtitle = scrapedtitle + " (" + scrapedduration + ")"
        if item.contextual:
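The rewritten patron reads YouTube's embedded JSON rather than the old thumbnail markup; a quick self-contained check of the group order it assumes, run against a synthetic snippet (not real YouTube output):

import re

patron = '"thumbnails":\\[\\{"url":"(https://i.ytimg.com/vi[^"]+).*?'
patron += 'simpleText":"([^"]+).*?'
patron += 'simpleText":"[^"]+.*?simpleText":"([^"]+).*?'
patron += 'url":"([^"]+)'

sample = ('"thumbnails":[{"url":"https://i.ytimg.com/vi/abc123/default.jpg"}],'
          '"title":{"simpleText":"Some video"},'
          '"owner":{"simpleText":"skipped"},'
          '"length":{"simpleText":"12:34"},'
          '"navigationEndpoint":{"url":"/watch?v=abc123"}')

# One tuple per video: (thumbnail, title, duration, url)
print(re.findall(patron, sample))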
@@ -1,22 +0,0 @@
{
    "id": "vernovelasonline",
    "name": "Ver Novelas Online",
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "thumbnail": "https://s16.postimg.org/g4lzydrmd/vernovelasonline1.png",
    "banner": "https://s16.postimg.org/w44nhxno5/vernovelasonline2.png",
    "categories": [
        "tvshow"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
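For reference, a minimal sketch of how a channel manifest like this can be inspected with the standard json module (file name assumed):

import json

with open("vernovelasonline.json") as f:
    channel = json.load(f)

# "settings" drives the channel's entry in the global-search configuration
search = [s for s in channel["settings"] if s["id"] == "include_in_global_search"][0]
print(channel["name"], channel["language"], search["default"])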
@@ -1,216 +0,0 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger

host = "http://ver-novelas-online.com/"


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel = item.channel, title = "Ultimos capitulos subidos", action = "capitulos_ultimos", url = host))
    itemlist.append(Item(channel = item.channel, title = "Novelas por letra", action = "novelas_letra", url = host + "video/category/letra-"))
    itemlist.append(Item(channel = item.channel, title = "Novelas en emision (Sin caratulas)", action = "novelas_emision", url = host))
    itemlist.append(Item(channel = item.channel, title = ""))
    itemlist.append(Item(channel = item.channel, title = "Buscar novela", action = "search", url = host + "?s="))
    return itemlist


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        item.channel = "vernovelasonline"
        item.extra = "newest"
        item.url = "http://www.ver-novelas-online.com/"
        item.action = "capitulos_ultimos"
        itemlist = capitulos_ultimos(item)
    # Catch the exception so the "newest" listing is not interrupted when a single channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


def novelas_emision(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "")
    block = scrapertools.find_single_match(data, '<aside id="text-2.*?</aside>')
    match = scrapertools.find_multiple_matches(block, 'a href="([^"]+)">([^<]+)')
    for url, titulo in match:
        itemlist.append(Item(channel = item.channel,
                             action = "capitulos_de_una_novela",
                             title = titulo,
                             url = url,
                             extra1 = titulo
                             ))
    return itemlist


def novelas_letra(item):
    logger.info()
    itemlist = []
    for letra in "abcdefghijklmnopqrstuvwxyz":
        itemlist.append(item.clone(title = letra.upper(), url = item.url + letra, action = "lista"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = "http://ver-novelas-online.com/?s=" + texto
    item.extra = "busca"
    if texto != '':
        return lista(item)
    else:
        return []


def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "")
    accion = "capitulos_de_una_novela"
    patron = """itemprop="url" href="([^"]+)".*?mark">([^<]*)</a>.*?href="([^"]+)"""
    if item.extra == "busca":
        patron = """itemprop="url" href="([^"]+)".*?mark">([^<]*)</a>.*?href='([^']+)"""
        accion = "findvideos"
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, title, thumbnail in matches:
        mtitle = title.replace("CAPITULOS COMPLETOS", "").title()
        mextra1 = scrapertools.find_single_match(mtitle, "(?i)(.*?) Capitulo")
        mextra2 = scrapertools.find_single_match(mtitle, "(?i)(cap.*?[0-9]+)").title()
        if mextra1 == "":
            mextra1 = mextra2 = mtitle
        itemlist.append(Item(channel = item.channel,
                             action = accion,
                             title = mtitle,
                             url = url,
                             thumbnail = thumbnail,
                             fanart = thumbnail,
                             plot = "prueba de plot",
                             extra1 = mextra1,
                             extra2 = mextra2
                             ))
    mpagina = scrapertools.find_single_match(data, 'page-numbers" href="([^"]+)')
    pagina = scrapertools.find_single_match(mpagina, "page/([0-9]+)")
    if len(pagina) > 0 and "busca" not in item.extra:
        itemlist.append(
            Item(channel = item.channel,
                 action = "lista",
                 title = "Pagina: " + pagina,
                 url = mpagina,
                 extra = item.extra
                 ))
    return itemlist
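The two functions around this point (lista above, capitulos_ultimos below) split episode titles into show name (extra1) and episode tag (extra2) with two small regexes; a hedged demonstration on a made-up title:

import re

title = "La Usurpadora Capitulo 45"
mextra1 = re.search("(?i)(.*?) Capitulo", title).group(1)          # "La Usurpadora"
mextra2 = re.search("(?i)(cap.*?[0-9]+)", title).group(1).title()  # "Capitulo 45"
print(mextra1 + " / " + mextra2)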

def capitulos_ultimos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "")
    patron = "<div class='item'>.*?<a href='([^']+)"
    patron += ".*?title='([^']+)"
    patron += ".*?img src='([^']+)"
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, title, thumbnail in matches:
        mextra1 = scrapertools.find_single_match(title, "(?i)(.*?) Capitulo")
        mextra2 = scrapertools.find_single_match(title, "(?i)(cap.*?[0-9]+)").title()
        itemlist.append(
            Item(channel = item.channel,
                 action = "findvideos",
                 title = title.title(),
                 url = url,
                 thumbnail = thumbnail,
                 extra1 = mextra1,
                 extra2 = mextra2
                 ))
    mpagina = scrapertools.find_single_match(data, 'next" href="([^"]+)')
    pagina = scrapertools.find_single_match(mpagina, "page/([0-9]+)")
    if "newest" not in item.extra:
        itemlist.append(
            Item(channel = item.channel,
                 action = "capitulos_ultimos",
                 title = "Pagina: " + pagina,
                 url = mpagina
                 ))
    return itemlist


def capitulos_de_una_novela(item):
    logger.info()
    itemlist = []
    url = item.url
    data = httptools.downloadpage(url).data
    if len(item.thumbnail) == 0:
        item.thumbnail = scrapertools.find_single_match(data, 'og:image" content="([^"]+)')
    matches = scrapertools.find_multiple_matches(data, '<a target="_blank" href="([^"]+)">([^<]+)')

    for url, titulo in matches:
        mextra2 = scrapertools.find_single_match(titulo, "(?i)(cap.*?[0-9]+)")
        itemlist.append(
            Item(channel = item.channel,
                 action = "findvideos",
                 title = titulo,
                 thumbnail = item.thumbnail,
                 url = url,
                 extra1 = item.extra1,
                 extra2 = mextra2
                 ))
    itemlist.append(Item(channel = item.channel, title = "Novela: [COLOR=blue]" + item.extra1 + "[/COLOR]"))
    # Reverse the list order
    itemlist = itemlist[::-1]
    return itemlist


def findvideos(item):
    data = httptools.downloadpage(item.url).data
    data = data.replace("&quot;", "").replace("\n", "").replace("\\", "")
    itemlist = servertools.find_video_items(data = data)
    for video in itemlist:
        video.channel = item.channel
        video.action = "play"
        video.thumbnail = item.thumbnail
        video.fulltitle = item.extra1 + " / " + item.extra2
        video.title = "Ver en: " + video.server
    itemlist.append(Item(channel = item.channel))
    block = scrapertools.find_single_match(data, '<div class="btn-group-justified">.*?</div>')
    if len(block) > 0:
        matches = scrapertools.find_multiple_matches(block, 'href="([^"]+).*?hidden-xs">([^<]+)')
        for url, xs in matches:
            accion = "findvideos"
            capitulo = scrapertools.find_single_match(url, "capitulo-([^/]+)")
            if "DE CAPITULOS" in xs:
                xs = "LISTA" + xs + ": " + item.extra1
                accion = "capitulos_de_una_novela"
            else:
                xs += ": " + capitulo
                capitulo = "Capitulo " + capitulo
            itemlist.append(
                Item(channel = item.channel,
                     title = "[COLOR=yellow]" + xs.title() + "[/COLOR]",
                     action = accion,
                     url = url,
                     thumbnail = item.thumbnail,
                     extra1 = item.extra1,
                     extra2 = capitulo
                     ))
    else:
        url = scrapertools.find_single_match(data, "<p><a href='(.*?)'\s+style='float:right")
        capitulo = scrapertools.find_single_match(item.extra2, "(?i)capitulo ([^/]+)")
        itemlist.append(
            Item(channel = item.channel,
                 title = "[COLOR=yellow]" + "" + "Listado de Capitulos: " + item.extra1.title() + "[/COLOR]",
                 action = "capitulos_de_una_novela",
                 url = url,
                 thumbnail = item.thumbnail,
                 extra1 = item.extra1,
                 extra2 = capitulo
                 ))
    return itemlist
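Both findvideos variants in this file delegate host detection to servertools.find_video_items; a rough sketch of the contract assumed here (hypothetical host patterns, the real matching lives in core/servertools.py):

import re
from core.item import Item

# Hypothetical host patterns for illustration only
KNOWN_SERVERS = {
    "okru": r"ok\.ru/videoembed/(\d+)",
    "netutv": r"netu\.tv/watch_video\.php\?v=(\w+)",
}

def find_video_items_sketch(data):
    # One playable Item per recognised video URL found in the raw HTML
    itemlist = []
    for server, patron in KNOWN_SERVERS.items():
        for video_id in re.findall(patron, data):
            itemlist.append(Item(server = server, url = video_id, action = "play"))
    return itemlist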
@@ -412,6 +412,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog):
        self.addControl(control)

        control.setVisible(False)
        control.setLabel(c["label"])
        # frodo fix
        s = self.values[c["id"]]
        if s is None:
@@ -4,7 +4,7 @@
    "ignore_urls": [],
    "patterns": [
        {
            "pattern": "(?:datoporn.com|dato.porn)/(?:embed-|)([A-z0-9]+)",
            "pattern": "(?:datoporn.com|dato.porn|datoporn.co)/(?:embed-|)([A-z0-9]+)",
            "url": "http://dato.porn/embed-\\1.html"
        }
    ]
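The pattern gains the datoporn.co mirror; a quick check that all three host spellings resolve to the same embed URL:

import re

patron = "(?:datoporn.com|dato.porn|datoporn.co)/(?:embed-|)([A-z0-9]+)"
for url in ("http://datoporn.com/abc123",
            "http://dato.porn/embed-abc123",
            "http://datoporn.co/abc123"):
    video_id = re.search(patron, url).group(1)
    print("http://dato.porn/embed-%s.html" % video_id)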
@@ -39,4 +39,4 @@
        }
    ],
    "thumbnail": "http://i.imgur.com/tBSWudd.png?1"
}