Merge remote-tracking branch 'alfa-addon/master'
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.4.6" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.4.8" provider-name="Alfa Addon">
    <requires>
        <import addon="xbmc.python" version="2.1.0"/>
        <import addon="script.module.libtorrent" optional="true"/>
@@ -19,11 +19,12 @@
    </assets>
    <news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» gnula » playpornx
» plusdede » yaske
» streamplay » bdupload
» bitertv » userscloud
» canalpelis ¤ arreglos internos
» yaske » divxatope
» javtasty » bitp
» serviporno » gvideo
» vk » cinetux
» ciberpeliculashd
¤ arreglos internos
</news>
    <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
    <summary lang="en">Browse web pages using Kodi</summary>

@@ -134,8 +134,7 @@ def novedades_episodios(item):
        contentTitle = scrapedtitle.replace('#' + episodio, '')

        itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot=scrapedplot,
                             hasContentDetails=True, contentSeason=1, contentTitle=contentTitle))
                             thumbnail=scrapedthumbnail, plot=scrapedplot, contentSeason=1, contentTitle=contentTitle))

    return itemlist


@@ -89,7 +89,6 @@ def start(itemlist, item):
        videoitem.contentTitle=item.contentTitle
        videoitem.contentType=item.contentType
        videoitem.episode_id=item.episode_id
        videoitem.hasContentDetails=item.hasContentDetails
        #videoitem.infoLabels=item.infoLabels
        videoitem.thumbnail=item.thumbnail
        #videoitem.title=item.title

@@ -1,24 +0,0 @@
{
    "id": "bityouth",
    "name": "Bityouth",
    "active": true,
    "adult": false,
    "language": ["cast"],
    "thumbnail": "http://s6.postimg.org/6ash180up/bityoulogo.png",
    "banner": "bityouth.png",
    "categories": [
        "torrent",
        "movie",
        "tvshow"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": true,
            "visible": true
        }
    ]
}

File diff suppressed because it is too large
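
Several bundled channels (bityouth, borrachodetorrent, bricocine, and teledocumentales further down) are removed in this merge, each deleting a JSON definition like the one above plus its Python module. As context, a standalone sketch of validating such a channel definition against the keys every JSON in this diff carries; validate_channel is an illustrative helper, not part of the Alfa core:

import json

# Keys present in every channel JSON shown in this merge.
REQUIRED_KEYS = {"id", "name", "active", "adult", "language", "categories"}

def validate_channel(raw):
    channel = json.loads(raw)
    missing = REQUIRED_KEYS - set(channel)
    if missing:
        raise ValueError("channel JSON missing keys: %s" % ", ".join(sorted(missing)))
    # every settings entry in this diff is a bool toggle with these fields
    for setting in channel.get("settings", []):
        for key in ("id", "type", "default", "enabled", "visible"):
            if key not in setting:
                raise ValueError("setting %r missing %r" % (setting.get("id"), key))
    return channel

sample = '''{"id": "bityouth", "name": "Bityouth", "active": true,
"adult": false, "language": ["cast"], "categories": ["torrent"],
"settings": [{"id": "include_in_global_search", "type": "bool",
"default": false, "enabled": true, "visible": true}]}'''
print(validate_channel(sample)["id"])   # -> bityouth
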
@@ -1,31 +0,0 @@
{
    "id": "borrachodetorrent",
    "name": "BorrachodeTorrent",
    "active": true,
    "adult": false,
    "language": ["cast"],
    "thumbnail": "http://imgur.com/BePrYmy.png",
    "categories": [
        "torrent",
        "movie",
        "tvshow"
    ],
    "settings": [
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}

File diff suppressed because it is too large
@@ -1,24 +0,0 @@
{
    "id": "bricocine",
    "name": "Bricocine",
    "active": true,
    "adult": false,
    "language": ["cast"],
    "thumbnail": "http://s6.postimg.org/9u8m1ep8x/bricocine.jpg",
    "banner": "bricocine.png",
    "categories": [
        "torrent",
        "movie",
        "tvshow"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}

File diff suppressed because it is too large
@@ -143,14 +143,10 @@ def peliculas(item):
        contentTitle = scrapedtitle.partition(':')[0].partition(',')[0]
        title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
            scrapedtitle, year, quality)
        thumb_id = scrapertools.find_single_match(scrapedthumbnail, '.*?\/uploads\/(.*?)-')
        thumbnail = "/%s.jpg" % thumb_id
        filtro_list = {"poster_path": thumbnail}
        filtro_list = filtro_list.items()

        itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
                                   url=scrapedurl, infoLabels={'filtro':filtro_list},
                                   contentTitle=contentTitle, thumbnail=thumbnail,
                                   url=scrapedurl, infoLabels={'year': year},
                                   contentTitle=contentTitle, thumbnail=scrapedthumbnail,
                                   title=title, context="buscar_trailer", quality = quality))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
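
The hunk above changes how canalpelis hints the TMDB lookup: instead of passing a 'filtro' built from the scraped poster path, it now passes the scraped year and keeps the original thumbnail. A standalone sketch contrasting the two matching strategies over an illustrative candidate list (not the real tmdb module):

# Illustrative only: candidates stand in for a TMDB title-search result.
candidates = [
    {"title": "Coco", "year": 2017, "poster_path": "/abc123.jpg"},
    {"title": "Coco", "year": 1998, "poster_path": "/xyz789.jpg"},
]

def pick_by_filter(results, poster_path):
    # old approach: match on the scraped poster path
    return [r for r in results if r["poster_path"] == poster_path]

def pick_by_year(results, year):
    # new approach: match on the scraped release year
    return [r for r in results if r["year"] == year]

print(pick_by_filter(candidates, "/abc123.jpg"))
print(pick_by_year(candidates, 2017))
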
@@ -168,17 +164,17 @@ def peliculas(item):

    for item in itemlist:
        if item.infoLabels['plot'] == '':
            data = httptools.downloadpage(item.url).data
            data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
            datas = httptools.downloadpage(item.url).data
            datas = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", datas)
            item.fanart = scrapertools.find_single_match(
                data, "<meta property='og:image' content='([^']+)' />")
                datas, "<meta property='og:image' content='([^']+)' />")
            item.fanart = item.fanart.replace('w780', 'original')
            item.plot = scrapertools.find_single_match(data, '</span></h4><p>([^*]+)</p><h4')
            item.plot = scrapertools.find_single_match(datas, '</h4><p>(.*?)</p>')
            item.plot = scrapertools.htmlclean(item.plot)
            item.infoLabels['director'] = scrapertools.find_single_match(
                data, '<div class="name"><a href="[^"]+">([^<]+)</a>')
                datas, '<div class="name"><a href="[^"]+">([^<]+)</a>')
            item.infoLabels['genre'] = scrapertools.find_single_match(
                data, 'rel="tag">[^<]+</a><a href="[^"]+" rel="tag">([^<]+)</a>')
                datas, 'rel="tag">[^<]+</a><a href="[^"]+" rel="tag">([^<]+)</a>')

    return itemlist

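This hunk renames data to datas and loosens the plot pattern from '([^*]+)' to the non-greedy '(.*?)'. For readers new to the helpers used throughout this diff, a simplified re-implementation of scrapertools.find_single_match — a sketch of its behaviour, not the actual Alfa source:

import re

def find_single_match(data, patron):
    # returns the first captured group, or '' when nothing matches
    match = re.search(patron, data, re.DOTALL)
    return match.group(1) if match else ''

html = "<h4>Sinopsis</h4><p>Una pelicula de ejemplo.</p>"
print(find_single_match(html, '</h4><p>(.*?)</p>'))   # -> Una pelicula de ejemplo.
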
@@ -189,8 +185,7 @@ def generos(item):

    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # logger.info(data)
    # url, title, cantidad

    patron = '<li class="cat-item cat-item-[^"]+"><a href="([^"]+)" title="[^"]+">([^<]+)</a> <i>([^<]+)</i></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

@@ -216,29 +211,30 @@ def year_release(item):
    for scrapedurl, scrapedtitle in matches:

        itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
                                   url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))
                                   url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))

    return itemlist


def series(item):
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(datas)
    data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)

    patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">'
    patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">.*?'
    patron += '<div class="texto">([^<]+)</div>'

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
    for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches:
        if plot == '':
            plot = scrapertools.find_single_match(data, '<div class="texto">([^<]+)</div>')
        scrapedtitle = scrapedtitle.replace('Ver ', '').replace(
            ' Online HD', '').replace('ver ', '').replace(' Online', '')
            ' Online HD', '').replace('ver ', '').replace(' Online', '').replace(' (Serie TV)', '').strip()
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
                                   contentSerieName=scrapedtitle, show=scrapedtitle,
                                   contentSerieName=scrapedtitle, show=scrapedtitle, plot=plot,
                                   thumbnail=scrapedthumbnail, contentType='tvshow'))

    url_next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')

@@ -258,7 +254,6 @@ def temporadas(item):

    data = httptools.downloadpage(item.url).data
    datas = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(datas)
    patron = '<span class="title">([^<]+)<i>.*?'  # numeros de temporadas
    patron += '<img src="([^"]+)"></a></div>'  # capitulos

@@ -267,7 +262,7 @@ def temporadas(item):
    for scrapedseason, scrapedthumbnail in matches:
        scrapedseason = " ".join(scrapedseason.split())
        temporada = scrapertools.find_single_match(scrapedseason, '(\d+)')
        new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail)
        new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail, extra='temporadas')
        new_item.infoLabels['season'] = temporada
        new_item.extra = ""
        itemlist.append(new_item)

@@ -285,6 +280,11 @@ def temporadas(item):

        itemlist.sort(key=lambda it: it.title)

        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                                 action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                                 text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

        return itemlist
    else:
        return episodios(item)
@@ -358,8 +358,6 @@ def findvideos(item):
    patron = '<div id="option-(\d+)" class="play-box-iframe.*?src="([^"]+)" frameborder="0" scrolling="no" allowfullscreen></iframe>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # matches = re.compile(patron, re.DOTALL).findall(data)

    for option, url in matches:
        datas = httptools.downloadpage(urlparse.urljoin(host, url),
                                       headers={'Referer': item.url}).data

@@ -375,10 +373,9 @@ def findvideos(item):
        itemlist.append(item.clone(action='play', url=url, title=title, extra1=title,
                                   server=server, language = lang, text_color=color3))

    itemlist.append(Item(channel=item.channel,
                         title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                         url=item.url, action="add_pelicula_to_library",
                         thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/libreria.png',
                         extra="findvideos", contentTitle=item.contentTitle))
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             thumbnail=thumbnail_host, contentTitle=item.contentTitle))

    return itemlist

@@ -1,22 +1,22 @@
{
    "id": "ohlatino",
    "name": "OH!Latino",
    "id": "ciberpeliculashd",
    "name": "Ciberpeliculashd",
    "active": true,
    "adult": false,
    "language": ["lat"],
    "thumbnail": "http://cinemiltonero.com/wp-content/uploads/2017/08/logo-Latino0.png",
    "banner": "https://s27.postimg.org/bz0fh8jpf/oh-pelis-banner.png",
    "thumbnail": "https://s17.postimg.org/78tekxeov/ciberpeliculashd1.png",
    "banner": "",
    "categories": [
        "movie"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "id": "modo_grafico",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_latino",
@@ -26,6 +26,14 @@
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
@@ -41,6 +49,14 @@
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - terror",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}

plugin.video.alfa/channels/ciberpeliculashd.py (new file, 151 lines)
@@ -0,0 +1,151 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger

__channel__='ciberpeliculashd'

host = "http://ciberpeliculashd.net"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
    __modo_grafico__ = True


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host + "/?peli=1"))
    itemlist.append(Item(channel = item.channel, title = "Por género", action = "filtro", url = host, extra = "categories" ))
    itemlist.append(Item(channel = item.channel, title = "Por calidad", action = "filtro", url = host, extra = "qualitys"))
    itemlist.append(Item(channel = item.channel, title = "Por idioma", action = "filtro", url = host, extra = "languages"))
    itemlist.append(Item(channel = item.channel, title = ""))
    itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/?s="))
    return itemlist


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas','latino']:
            item.url = host + "/?peli=1"
        elif categoria == 'infantiles':
            item.url = host + '/categories/animacion/?peli=1'
        elif categoria == 'terror':
            item.url = host + '/categories/terror/?peli=1'
        itemlist = peliculas(item)
        if "Pagina" in itemlist[-1].title:
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

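newest() is the hook the global "Novedades" menu calls on every channel: map the category to a URL, reuse peliculas(), drop the trailing pagination item, and swallow errors so one broken site cannot break the shared listing. A standalone sketch of that contract, with stub stand-ins for Item and the scraper:

class FakeItem(object):
    # stand-in for core.item.Item; only what this sketch needs
    def __init__(self, title="", url=""):
        self.title, self.url = title, url

def fake_peliculas(item):
    # stub for the real scraper: the last entry is the pagination link
    return [FakeItem("Movie A"), FakeItem("Movie B"), FakeItem("Pagina siguiente")]

def newest(categoria):
    item = FakeItem()
    try:
        if categoria in ['peliculas', 'latino']:
            item.url = "http://example.test/?peli=1"   # placeholder host
        itemlist = fake_peliculas(item)
        if "Pagina" in itemlist[-1].title:
            itemlist.pop()                             # drop the pagination entry
    except Exception:
        return []                                      # never break the global menu
    return itemlist

print([i.title for i in newest("peliculas")])          # -> ['Movie A', 'Movie B']
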
def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto + "&peli=1"
    item.extra = "busca"
    if texto != '':
        return peliculas(item)
    else:
        return []


def filtro(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'uk-navbar-nav-subtitle taxonomy-menu-title">%s.*?</ul>' %item.extra
    bloque = scrapertools.find_single_match(data, patron)
    patron = "href='([^']+)"
    patron += "'>([^<]+)"
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for url, titulo in matches:
        itemlist.append(Item(channel = item.channel,
                             action = "peliculas",
                             title = titulo,
                             url = url + "/?peli=1"
                             ))
    return itemlist

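filtro() narrows the page down to one navigation block before extracting links, so the very generic href pattern cannot match unrelated parts of the page. A standalone sketch of the same two-stage extraction over inline sample HTML (the markup is illustrative):

import re

html = """<span class="menu-title">categories</span>
<ul><a href='http://example.test/terror'>Terror</a>
<a href='http://example.test/comedia'>Comedia</a></ul>
<ul><a href='http://example.test/otro'>Unrelated</a></ul>"""

# stage 1: isolate the block that belongs to the requested menu
bloque = re.search('menu-title">categories.*?</ul>', html, re.DOTALL).group(0)
# stage 2: extract (url, title) pairs only inside that block
links = re.findall("href='([^']+)'>([^<]+)", bloque)
print(links)   # -> [('http://example.test/terror', 'Terror'), ('http://example.test/comedia', 'Comedia')]
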
def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, 'loop-posts".*?panel-pagination pagination-bottom')
    patron = 'a href="([^"]+)".*?'
    patron += 'img alt="([^"]+)".*?'
    patron += '((?:http|https)://image.tmdb.org[^"]+)".*?'
    patron += 'a href="([^"]+)".*?'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedurl1 in matches:
        scrapedtitle = scrapedtitle.replace(" Online imagen","").replace("Pelicula ","")
        year = scrapertools.find_single_match(scrapedtitle, "\(([0-9]+)\)")
        if year:
            year = int(year)
        else:
            year = 0
        fulltitle = scrapertools.find_single_match(scrapedtitle, "(.*?) \(")
        itemlist.append(Item(action = "findvideos",
                             channel = item.channel,
                             fulltitle = fulltitle,
                             thumbnail = scrapedthumbnail,
                             infoLabels = {'year': year},
                             title = scrapedtitle,
                             url = scrapedurl
                             ))
    tmdb.set_infoLabels(itemlist)
    page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1
    next_page = scrapertools.find_single_match(item.url,".*?peli=")
    next_page += "%s" %page
    itemlist.append(Item(action = "peliculas",
                         channel = item.channel,
                         title = "Página siguiente",
                         url = next_page
                         ))
    return itemlist

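Pagination in this channel is carried entirely by the peli= query parameter: peliculas() parses the current number out of item.url, increments it, and splices it back. A standalone sketch of that arithmetic:

import re

def next_page_url(url):
    # parse the current page number out of "...peli=N" and bump it
    page = int(re.search(r"peli=([0-9]+)", url).group(1)) + 1
    prefix = re.match(r".*?peli=", url).group(0)
    return "%s%s" % (prefix, page)

print(next_page_url("http://example.test/?peli=1"))                     # -> http://example.test/?peli=2
print(next_page_url("http://example.test/categories/terror/?peli=7"))   # -> ...terror/?peli=8
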
def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'src="([^&]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:
        title = "Ver en: %s"
        itemlist.append(item.clone(action = "play",
                                   title = title,
                                   url = scrapedurl
                                   ))
    tmdb.set_infoLabels(itemlist)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    if itemlist:
        itemlist.append(Item(channel = item.channel))
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))
    # Opción "Añadir esta película a la biblioteca de KODI"
    if item.extra != "library":
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                 action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                                 fulltitle = item.fulltitle
                                 ))
    return itemlist


def play(item):
    item.thumbnail = item.contentThumbnail
    return [item]

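findvideos() stores the template title "Ver en: %s" and lets servertools.get_servers_itemlist resolve it once the server has been detected, via the lambda. A sketch of that deferred-formatting idea, with an illustrative SimpleItem class standing in for core.item.Item:

class SimpleItem(object):
    def __init__(self, title, url, server=""):
        self.title, self.url, self.server = title, url, server

def fill_titles(itemlist, title_fn):
    # mimics the formatting step of get_servers_itemlist: the server name
    # is known only after URL detection, so the title template is resolved
    # here rather than when the item was created
    for it in itemlist:
        it.title = title_fn(it)
    return itemlist

items = [SimpleItem("Ver en: %s", "http://example.test/v1", server="openload")]
fill_titles(items, lambda i: i.title % i.server.capitalize())
print(items[0].title)   # -> Ver en: Openload
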
@@ -10,7 +10,7 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger

host = 'http://cinefoxtv.net/'
host = 'http://verhdpelis.com/'
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]


@@ -343,12 +343,14 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
def play(item):
    logger.info()
    itemlist = []
    if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url:
    if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url or "youtube" in item.url:
        data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
        id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
        item.url = "http://docs.google.com/get_video_info?docid=" + id
        if item.server == "okru":
            item.url = "https://ok.ru/videoembed/" + id
        if item.server == "youtube":
            item.url = "https://www.youtube.com/embed/" + id
    elif "links" in item.url or "www.cinetux.me" in item.url:
        data = httptools.downloadpage(item.url).data
        scrapedurl = scrapertools.find_single_match(data, '<a href="(http[^"]+)')

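The cinetux play() hunk pulls one opaque id off the intermediate page and rewrites the playback URL per server; this merge adds the YouTube route. A standalone sketch of the mapping, with the embed URLs copied from the hunk and an illustrative function name:

def embed_url(server, video_id):
    # default route in the hunk: Google Docs video info
    url = "http://docs.google.com/get_video_info?docid=" + video_id
    if server == "okru":
        url = "https://ok.ru/videoembed/" + video_id
    if server == "youtube":                      # route added in this merge
        url = "https://www.youtube.com/embed/" + video_id
    return url

print(embed_url("youtube", "dQw4w9WgXcQ"))   # -> https://www.youtube.com/embed/dQw4w9WgXcQ
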
@@ -114,7 +114,9 @@ def lista(item):
    itemlist.append(item.clone(title="Estrenos", action="entradas", url="%s/peliculas/estrenos" % host))
    itemlist.append(item.clone(title="Dvdrip", action="entradas", url="%s/peliculas/dvdrip" % host))
    itemlist.append(item.clone(title="HD (720p/1080p)", action="entradas", url="%s/peliculas/hd" % host))
    itemlist.append(item.clone(title="4K", action="entradas", url="%s/peliculas/4k" % host))
    itemlist.append(item.clone(title="HDRIP", action="entradas", url="%s/peliculas/hdrip" % host))

    itemlist.append(item.clone(title="Latino", action="entradas",
                               url="%s/peliculas/latino-peliculas" % host))
    itemlist.append(item.clone(title="VOSE", action="entradas", url="%s/peliculas/subtituladas" % host))

@@ -260,14 +260,16 @@ def findvideos(item):
    item.plot = scrapertools.find_single_match(data, '<div class="post-entry" style="height:300px;">(.*?)</div>')
    item.plot = scrapertools.htmlclean(item.plot).strip()
    item.contentPlot = item.plot

    link = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"')
    if link != "":
        link = "http://www.divxatope1.com/" + link
        logger.info("torrent=" + link)
    al_url_fa = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"')
    if al_url_fa == "":
        al_url_fa = scrapertools.find_single_match(data,
                                                   'location\.href.*?=.*?"http:\/\/divxatope1.com/(.*?)"')
    if al_url_fa != "":
        al_url_fa = "http://www.divxatope1.com/" + al_url_fa
        logger.info("torrent=" + al_url_fa)
    itemlist.append(
        Item(channel=item.channel, action="play", server="torrent", title="Vídeo en torrent", fulltitle=item.title,
             url=link, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False,
             url=al_url_fa, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False,
             parentContent=item))

    patron = '<div class=\"box1\"[^<]+<img[^<]+<\/div[^<]+<div class="box2">([^<]+)<\/div[^<]+<div class="box3">([^<]+)'

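The divxatope change replaces the single torrent-link pattern with a primary/fallback pair: if the tumejorserie/tumejorjuego redirect is absent, it retries with a direct divxatope1.com pattern, and only prefixes the base URL when one of them matched. A standalone sketch of that fallback chain over a sample line:

import re

def find_torrent_link(data):
    # primary pattern: redirect through tumejorserie/tumejorjuego
    link = re.search(r'location\.href.*?=.*?"http://(?:tumejorserie|tumejorjuego).*?link=(.*?)"', data)
    if link is None:
        # fallback added in this merge: direct divxatope1.com link
        link = re.search(r'location\.href.*?=.*?"http://divxatope1.com/(.*?)"', data)
    if link is None:
        return ""
    return "http://www.divxatope1.com/" + link.group(1)

sample = 'location.href = "http://divxatope1.com/descargar/pelicula-123"'
print(find_torrent_link(sample))   # -> http://www.divxatope1.com/descargar/pelicula-123
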
@@ -225,6 +225,8 @@ def findvideos(item):
    #itemlist = get_url(item)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    url_m3u8 = scrapertools.find_single_match(data, '<source src=(.*?) type=application/x-mpegURL\s*/>')
    itemlist.append(item.clone(url=url_m3u8, action='play'))
    patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)'
    matches = re.compile(patron, re.DOTALL).findall(data)


@@ -72,7 +72,6 @@ def peliculas(item):
                             url = scrapedurl,
                             thumbnail = scrapedthumbnail,
                             plot = plot,
                             hasContentDetails = True,
                             contentTitle = scrapedtitle,
                             contentType = "movie",
                             language=language,

@@ -6,21 +6,18 @@ from core import httptools
from core import scrapertools
from platformcode import config, logger

host = "http://www.javtasty.com"
host = "https://www.javwhores.com"


def mainlist(item):
    logger.info()
    itemlist = []

    itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/videos"))
    itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/videos?o=tr"))
    itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/videos?o=mv"))
    itemlist.append(item.clone(action="lista", title="Ordenados por duración", url=host + "/videos?o=lg"))
    itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories"))
    itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/latest-updates/"))
    itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/top-rated/"))
    itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/most-popular/"))
    itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories/"))
    itemlist.append(item.clone(title="Buscar...", action="search"))
    itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))

    return itemlist

@@ -33,7 +30,7 @@ def configuracion(item):

def search(item, texto):
    logger.info()
    item.url = "%s/search?search_query=%s&search_type=videos" % (host, texto)
    item.url = "%s/search/%s/" % (host, texto)
    item.extra = texto
    try:
        return lista(item)

@@ -48,83 +45,66 @@ def search(item, texto):
def lista(item):
    logger.info()
    itemlist = []

    # Descarga la pagina
    data = httptools.downloadpage(item.url).data

    action = "play"
    if config.get_setting("menu_info", "javtasty"):
        action = "menu_info"

    # Extrae las entradas
    patron = '<div class="well wellov well-sm".*?href="([^"]+)".*?data-original="([^"]+)" title="([^"]+)"(.*?)<div class="duration">(?:.*?</i>|)\s*([^<]+)<'
    patron = 'div class="video-item.*?href="([^"]+)".*?'
    patron += 'data-original="([^"]+)" '
    patron += 'alt="([^"]+)"(.*?)fa fa-clock-o"></i>([^<]+)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, quality, duration in matches:
        scrapedurl = urlparse.urljoin(host, scrapedurl)
        scrapedtitle = scrapedtitle.strip()
        if duration:
            scrapedtitle = "%s - %s" % (duration.strip(), scrapedtitle)

        if '>HD<' in quality:
            scrapedtitle += " [COLOR red][HD][/COLOR]"

        itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                                   fanart=scrapedthumbnail))

    # Extrae la marca de siguiente página
    next_page = scrapertools.find_single_match(data, 'href="([^"]+)" class="prevnext">')
    next_page = scrapertools.find_single_match(data, 'next"><a href="([^"]+)')
    if next_page:
        next_page = next_page.replace("&amp;", "&")
        itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))

        itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=host + next_page))
    return itemlist


def categorias(item):
    logger.info()
    itemlist = []

    # Descarga la pagina
    data = httptools.downloadpage(item.url).data

    # Extrae las entradas
    patron = '<div class="col-sm-4.*?href="([^"]+)".*?data-original="([^"]+)" title="([^"]+)"'
    patron = '(?s)<a class="item" href="([^"]+)".*?'
    patron += 'src="([^"]+)" '
    patron += 'alt="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedurl = urlparse.urljoin(host, scrapedurl)
        scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail)
        itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                                   fanart=scrapedthumbnail))

    return itemlist


def play(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    videourl = scrapertools.find_single_match(data, "var video_sd\s*=\s*'([^']+)'")
    videourl = scrapertools.find_single_match(data, "video_url:\s*'([^']+)'")
    if videourl:
        itemlist.append(['.mp4 [directo]', videourl])
    videourl = scrapertools.find_single_match(data, "var video_hd\s*=\s*'([^']+)'")
    videourl = scrapertools.find_single_match(data, "video_alt_url:\s*'([^']+)'")
    if videourl:
        itemlist.append(['.mp4 HD [directo]', videourl])

    if item.extra == "play_menu":
        return itemlist, data

    return itemlist

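After the move to javwhores.com the player no longer exposes var video_sd / var video_hd; the streams sit in video_url / video_alt_url fields of the player setup, which is why play() swaps its two patterns. A standalone sketch of extracting both qualities from such player JavaScript (the snippet is illustrative):

import re

player_js = """var player = new Player({
    video_url: 'https://cdn.example.test/clip_360p.mp4',
    video_alt_url: 'https://cdn.example.test/clip_720p.mp4',
});"""

def extract_streams(data):
    streams = []
    sd = re.search(r"video_url:\s*'([^']+)'", data)
    if sd:
        streams.append(['.mp4 [directo]', sd.group(1)])
    hd = re.search(r"video_alt_url:\s*'([^']+)'", data)
    if hd:
        streams.append(['.mp4 HD [directo]', hd.group(1)])
    return streams

print(extract_streams(player_js))
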
def menu_info(item):
    logger.info()
    itemlist = []

    video_urls, data = play(item.clone(extra="play_menu"))
    itemlist.append(item.clone(action="play", title="Ver -- %s" % item.title, video_urls=video_urls))

    bloque = scrapertools.find_single_match(data, '<div class="carousel-inner"(.*?)<div class="container">')
    matches = scrapertools.find_multiple_matches(bloque, 'src="([^"]+)"')
    for i, img in enumerate(matches):
@@ -132,5 +112,4 @@ def menu_info(item):
            continue
        title = "Imagen %s" % (str(i))
        itemlist.append(item.clone(action="", title=title, thumbnail=img, fanart=img))

    return itemlist

@@ -1,206 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel OH!Latino -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger

host = 'http://www.ohpeliculas.com'

def mainlist(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(host).data
    patron = '<li class="cat-item cat-item-\d+"><a href="(.*?)" >(.*?)<\/a> <i>(\d+)<\/i>'
    matches = scrapertools.find_multiple_matches(data, patron)
    mcantidad = 0
    for scrapedurl, scrapedtitle, cantidad in matches:
        mcantidad += int(cantidad)

    itemlist.append(
        item.clone(title="Peliculas",
                   action='movies_menu'
                   ))

    itemlist.append(
        item.clone(title="Buscar",
                   action="search",
                   url=host+'?s=',
                   ))

    return itemlist


def movies_menu(item):
    logger.info()

    itemlist = []

    itemlist.append(
        item.clone(title="Todas",
                   action="list_all",
                   url=host
                   ))

    itemlist.append(
        item.clone(title="Generos",
                   action="section",
                   url=host, extra='genres'))

    itemlist.append(
        item.clone(title="Por año",
                   action="section",
                   url=host, extra='byyear'
                   ))

    return itemlist


def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data

def list_all(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    patron = '<div id=mt-.*? class=item>.*?<a href=(.*?)><div class=image>.*?'
    patron +='<img src=(.*?) alt=.*?span class=tt>(.*?)<.*?ttx>(.*?)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches:
        url = scrapedurl
        action = 'findvideos'
        thumbnail = scrapedthumbnail
        contentTitle = scrapedtitle
        plot = scrapedplot
        title = contentTitle

        filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w185", "")
        filtro_list = {"poster_path": filtro_thumb}
        filtro_list = filtro_list.items()

        itemlist.append(Item(channel=item.channel,
                             action=action,
                             title=title,
                             url=url,
                             plot=plot,
                             thumbnail=thumbnail,
                             contentTitle=contentTitle,
                             infoLabels={'filtro': filtro_list}
                             ))
    #tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Paginacion

    if itemlist != []:
        actual_page_url = item.url
        next_page = scrapertools.find_single_match(data,
                                                   'alignleft><a href=(.*?) ><\/a><\/div><div class=nav-next alignright>')
        if next_page != '':
            itemlist.append(Item(channel=item.channel,
                                 action="list_all",
                                 title='Siguiente >>>',
                                 url=next_page,
                                 thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'
                                 ))
    return itemlist


def section(item):
    logger.info()

    itemlist = []
    duplicated =[]
    data = httptools.downloadpage(item.url).data
    if item.extra == 'genres':
        patron = '<li class="cat-item cat-item-.*?><a href="(.*?)" >(.*?)<\/a>'
    elif item.extra == 'byyear':
        patron = '<a href="([^"]+)">(\d{4})<\/a><\/li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle
        url = scrapedurl
        if url not in duplicated:
            itemlist.append(Item(channel=item.channel,
                                 action='list_all',
                                 title=title,
                                 url=url
                                 ))
            duplicated.append(url)
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        return list_all(item)


def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.contentTitle = item.fulltitle
        videoitem.infoLabels = item.infoLabels
        if videoitem.server != 'youtube':
            videoitem.title = item.title + ' (%s)' % videoitem.server
        else:
            videoitem.title = 'Trailer en %s' % videoitem.server
        videoitem.action = 'play'
        videoitem.server = ""

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 ))
    tmdb.set_infoLabels(itemlist, True)
    itemlist = servertools.get_servers_itemlist(itemlist)
    return itemlist


def newest(categoria):
    logger.info()
    item = Item()
    try:
        if categoria in ['peliculas','latino']:
            item.url = host + '/release/2017/'

        elif categoria == 'infantiles':
            item.url = host + '/genero/infantil/'

        itemlist = list_all(item)
        if itemlist[-1].title == '>> Página siguiente':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

    return itemlist

def play(item):
    logger.info()
    item.thumbnail = item.contentThumbnail
    return [item]

@@ -16,19 +16,6 @@ def mainlist(item):
    item.url = "http://www.pelispekes.com/"

    data = scrapertools.cachePage(item.url)
    '''
    <div class="poster-media-card">
    <a href="http://www.pelispekes.com/un-gallo-con-muchos-huevos/" title="Un gallo con muchos Huevos">
    <div class="poster">
    <div class="title">
    <span class="under-title">Animacion</span>
    </div>
    <span class="rating">
    <i class="glyphicon glyphicon-star"></i><span class="rating-number">6.2</span>
    </span>
    <div class="poster-image-container">
    <img width="300" height="428" src="http://image.tmdb.org/t/p/w185/cz3Kb6Xa1q0uCrsTIRDS7fYOZyw.jpg" title="Un gallo con muchos Huevos" alt="Un gallo con muchos Huevos"/>
    '''
    patron = '<div class="poster-media-card"[^<]+'
    patron += '<a href="([^"]+)" title="([^"]+)"[^<]+'
    patron += '<div class="poster"[^<]+'
@@ -51,7 +38,7 @@ def mainlist(item):
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail,
                 plot=plot, hasContentDetails=True, contentTitle=title, contentThumbnail=thumbnail))
                 plot=plot, contentTitle=title, contentThumbnail=thumbnail))

    # Extrae la pagina siguiente
    next_page_url = scrapertools.find_single_match(data,
@@ -65,14 +52,6 @@ def mainlist(item):

def findvideos(item):
    logger.info("item=" + item.tostring())

    '''
    <h2>Sinopsis</h2>
    <p>Para que todo salga bien en la prestigiosa Academia Werth, la pequeña y su madre se mudan a una casa nueva. La pequeña es muy seria y madura para su edad y planea estudiar durante las vacaciones siguiendo un estricto programa organizado por su madre; pero sus planes son perturbados por un vecino excéntrico y generoso. Él le enseña un mundo extraordinario en donde todo es posible. Un mundo en el que el Aviador se topó alguna vez con el misterioso Principito. Entonces comienza la aventura de la pequeña en el universo del Principito. Y así descubre nuevamente su infancia y comprenderá que sólo se ve bien con el corazón. Lo esencial es invisible a los ojos. Adaptación de la novela homónima de Antoine de Saint-Exupery.</p>
    <div
    '''

    # Descarga la página para obtener el argumento
    data = scrapertools.cachePage(item.url)
    data = data.replace("www.pelispekes.com/player/tune.php?nt=", "netu.tv/watch_video.php?v=")

@@ -3,16 +3,18 @@
import re
import urlparse

from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger

host = "https://www.serviporno.com"

def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(
        Item(channel=item.channel, action="videos", title="Útimos videos", url="http://www.serviporno.com/"))
        Item(channel=item.channel, action="videos", title="Útimos videos", url= host))
    itemlist.append(
        Item(channel=item.channel, action="videos", title="Más vistos", url="http://www.serviporno.com/mas-vistos/"))
    itemlist.append(

@@ -43,15 +45,14 @@ def search(item, texto):
def videos(item):
    logger.info()
    itemlist = []
    data = scrapertools.downloadpage(item.url)
    data = httptools.downloadpage(item.url).data

    patron = '<div class="wrap-box-escena">.*?'
    patron = '(?s)<div class="wrap-box-escena">.*?'
    patron += '<div class="box-escena">.*?'
    patron += '<a href="([^"]+)" data-stats-video-id="[^"]+" data-stats-video-name="([^"]+)" data-stats-video-category="[^"]*" data-stats-list-name="[^"]*" data-stats-list-pos="[^"]*">.*?'
    patron += '<img src="([^"]+)" data-src="[^"]+" alt="[^"]+" id=\'[^\']+\' class="thumbs-changer" data-thumbs-prefix="[^"]+" height="150px" width="175px" border=0 />'

    matches = re.compile(patron, re.DOTALL).findall(data)
    logger.info(str(matches))
    patron += '<a\s*href="([^"]+)".*?'
    patron += 'data-stats-video-name="([^"]+)".*?'
    patron += '<img\s*src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, title, thumbnail in matches:
        url = urlparse.urljoin(item.url, url)
        itemlist.append(Item(channel=item.channel, action='play', title=title, url=url, thumbnail=thumbnail))

@@ -106,10 +107,9 @@ def categorias(item):
def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.downloadpage(item.url)
    url = scrapertools.get_match(data, "url: '([^']+)',\s*framesURL:")
    data = httptools.downloadpage(item.url).data
    url = scrapertools.find_single_match(data, "sendCdnInfo.'([^']+)")
    itemlist.append(
        Item(channel=item.channel, action="play", server="directo", title=item.title, url=url, thumbnail=item.thumbnail,
             plot=item.plot, folder=False))

    return itemlist

@@ -1,12 +0,0 @@
{
    "id": "teledocumentales",
    "name": "Teledocumentales",
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "banner": "teledocumentales.png",
    "thumbnail": "teledocumentales.png",
    "categories": [
        "documentary"
    ]
}
@@ -1,109 +0,0 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger


def mainlist(item):
    logger.info()

    itemlist = []
    itemlist.append(Item(channel=item.channel, action="ultimo", title="Últimos Documentales",
                         url="http://www.teledocumentales.com/", viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, action="ListaCat", title="Listado por Genero",
                         url="http://www.teledocumentales.com/"))

    return itemlist


def ultimo(item):
    logger.info()
    itemlist = []

    data = scrapertools.cachePage(item.url)

    # Extrae las entradas
    patron = '<div class="imagen"(.*?)<div style="clear.both">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    print "manolo"
    print matches

    for match in matches:
        scrapedtitle = scrapertools.get_match(match, '<img src="[^"]+" alt="([^"]+)"')
        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
        scrapedurl = scrapertools.get_match(match, '<a href="([^"]+)"')
        scrapedthumbnail = scrapertools.get_match(match, '<img src="([^"]+)" alt="[^"]+"')
        scrapedplot = scrapertools.get_match(match, '<div class="excerpt">([^<]+)</div>')
        itemlist.append(
            Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                 plot=scrapedplot, fanart=scrapedthumbnail))

    # Extrae la marca de siguiente pagina
    try:
        next_page = scrapertools.get_match(data, '<a class="next" href="([^"]+)">')
        itemlist.append(Item(channel=item.channel, action="ultimo", title=">> Página siguiente",
                             url=urlparse.urljoin(item.url, next_page, viewmode="movie_with_plot")))
    except:
        pass

    return itemlist


def ListaCat(item):
    logger.info()

    url = item.url

    data = scrapertools.cachePage(url)

    # Extrae las entradas (carpetas)

    # <div class="slidethumb">
    # <a href="http://www.cine-adicto.com/transformers-dark-of-the-moon.html"><img src="http://www.cine-adicto.com/wp-content/uploads/2011/09/Transformers-Dark-of-the-moon-wallpaper.jpg" width="638" alt="Transformers: Dark of the Moon 2011" /></a>
    # </div>

    patron = '<div id="menu_horizontal">(.*?)<div class="cuerpo">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    logger.info("hay %d matches" % len(matches))

    itemlist = []
    for match in matches:
        data2 = match
        patron = '<li class="cat-item cat-item-.*?<a href="(.*?)".*?>(.*?)</a>.*?</li>'
        matches2 = re.compile(patron, re.DOTALL).findall(data2)
        logger.info("hay %d matches2" % len(matches2))

        for match2 in matches2:
            scrapedtitle = match2[1].replace("–", "-").replace("&amp;", "&").strip()
            scrapedurl = match2[0]
            scrapedthumbnail = match2[0].replace(" ", "%20")
            scrapedplot = ""

            itemlist.append(Item(channel=item.channel, action="ultimo", title=scrapedtitle, url=scrapedurl,
                                 thumbnail=scrapedthumbnail, plot=scrapedplot, fanart=scrapedthumbnail,
                                 viewmode="movie_with_plot"))

    return itemlist


def play(item):
    logger.info()

    data = scrapertools.cachePage(item.url)

    urlvideo = scrapertools.get_match(data, '<!-- end navigation -->.*?<iframe src="([^"]+)"')
    data = scrapertools.cachePage(urlvideo)
    url = scrapertools.get_match(data, 'iframe src="([^"]+)"')

    itemlist = servertools.find_video_items(data=url)

    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.channel = item.channel

    return itemlist
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-

import re
import urllib
import unicodedata

from core import channeltools
from core import httptools
@@ -11,7 +13,11 @@ from core.item import Item
from platformcode import config, logger

idiomas1 = {"/es.png":"CAST","/en_es.png":"VOSE","/la.png":"LAT","/en.png":"ENG"}
HOST = 'http://www.yaske.ro'
domain = "yaske.ro"
HOST = "http://www." + domain
HOST_MOVIES = "http://peliculas." + domain + "/now_playing/"
HOST_TVSHOWS = "http://series." + domain + "/popular/"
HOST_TVSHOWS_TPL = "http://series." + domain + "/tpl"
parameters = channeltools.get_channel_parameters('yaske')
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
@@ -26,38 +32,156 @@ def mainlist(item):
    item.fanart = fanart_host
    thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"

    itemlist.append(item.clone(title="Novedades", action="peliculas", text_bold=True, viewcontent='movies',
                               url=HOST,
    itemlist.append(item.clone(title="Peliculas", text_bold=True, viewcontent='movies',
                               thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
    itemlist.append(item.clone(title="Estrenos", action="peliculas", text_bold=True,
    itemlist.append(item.clone(title=" Novedades", action="peliculas", viewcontent='movies',
                               url=HOST_MOVIES,
                               thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
    itemlist.append(item.clone(title=" Estrenos", action="peliculas",
                               url=HOST + "/premiere", thumbnail=thumbnail % 'estrenos'))
    itemlist.append(item.clone(title="Género", action="menu_buscar_contenido", text_bold=True,thumbnail=thumbnail % 'generos', viewmode="thumbnails",
    itemlist.append(item.clone(title=" Género", action="menu_buscar_contenido", thumbnail=thumbnail % 'generos', viewmode="thumbnails",
                               url=HOST
                               ))
    itemlist.append(item.clone(title=" Buscar película", action="search", thumbnail=thumbnail % 'buscar',
                               type = "movie" ))

    itemlist.append(item.clone(title="", folder=False))
    itemlist.append(item.clone(title="Buscar por título", action="search", thumbnail=thumbnail % 'buscar'))
    itemlist.append(item.clone(title="Series", text_bold=True, viewcontent='movies',
                               thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
    itemlist.append(item.clone(title=" Novedades", action="series", viewcontent='movies',
                               url=HOST_TVSHOWS,
                               thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
    itemlist.append(item.clone(title=" Buscar serie", action="search", thumbnail=thumbnail % 'buscar',
                               type = "tvshow" ))

    return itemlist


def series(item):
    logger.info()
    itemlist = []
    url_p = scrapertools.find_single_match(item.url, '(.*?).page=')
    page = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
    if not page:
        page = 1
        url_p = item.url
    else:
        page = int(page) + 1
    if "search" in item.url:
        url_p += "&page=%s" %page
    else:
        url_p += "?page=%s" %page
    data = httptools.downloadpage(url_p).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '(?s)class="post-item-image btn-play-item".*?'
    patron += 'href="(http://series[^"]+)">.*?'
    patron += '<img data-original="([^"]+)".*?'
    patron += 'glyphicon-play-circle"></i>([^<]+).*?'
    patron += 'glyphicon-calendar"></i>([^<]+).*?'
    patron += 'text-muted f-14">(.*?)</h3'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedepisodes, year, scrapedtitle in matches:
        scrapedepisodes.strip()
        year = year.strip()
        contentSerieName = scrapertools.htmlclean(scrapedtitle.strip())
        title = "%s (%s)" %(contentSerieName, scrapedepisodes)
        if "series" in scrapedurl:
            itemlist.append(Item(channel=item.channel, action="temporadas", title=title, url=scrapedurl,
                                 thumbnail=scrapedthumbnail, contentSerieName=contentSerieName,
                                 infoLabels={"year": year}, text_color=color1))
    # Obtenemos los datos basicos de todas las peliculas mediante multihilos
    tmdb.set_infoLabels(itemlist, True)

    # Si es necesario añadir paginacion
    patron_next_page = 'href="([^"]+)">\s*»'
    matches_next_page = scrapertools.find_single_match(data, patron_next_page)
    if matches_next_page and len(itemlist)>0:
        itemlist.append(
            Item(channel=item.channel, action="series", title=">> Página siguiente", thumbnail=thumbnail_host,
                 url=url_p, folder=True, text_color=color3, text_bold=True))
    return itemlist

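series() and the reworked peliculas() below share one pagination idiom: read page= from the current URL, bump it, and reattach it with & when the URL already carries a query string (search) or ? otherwise. A standalone sketch of that builder:

import re

def build_page_url(url):
    # strip any existing "?page=N" / "&page=N" and remember N
    current = re.search(r"page=([0-9]+)", url)
    if current:
        base = re.match(r"(.*?)[?&]page=", url).group(1)
        page = int(current.group(1)) + 1
    else:
        base, page = url, 1
    # search URLs already carry a query string, so extend with '&'
    sep = "&" if "search" in base else "?"
    return "%s%spage=%s" % (base, sep, page)

print(build_page_url("http://series.example.test/popular/"))                 # -> ...?page=1
print(build_page_url("http://series.example.test/popular/?page=3"))          # -> ...?page=4
print(build_page_url("http://series.example.test/search/?query=a&page=1"))   # -> ...&page=2
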
def temporadas(item):
    logger.info()
    itemlist = []
    post = []
    data = httptools.downloadpage(item.url).data
    patron = 'media-object" src="([^"]+).*?'
    patron += 'media-heading">([^<]+).*?'
    patron += '<code>(.*?)</div>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedthumbnail, scrapedtitle, scrapedcapitulos in matches:
        id = scrapertools.find_single_match(item.url, "yaske.ro/([0-9]+)")
        season = scrapertools.find_single_match(scrapedtitle, "[0-9]+")
        title = scrapedtitle + " (%s)" %scrapedcapitulos.replace("</code>","").replace("\n","")
        post = {"data[season]" : season, "data[id]" : id, "name" : "list_episodes" , "both" : "0", "type" : "template"}
        post = urllib.urlencode(post)
        item.infoLabels["season"] = season
        itemlist.append(item.clone(action = "capitulos",
                                   post = post,
                                   title = title,
                                   url = HOST_TVSHOWS_TPL
                                   ))
    tmdb.set_infoLabels(itemlist)
    if config.get_videolibrary_support():
        itemlist.append(Item(channel=item.channel, title =""))
        itemlist.append(item.clone(action = "add_serie_to_library",
                                   channel = item.channel,
                                   extra = "episodios",
                                   title = '[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                                   url = item.url
                                   ))
    return itemlist

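Season listings on the new series subdomain are fetched by POSTing a small urlencoded form to the /tpl endpoint. A sketch of building that payload with the field names from the hunk above — shown with Python 3's urllib.parse.urlencode; the channel itself runs on Python 2, where the equivalent call is urllib.urlencode:

from urllib.parse import urlencode

# field names copied from temporadas() above; the id value is illustrative
post = urlencode({
    "data[season]": "2",
    "data[id]": "12345",
    "name": "list_episodes",
    "both": "0",
    "type": "template",
})
print(post)   # e.g. data%5Bseason%5D=2&data%5Bid%5D=12345&name=list_episodes&both=0&type=template
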
def episodios(item):
    logger.info()
    itemlist = []
    templist = temporadas(item)
    for tempitem in templist:
        itemlist += capitulos(tempitem)
    return itemlist


def capitulos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url, post=item.post).data
    data = data.replace("<wbr>","")
    patron = 'href=."([^"]+).*?'
    patron += 'media-heading.">([^<]+).*?'
    patron += 'fecha de emisi.*?: ([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapeddate in matches:
        scrapedtitle = scrapedtitle + " (%s)" %scrapeddate
        episode = scrapertools.find_single_match(scrapedurl, "capitulo-([0-9]+)")
        query = item.contentSerieName + " " + scrapertools.find_single_match(scrapedtitle, "\w+")
        item.infoLabels["episode"] = episode
        itemlist.append(item.clone(action = "findvideos",
                                   title = scrapedtitle.decode("unicode-escape"),
                                   query = query.replace(" ","+"),
                                   url = scrapedurl.replace("\\","")
                                   ))
    tmdb.set_infoLabels(itemlist)
    return itemlist


def search(item, texto):
    logger.info()
    itemlist = []

    try:
        item.url = HOST + "/search/?query=" + texto.replace(' ', '+')
        item.extra = ""
        itemlist.extend(peliculas(item))
        if item.type == "movie":
            itemlist.extend(peliculas(item))
        else:
            itemlist.extend(series(item))
        if itemlist[-1].title == ">> Página siguiente":
            item_pag = itemlist[-1]
            itemlist = sorted(itemlist[:-1], key=lambda Item: Item.contentTitle)
            itemlist.append(item_pag)
        else:
            itemlist = sorted(itemlist, key=lambda Item: Item.contentTitle)

        return itemlist

    except:
        import sys
        for line in sys.exc_info():

@@ -77,7 +201,6 @@ def newest(categoria):
        item.url = HOST + "/genre/27/"
    else:
        return []

    itemlist = peliculas(item)
    if itemlist[-1].title == ">> Página siguiente":
        itemlist.pop()

@@ -95,8 +218,18 @@ def newest(categoria):
def peliculas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    url_p = scrapertools.find_single_match(item.url, '(.*?).page=')
    page = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
    if not page:
        page = 1
        url_p = item.url
    else:
        page = int(page) + 1
    if "search" in item.url:
        url_p += "&page=%s" %page
    else:
        url_p += "?page=%s" %page
    data = httptools.downloadpage(url_p).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '(?s)class="post-item-image btn-play-item".*?'
    patron += 'href="([^"]+)">.*?'
@@ -105,12 +238,8 @@ def peliculas(item):
    patron += 'post(.*?)</div.*?'
    patron += 'text-muted f-14">(.*?)</h3'
    matches = scrapertools.find_multiple_matches(data, patron)
    patron_next_page = 'href="([^"]+)">&nbsp;»'
    matches_next_page = scrapertools.find_single_match(data, patron_next_page)
    if len(matches_next_page) > 0:
        url_next_page = item.url + matches_next_page

    for scrapedurl, scrapedthumbnail, year, idiomas, scrapedtitle in matches:
        query = scrapertools.find_single_match(scrapedurl, 'yaske.ro/[0-9]+/(.*?)/').replace("-","+")
        year = year.strip()
        patronidiomas = '<img src="([^"]+)"'
        matchesidiomas = scrapertools.find_multiple_matches(idiomas, patronidiomas)
@@ -124,28 +253,27 @@ def peliculas(item):
        contentTitle = scrapertools.htmlclean(scrapedtitle.strip())
        title = "%s %s" % (contentTitle, idiomas_disponibles)
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
                             thumbnail=scrapedthumbnail, contentTitle=contentTitle,
                             thumbnail=scrapedthumbnail, contentTitle=contentTitle, query = query,
                             infoLabels={"year": year}, text_color=color1))
    # Obtenemos los datos basicos de todas las peliculas mediante multihilos
    tmdb.set_infoLabels(itemlist)

    # Si es necesario añadir paginacion
    if matches_next_page:
    patron_next_page = 'href="([^"]+)">\s*»'
    matches_next_page = scrapertools.find_single_match(data, patron_next_page)
    if matches_next_page and len(itemlist)>0:
        itemlist.append(
            Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
                 url=url_next_page, folder=True, text_color=color3, text_bold=True))

                 url=url_p, folder=True, text_color=color3, text_bold=True))
    return itemlist

def menu_buscar_contenido(item):
|
||||
logger.info(item)
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = 'Generos.*?</ul>'
|
||||
data = scrapertools.find_single_match(data, patron)
|
||||
# Extrae las entradas
|
||||
patron = 'href="([^"]+)">([^<]+)'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
@@ -159,11 +287,7 @@ def menu_buscar_contenido(item):
|
||||
folder = True,
|
||||
viewmode = "movie_with_plot"
|
||||
))
|
||||
|
||||
if item.extra in ['genre', 'audio', 'year']:
|
||||
return sorted(itemlist, key=lambda i: i.title.lower(), reverse=item.extra == 'year')
|
||||
else:
|
||||
return itemlist
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
@@ -171,11 +295,12 @@ def findvideos(item):
    itemlist = []
    sublist = []
    data = httptools.downloadpage(item.url).data
    mtmdb = scrapertools.find_single_match(item.url, 'yaske.ro/([0-9]+)')
    patron = '(?s)id="online".*?server="([^"]+)"'
    mserver = scrapertools.find_single_match(data, patron)
    url_m = "http://olimpo.link/?tmdb=%s&server=%s" % (mtmdb, mserver)
    patron = '/\?tmdb=[^"]+.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
    if not item.query:
        item.query = scrapertools.find_single_match(item.url, "peliculas.*?/[0-9]+/([^/]+)").replace("-", "+")
        url_m = "http://olimpo.link/?q=%s&server=%s" % (item.query, mserver)
        patron = 'class="favicon.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
    patron += '\[([^\]]+)\].*?\[([^\]]+)\]'
    data = httptools.downloadpage(url_m).data
    matches = scrapertools.find_multiple_matches(data, patron)

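The hunk above resolves streams through the olimpo.link aggregator: it prefers the numeric TMDb id scraped from the yaske.ro URL and falls back to a q= text query when that id is missing. A sketch of just that URL selection (the parameter values below are made up):

def build_olimpo_url(tmdb_id, query, server):
    # Prefer the TMDb id; otherwise search by title words joined with "+".
    if tmdb_id:
        return "http://olimpo.link/?tmdb=%s&server=%s" % (tmdb_id, server)
    return "http://olimpo.link/?q=%s&server=%s" % (query, server)

print(build_olimpo_url("399035", None, "21"))
print(build_olimpo_url(None, "la+forma+del+agua", "21"))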
@@ -23,7 +23,7 @@ ficherocookies = os.path.join(config.get_data_path(), "cookies.dat")

# Headers por defecto, si no se especifica nada
default_headers = dict()
default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3163.100 Safari/537.36"
default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3"
default_headers["Accept-Charset"] = "UTF-8"

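These module-level defaults are what downloadpage falls back on; callers can still pass their own headers. A sketch of the usual merge order, assuming caller headers win over the defaults (merge_headers itself is hypothetical, not part of httptools):

def merge_headers(default_headers, request_headers=None):
    # Copy the module defaults, then let per-request fields override them.
    headers = dict(default_headers)
    headers.update(request_headers or {})
    return headers

defaults = {"User-Agent": "Mozilla/5.0 ...", "Accept-Charset": "UTF-8"}
print(merge_headers(defaults, {"Referer": "https://example.com/"}))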
@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------
# Item is the object we use for representing data
# --------------------------------------------------------------------------------
@@ -170,8 +170,6 @@ class Item(object):
        # Al modificar cualquiera de estos atributos content...
        if name in ["contentTitle", "contentPlot", "plot", "contentSerieName", "contentType", "contentEpisodeTitle",
                    "contentSeason", "contentEpisodeNumber", "contentThumbnail", "show", "contentQuality", "quality"]:
            # ... marcamos hasContentDetails como "true"...
            self.__dict__["hasContentDetails"] = True
            # ...y actualizamos infoLabels
            if name == "contentTitle":
                self.__dict__["infoLabels"]["title"] = value
@@ -236,10 +234,6 @@ class Item(object):
            self.__dict__["viewcontent"] = viewcontent
            return viewcontent

        # Valor por defecto para hasContentDetails
        elif name == "hasContentDetails":
            return False

        # valores guardados en infoLabels
        elif name in ["contentTitle", "contentPlot", "contentSerieName", "show", "contentType", "contentEpisodeTitle",
                      "contentSeason", "contentEpisodeNumber", "contentThumbnail", "plot", "duration",

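With the hasContentDetails bookkeeping removed, what remains is the pattern of mirroring content* attribute writes into infoLabels through __setattr__. A stripped-down sketch of that pattern (MiniItem is illustrative, not the real Item class):

class MiniItem(object):
    # Writes to selected "content" attributes are mirrored into infoLabels.
    SYNCED = {"contentTitle": "title", "contentSeason": "season"}

    def __init__(self):
        self.__dict__["infoLabels"] = {}  # bypass __setattr__ during init

    def __setattr__(self, name, value):
        if name in self.SYNCED:
            self.__dict__["infoLabels"][self.SYNCED[name]] = value
        self.__dict__[name] = value

it = MiniItem()
it.contentTitle = "Coco"
print(it.infoLabels)  # {'title': 'Coco'}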
@@ -268,8 +268,9 @@ def save_tvshow(item, episodelist):
    # Creamos tvshow.nfo, si no existe, con la head_nfo, info de la serie y marcas de episodios vistos
    logger.info("Creando tvshow.nfo: " + tvshow_path)
    head_nfo = scraper.get_nfo(item)

    item_tvshow = Item(title=item.contentTitle, channel="videolibrary", action="get_seasons",
    item.infoLabels['mediatype'] = "tvshow"
    item.infoLabels['title'] = item.contentSerieName
    item_tvshow = Item(title=item.contentSerieName, channel="videolibrary", action="get_seasons",
                       fanart=item.infoLabels['fanart'], thumbnail=item.infoLabels['thumbnail'],
                       infoLabels=item.infoLabels, path=path.replace(TVSHOWS_PATH, ""))
    item_tvshow.library_playcounts = {}
@@ -294,7 +295,6 @@ def save_tvshow(item, episodelist):

    if item.channel != "downloads":
        item_tvshow.active = 1  # para que se actualice a diario cuando se llame a videolibrary_service

    filetools.write(tvshow_path, head_nfo + item_tvshow.tojson())

    if not episodelist:
@@ -439,7 +439,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
            news_in_playcounts["season %s" % e.contentSeason] = 0
        # Marcamos la serie como no vista
        # logger.debug("serie " + serie.tostring('\n'))
        news_in_playcounts[serie.contentTitle] = 0
        news_in_playcounts[serie.contentSerieName] = 0

    else:
        logger.info("Sobreescrito: %s" % json_path)

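The filetools.write call above implies the tvshow.nfo layout: a scraper head (a URL line the scraper can later re-resolve) followed by the serialized Item. A sketch with plain file I/O and json standing in for filetools/tojson (the path, URL and dict below are made-up examples, and head_nfo ending in a newline is an assumption):

import json

def write_tvshow_nfo(path, head_nfo, tvshow_dict):
    # Assumption: head_nfo already ends in a newline; the Item JSON follows it.
    with open(path, "w") as f:
        f.write(head_nfo + json.dumps(tvshow_dict))

write_tvshow_nfo("tvshow.nfo", "https://www.themoviedb.org/tv/1396\n",
                 {"title": "Breaking Bad", "active": 1})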
@@ -8,7 +8,7 @@
            "url": "https://www.bitporno.com/e/\\1"
        },
        {
            "pattern": "raptu.com/(?:\\?v=|embed/|e/)([A-z0-9]+)",
            "pattern": "raptu.com/(?:\\?v=|embed/|e/|v/)([A-z0-9]+)",
            "url": "https://www.bitporno.com/e/\\1"
        }
    ]
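The change adds v/ to the accepted raptu.com path forms. A quick check of how such a find_videos pattern rewrites an embed URL, assuming (as a simplification) a plain regex substitution:

import re

pattern = r"raptu.com/(?:\?v=|embed/|e/|v/)([A-z0-9]+)"
url_tpl = r"https://www.bitporno.com/e/\1"

for page in ("https://raptu.com/v/AbC123", "https://raptu.com/?v=AbC123"):
    print(re.sub(".*" + pattern + ".*", url_tpl, page))
# both print https://www.bitporno.com/e/AbC123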
@@ -23,7 +23,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    videourl = scrapertools.find_multiple_matches(data, 'file":"([^"]+).*?label":"([^"]+)')
    videourl = scrapertools.find_multiple_matches(data, '<source src="(http[^"]+).*?data-res="([^"]+)')
    scrapertools.printMatches(videourl)
    for scrapedurl, scrapedquality in videourl:
        if "loadthumb" in scrapedurl:
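The regex swap above follows the player's move from a JSON file/label config to HTML5 source tags. A quick check of the new expression against illustrative markup (the snippet is made up, not captured from the site):

import re

sample = ('<video><source src="https://cdn.example/v.mp4?x=1" '
          'type="video/mp4" data-res="720"></video>')
pattern = '<source src="(http[^"]+).*?data-res="([^"]+)'
print(re.findall(pattern, sample))
# [('https://cdn.example/v.mp4?x=1', '720')]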
@@ -1,42 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(?:divxstage|cloudtime).[^/]+/video/([^\"' ]+)",
                "url": "http://www.cloudtime.to/embed/?v=\\1"
            }
        ]
    },
    "free": true,
    "id": "divxstage",
    "name": "divxstage",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "server_divxstage.png"
}
@@ -1,46 +0,0 @@
# -*- coding: utf-8 -*-

import re

from core import httptools
from core import scrapertools
from platformcode import logger

host = "http://www.cloudtime.to"


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url.replace('/embed/?v=', '/video/')).data

    if "This file no longer exists" in data:
        return False, "El archivo no existe<br/>en divxstage o ha sido borrado."

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    if "divxstage.net" in page_url:
        page_url = page_url.replace("divxstage.net", "cloudtime.to")

    data = httptools.downloadpage(page_url).data

    video_urls = []
    videourls = scrapertools.find_multiple_matches(data, 'src\s*:\s*[\'"]([^\'"]+)[\'"]')
    if not videourls:
        videourls = scrapertools.find_multiple_matches(data, '<source src=[\'"]([^\'"]+)[\'"]')
    for videourl in videourls:
        if videourl.endswith(".mpd"):
            id = scrapertools.find_single_match(videourl, '/dash/(.*?)/')
            videourl = "http://www.cloudtime.to/download.php%3Ffile=mm" + "%s.mp4" % id

        videourl = re.sub(r'/dl(\d)*/', '/dl/', videourl)
        ext = scrapertools.get_filename_from_url(videourl)[-4:]
        videourl = videourl.replace("%3F", "?") + \
                   "|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0"
        video_urls.append([ext + " [cloudtime]", videourl])

    return video_urls
@@ -9,8 +9,6 @@ from platformcode import logger

def test_video_exists(page_url):

    if 'googleusercontent' in page_url:
        return True, ""
    response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
    if "no+existe" in response.data:
        return False, "[gvideo] El video no existe o ha sido borrado"
@@ -22,6 +20,8 @@ def test_video_exists(page_url):
        return False, "[gvideo] Se ha producido un error en el reproductor de google"
    if "No+se+puede+procesar+este" in response.data:
        return False, "[gvideo] No se puede procesar este video"
    if response.code == 429:
        return False, "[gvideo] Demasiadas conexiones al servidor, inténtelo después"
    return True, ""

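gvideo's availability check above is a chain of marker-string tests plus one status-code test; a table-driven sketch of the same idea (not the addon's code):

ERROR_MARKERS = [
    ("no+existe", "El video no existe o ha sido borrado"),
    ("No+se+puede+procesar+este", "No se puede procesar este video"),
]

def check_response(code, body):
    # Known marker strings first, then the 429 rate-limit status code.
    for marker, message in ERROR_MARKERS:
        if marker in body:
            return False, "[gvideo] " + message
    if code == 429:
        return False, "[gvideo] Demasiadas conexiones al servidor, inténtelo después"
    return True, ""

print(check_response(429, ""))  # (False, '[gvideo] Demasiadas conexiones...')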
@@ -1,42 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "idowatch.net/(?:embed-)?([a-z0-9]+)",
                "url": "http://idowatch.net/\\1.html"
            }
        ]
    },
    "free": true,
    "id": "idowatch",
    "name": "idowatch",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "server_idowatch.png"
}
@@ -1,34 +0,0 @@
# -*- coding: utf-8 -*-

from core import scrapertools
from lib import jsunpack
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = scrapertools.cache_page(page_url)
    if "File Not Found" in data:
        return False, "[Idowatch] El archivo no existe o ha sido borrado"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    data = scrapertools.cache_page(page_url)

    mediaurl = scrapertools.find_single_match(data, ',{file:(?:\s+|)"([^"]+)"')
    if not mediaurl:
        matches = scrapertools.find_single_match(data,
                                                 "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>")
        matchjs = jsunpack.unpack(matches).replace("\\", "")
        mediaurl = scrapertools.find_single_match(matchjs, ',{file:(?:\s+|)"([^"]+)"')

    video_urls = []
    video_urls.append([scrapertools.get_filename_from_url(mediaurl)[-4:] + " [idowatch]", mediaurl])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
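idowatch (removed above) used a fallback that several of these connectors share: match the player config in the raw HTML first, and only unpack the p,a,c,k,e,d JavaScript when that fails. The shape of that fallback, with lib.jsunpack stubbed as a parameter since it is addon-internal:

import re

FILE_RE = r',\{file:(?:\s+|)"([^"]+)"'
PACKED_RE = r"<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>"

def find_media_url(data, unpack=lambda js: js):
    # Try the raw page, then the unpacked <script> body with the same regex.
    hit = re.search(FILE_RE, data)
    if not hit:
        packed = re.search(PACKED_RE, data, re.S)
        if not packed:
            return ""
        hit = re.search(FILE_RE, unpack(packed.group(1)).replace("\\", ""))
    return hit.group(1) if hit else ""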
@@ -1,45 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "nosvideo.com/(?:\\?v=|vj/video.php\\?u=|)([a-z0-9]+)",
                "url": "http://nosvideo.com/vj/videomain.php?u=\\1==530"
            },
            {
                "pattern": "nosupload.com(/\\?v\\=[a-z0-9]+)",
                "url": "http://nosvideo.com\\1"
            }
        ]
    },
    "free": true,
    "id": "nosvideo",
    "name": "nosvideo",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}
@@ -1,41 +0,0 @@
# -*- coding: utf-8 -*-

from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = scrapertools.cache_page(page_url)

    if "404 Page no found" in data:
        return False, "[nosvideo] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []

    # Lee la URL
    data = scrapertools.cache_page(page_url)
    urls = scrapertools.find_multiple_matches(data, ":'(http:\/\/.+?(?:v.mp4|.smil))")
    urls = set(urls)

    for media_url in urls:
        if ".smil" in media_url:
            data = scrapertools.downloadpage(media_url)
            rtmp = scrapertools.find_single_match(data, '<meta base="([^"]+)"')
            playpath = scrapertools.find_single_match(data, '<video src="([^"]+)"')
            media_url = rtmp + " playpath=" + playpath
            filename = "rtmp"
        else:
            filename = scrapertools.get_filename_from_url(media_url)[-4:]
        video_urls.append([filename + " [nosvideo]", media_url])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
@@ -1,45 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(nowdownload.\\w{2}]/dl/[a-z0-9]+)",
                "url": "http://www.\\1"
            }
        ]
    },
    "free": false,
    "id": "nowdownload",
    "name": "nowdownload",
    "premium": [
        "realdebrid"
    ],
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "server_nowdownload.png"
}
@@ -1,36 +0,0 @@
# -*- coding: utf-8 -*-

from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    '''
    <a href="http://f02.nowdownload.co/dl/91efaa9ec507ef4de023cd62bb9a0fe2/50ab76ac/6711c9c90ebf3_family.guy.s11e02.italian.subbed.hdtv.xvid_gannico.avi" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>
    '''
    data = scrapertools.cache_page(page_url)
    logger.debug("data:" + data)
    try:
        url = scrapertools.get_match(data,
                                     '<a href="([^"]*)" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>')
    except:
        # $.get("/api/token.php?token=7e1ab09df2775dbea02506e1a2651883");
        token = scrapertools.get_match(data, '(/api/token.php\?token=[^"]*)')
        logger.debug("token:" + token)
        d = scrapertools.cache_page("http://www.nowdownload.co" + token)
        url = scrapertools.get_match(data, 'expiryText: \'<a class="btn btn-danger" href="([^"]*)')
        logger.debug("url_1:" + url)
        data = scrapertools.cache_page("http://www.nowdownload.co" + url)
        logger.debug("data:" + data)
        # <a href="http://f03.nowdownload.co/dl/8ec5470153bb7a2177847ca7e1638389/50ab71b3/f92882f4d33a5_squadra.antimafia_palermo.oggi.4x01.episodio.01.ita.satrip.xvid_upz.avi" class="btn btn-success">Click here to download !</a>
        url = scrapertools.get_match(data, '<a href="([^"]*)" class="btn btn-success">Click here to download !</a>')
        logger.debug("url_final:" + url)

    video_urls = [url]
    return video_urls
@@ -1,32 +0,0 @@
{
    "active": true,
    "free": true,
    "id": "pcloud",
    "name": "pcloud",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}
@@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-

from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = scrapertools.cache_page(page_url)
    if "Invalid link" in data:
        return False, "[pCloud] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    data = scrapertools.cache_page(page_url)
    media_url = scrapertools.find_single_match(data, '"downloadlink":.*?"([^"]+)"')
    media_url = media_url.replace("\\", "")

    video_urls = []
    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [pCloud]", media_url])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
@@ -1,49 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(http://stagevu.com/video/[A-Z0-9a-z]+)",
                "url": "\\1"
            },
            {
                "pattern": "http://stagevu.com.*?uid\\=([A-Z0-9a-z]+)",
                "url": "http://stagevu.com/video/\\1"
            },
            {
                "pattern": "http://[^\\.]+\\.stagevu.com/v/[^/]+/(.*?).avi",
                "url": "http://stagevu.com/video/\\1"
            }
        ]
    },
    "free": true,
    "id": "stagevu",
    "name": "stagevu",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}
@@ -1,33 +0,0 @@
# -*- coding: utf-8 -*-

import re

from core import scrapertools
from platformcode import logger


# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    video_urls = []

    # Descarga la página del vídeo
    data = scrapertools.cache_page(page_url)

    # Busca el vídeo de dos formas distintas
    patronvideos = '<param name="src" value="([^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        video_urls = [["[stagevu]", matches[0]]]
    else:
        patronvideos = 'src="([^"]+stagevu.com/[^i][^"]+)"'  # Forma src="XXXstagevu.com/ y algo distinto de i para evitar images e includes
        matches = re.findall(patronvideos, data)
        if len(matches) > 0:
            video_urls = [["[stagevu]", matches[0]]]

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
@@ -1,42 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "stormo.tv/(?:videos/|embed/)([0-9]+)",
                "url": "http://stormo.tv/embed/\\1"
            }
        ]
    },
    "free": true,
    "id": "stormo",
    "name": "stormo",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "http://i.imgur.com/mTYCw5E.png"
}
@@ -1,33 +0,0 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    response = httptools.downloadpage(page_url)
    if "video_error.mp4" in response.data:
        return False, "[Stormo] El archivo no existe o ha sido borrado"
    if response.code == 451:
        return False, "[Stormo] El archivo ha sido borrado por problemas legales."

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info(" url=" + page_url)

    video_urls = []
    data = httptools.downloadpage(page_url).data
    media_url = scrapertools.find_single_match(data, "file\s*:\s*['\"]([^'\"]+)['\"]")
    if media_url.endswith("/"):
        media_url = media_url[:-1]

    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [stormo]", media_url])
    for video_url in video_urls:
        logger.info(" %s - %s" % (video_url[0], video_url[1]))

    return video_urls
@@ -1,41 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "turbovideos.net/embed-([a-z0-9A-Z]+)",
                "url": "http://turbovideos.net/embed-\\1.html"
            }
        ]
    },
    "free": true,
    "id": "turbovideos",
    "name": "turbovideos",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}
@@ -1,38 +0,0 @@
# -*- coding: utf-8 -*-

from core import scrapertools
from lib import jsunpack
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    if "embed" not in page_url:
        page_url = page_url.replace("http://turbovideos.net/", "http://turbovideos.net/embed-") + ".html"

    data = scrapertools.cache_page(page_url)
    logger.info("data=" + data)

    data = scrapertools.find_single_match(data,
                                          "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>")
    logger.info("data=" + data)

    data = jsunpack.unpack(data)
    logger.info("data=" + data)

    video_urls = []
    # {file:"http://ultra.turbovideos.net/73ciplxta26xsbj2bqtkqcd4rtyxhgx5s6fvyzed7ocf4go2lxjnd6e5kjza/v.mp4",label:"360"
    media_urls = scrapertools.find_multiple_matches(data, 'file:"([^"]+)",label:"([^"]+)"')
    for media_url, label in media_urls:

        if not media_url.endswith("srt"):
            video_urls.append(
                [scrapertools.get_filename_from_url(media_url)[-4:] + " " + label + " [turbovideos]", media_url])

    return video_urls
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from platformcode import logger

@@ -7,35 +8,28 @@ from platformcode import logger
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = scrapertools.cache_page(page_url)
    data = httptools.downloadpage(page_url).data

    if "This video has been removed from public access" in data:
    if "This video has been removed from public access" in data or "Video not found." in data:
        return False, "El archivo ya no esta disponible<br/>en VK (ha sido borrado)"
    else:
        return True, ""
    return True, ""


# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    video_urls = []
    try:
        oid, id = scrapertools.find_single_match(page_url, 'oid=([^&]+)&id=(\d+)')
    except:
        oid, id = scrapertools.find_single_match(page_url, 'video(\d+)_(\d+)')

    from core import httptools
    headers = {'User-Agent': 'Mozilla/5.0'}
    url = "http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s" % (oid, id)
    data = httptools.downloadpage(url, headers=headers).data

    matches = scrapertools.find_multiple_matches(data, '<source src="([^"]+)" type="video/(\w+)')
    for media_url, ext in matches:
        calidad = scrapertools.find_single_match(media_url, '(\d+)\.%s' % ext)
        video_urls.append(["." + ext + " [vk:" + calidad + "]", media_url])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
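The vk hunk accepts two URL shapes for the same video; a standalone check of the oid/id extraction and the al_video.php endpoint it feeds (the video ids below are made up):

import re

def vk_ids(page_url):
    # Embed URLs carry "oid=...&id=..."; share URLs carry "video<oid>_<id>".
    m = re.search(r'oid=([^&]+)&id=(\d+)', page_url)
    if not m:
        m = re.search(r'video(\d+)_(\d+)', page_url)
    return m.groups()

oid, vid = vk_ids("https://vk.com/video12345_67890")
print("http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s" % (oid, vid))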