@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.4.17" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.4.18" provider-name="Alfa Addon">
     <requires>
         <import addon="xbmc.python" version="2.1.0"/>
         <import addon="script.module.libtorrent" optional="true"/>
@@ -19,7 +19,9 @@
     </assets>
     <news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» pelisplusco
» seriesblanco  » rapidvideo
» kbagi  » bitertv
» doomtv  » miltorrents
¤ arreglos internos
    </news>
    <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
@@ -7,6 +7,7 @@ import urlparse
 from channels import autoplay
 from channels import filtertools
 from core import httptools
+from core import jsontools
 from core import scrapertools
 from core import servertools
 from core import tmdb
@@ -219,23 +220,38 @@ def newest(categoria):

     return itemlist


+def get_vip(item, url):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(url + '/videocontent').data
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    video_id = scrapertools.find_single_match(data, 'id=videoInfo ><span >(.*?)</span>')
+    new_url = 'https://v.d0stream.com/api/videoinfo/%s?src-url=https://Fv.d0stream.com' % video_id
+    json_data = httptools.downloadpage(new_url).data
+    dict_data = jsontools.load(json_data)
+    sources = dict_data['sources']
+
+    for vip_item in sources['mp4_cdn']:
+        vip_url = vip_item['url']
+        vip_quality = vip_item['label']
+        title = '%s [%s]' % (item.title, vip_quality)
+        itemlist.append(item.clone(title=title, url=vip_url, action='play', quality=vip_quality, server='directo'))
+
+    return itemlist
+
+
 def findvideos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    player_vip = scrapertools.find_single_match(data, 'src=(https:\/\/content.jwplatform.com\/players.*?js)')
+    data_m3u8 = httptools.downloadpage(player_vip, headers={'referer': item.url}).data
+    data_m3u8 = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data_m3u8)
+    url_m3u8 = scrapertools.find_single_match(data_m3u8, ',sources:.*?file: (.*?),')
+    itemlist.append(item.clone(url=url_m3u8, action='play'))
+    player_vip = scrapertools.find_single_match(data, 'class=movieplay><iframe src=(https://v.d0stream.com.*?) frameborder')
+    itemlist.extend(get_vip(item, player_vip))

     patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)'
     matches = re.compile(patron, re.DOTALL).findall(data)

     for option, urls in matches:
         quality = scrapertools.find_single_match(data, '<div class=les-content><a href=#%s>(.*?)<\/a><\/div>' % option)
         title = '%s (%s)' % (item.title, quality)

         if 'content' in urls:
             urls = '%s%s' % ('http:', urls)
             hidden_data = httptools.downloadpage(urls).data
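
A note on the new helper: get_vip() assumes the d0stream videoinfo endpoint answers with JSON shaped like the sketch below. The key names ('sources', 'mp4_cdn', 'url', 'label') come straight from the code above; the sample values are invented for illustration.

    # Minimal sketch of the response shape get_vip() walks.
    sample_response = {
        "sources": {
            "mp4_cdn": [
                {"url": "https://cdn.example/video_480.mp4", "label": "480p"},
                {"url": "https://cdn.example/video_720.mp4", "label": "720p"},
            ]
        }
    }

    for vip_item in sample_response["sources"]["mp4_cdn"]:
        # One playable entry per quality, as in the loop above.
        print("%s %s" % (vip_item["label"], vip_item["url"]))
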
@@ -248,20 +264,18 @@ def findvideos(item):
             new_item = Item(
                 channel = item.channel,
                 url = videoitem,
-                title = title,
+                title = item.title,
                 contentTitle = item.title,
                 action = 'play',
                 quality = quality
             )
             itemlist.append(new_item)
     else:
         new_item = Item(
             channel=item.channel,
             url=urls,
-            title=title,
+            title=item.title,
             contentTitle=item.title,
             action='play',
             quality = quality
         )
         itemlist.append(new_item)
     itemlist = servertools.get_servers_itemlist(itemlist)

@@ -134,14 +134,14 @@ def listado(item):
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

     folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
-    patron = '<div class="size">(.*?)</div></div></div>'
+    patron = 'data-file-id(.*?)</div></div></li>'
     bloques = scrapertools.find_multiple_matches(data, patron)
     for block in bloques:
         if "adult_info" in block and not adult_content:
             continue
-        size = scrapertools.find_single_match(block, '<p>([^<]+)</p>')
-        scrapedurl, scrapedtitle = scrapertools.find_single_match(block,
-                                                                  '<div class="name"><a href="([^"]+)".*?>([^<]+)<')
+        size = scrapertools.find_single_match(block, '<p.*?>([^<]+)</p>')
+        patron = 'class="name"><a href="([^"]+)".*?>([^<]+)<'
+        scrapedurl, scrapedtitle = scrapertools.find_single_match(block, patron)
         scrapedthumbnail = scrapertools.find_single_match(block, "background-image:url\('([^']+)'")
         if scrapedthumbnail:
             try:
@@ -161,7 +161,6 @@ def listado(item):

         else:
             scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png"

         scrapedurl = item.extra + scrapedurl
         title = "%s (%s)" % (scrapedtitle, size)
         if "adult_info" in block:
@@ -186,7 +185,7 @@ def listado(item):

         itemlist.append(new_item)

-    next_page = scrapertools.find_single_match(data, '<div class="pageSplitterBorder" data-nextpage-number="([^"]+)"')
+    next_page = scrapertools.find_single_match(data, 'class="pageSplitter" data-nextpage-number="([^"]+)"')
     if next_page:
         if item.post:
             post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page, item.post)
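
The kbagi pager keeps the original POST body and only swaps the page number with re.sub. A standalone sketch of that substitution (the post string here is a made-up example; the pattern is the one from the code above):

    import re

    # Hypothetical POST body like the one kbagi stores in item.post.
    post = "folderId=1234&pageNumber=2&partial=true"
    next_page = "3"  # scraped from data-nextpage-number="..."

    # Same substitution as in listado(): only the pageNumber field changes.
    post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page, post)
    print(post)  # folderId=1234&pageNumber=3&partial=true
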
File diff suppressed because it is too large
@@ -1,76 +0,0 @@
-{
-    "id": "peliculasnu",
-    "name": "Peliculas.Nu",
-    "language": ["cast", "lat"],
-    "active": true,
-    "adult": false,
-    "thumbnail": "http://i.imgur.com/2iupwXE.png",
-    "banner": "peliculasnu.png",
-    "categories": [
-        "movie",
-        "vos"
-    ],
-    "settings": [
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Incluir en busqueda global",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_newest_peliculas",
-            "type": "bool",
-            "label": "Incluir en Novedades - Películas",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_newest_terror",
-            "type": "bool",
-            "label": "Incluir en Novedades - terror",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_newest_castellano",
-            "type": "bool",
-            "label": "Incluir en Novedades - Castellano",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_newest_latino",
-            "type": "bool",
-            "label": "Incluir en Novedades - Latino",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "modo_grafico",
-            "type": "bool",
-            "label": "Buscar información extra",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "perfil",
-            "type": "list",
-            "label": "Perfil de color",
-            "default": 2,
-            "enabled": true,
-            "visible": true,
-            "lvalues": [
-                "Perfil 3",
-                "Perfil 2",
-                "Perfil 1"
-            ]
-        }
-    ]
-}
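
Each entry in the channel JSON's settings block becomes a per-channel option that the module reads back through config.get_setting, as peliculasnu.py below does for "modo_grafico" and "perfil". The "perfil" list setting is stored as an index into "lvalues", which the module then uses to pick a row of its color table; a standalone sketch of that mapping, using the values quoted above:

    # The stored value of a "list" setting is an index into "lvalues".
    lvalues = ["Perfil 3", "Perfil 2", "Perfil 1"]
    default = 2  # as declared above -> "Perfil 1"

    # Color table as defined in peliculasnu.py below.
    perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
              ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
              ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
    color1, color2, color3 = perfil[default]
    print("%s -> %s %s %s" % (lvalues[default], color1, color2, color3))
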
@@ -1,284 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import urllib
-
-from core import httptools
-from core import jsontools
-from core import scrapertools
-from core import servertools
-from core import tmdb
-from core.item import Item
-from platformcode import config, logger
-
-__modo_grafico__ = config.get_setting("modo_grafico", "peliculasnu")
-__perfil__ = config.get_setting("perfil", "peliculasnu")
-
-# Fijar perfil de color
-perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
-          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
-          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
-color1, color2, color3 = perfil[__perfil__]
-host = "http://peliculas.nu/"
-
-
-def mainlist(item):
-    logger.info()
-    itemlist = []
-    item.text_color = color1
-
-    itemlist.append(item.clone(title="Novedades", action="entradas", url=host, fanart="http://i.imgur.com/c3HS8kj.png"))
-    itemlist.append(item.clone(title="Más Vistas", action="entradas", url=host + "mas-vistas",
-                               fanart="http://i.imgur.com/c3HS8kj.png"))
-    itemlist.append(item.clone(title="Mejor Valoradas", action="entradas", url=host + "mejor-valoradas",
-                               fanart="http://i.imgur.com/c3HS8kj.png"))
-    item.text_color = color2
-    itemlist.append(item.clone(title="En Español", action="entradas", url=host + "?s=Español",
-                               fanart="http://i.imgur.com/c3HS8kj.png"))
-    itemlist.append(item.clone(title="En Latino", action="entradas", url=host + "?s=Latino",
-                               fanart="http://i.imgur.com/c3HS8kj.png"))
-    itemlist.append(
-        item.clone(title="En VOSE", action="entradas", url=host + "?s=VOSE", fanart="http://i.imgur.com/c3HS8kj.png"))
-    item.text_color = color3
-    itemlist.append(item.clone(title="Por género", action="indices", fanart="http://i.imgur.com/c3HS8kj.png"))
-    itemlist.append(item.clone(title="Por letra", action="indices", fanart="http://i.imgur.com/c3HS8kj.png"))
-
-    itemlist.append(item.clone(title="", action=""))
-    itemlist.append(item.clone(title="Buscar...", action="search"))
-    itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
-
-    return itemlist
-
-
-def configuracion(item):
-    from platformcode import platformtools
-    ret = platformtools.show_channel_settings()
-    platformtools.itemlist_refresh()
-    return ret
-
-
-def search(item, texto):
-    logger.info()
-    texto = texto.replace(" ", "+")
-    try:
-        item.url = "%s?s=%s" % (host, texto)
-        item.action = "entradas"
-        return entradas(item)
-    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
-
-
-def newest(categoria):
-    logger.info()
-    itemlist = []
-    item = Item()
-    try:
-        if categoria == "peliculas":
-            item.url = host
-        elif categoria == "terror":
-            item.url = host + "terror/"
-        elif categoria == 'castellano':
-            item.url = host + "?s=Español"
-        elif categoria == 'latino':
-            item.url = host + "?s=Latino"
-
-        item.from_newest = True
-        item.action = "entradas"
-        itemlist = entradas(item)
-
-        if itemlist[-1].action == "entradas":
-            itemlist.pop()
-
-    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
-
-    return itemlist
-
-
-def entradas(item):
-    logger.info()
-    itemlist = []
-
-    data = httptools.downloadpage(item.url).data
-    patron = '<li class="TPostMv">.*?href="([^"]+)".*?src="([^"]+)".*?class="Title">([^<]+)<.*?' \
-             '.*?"Date AAIco-date_range">(\d+).*?class="Qlty">([^<]+)<.*?<p class="Idioma(.*?)</p>'
-    matches = scrapertools.find_multiple_matches(data, patron)
-    if item.extra == "next":
-        matches_ = matches[15:]
-    else:
-        matches_ = matches[:15]
-    for scrapedurl, scrapedthumbnail, scrapedtitle, year, calidad, data_idioma in matches_:
-        idiomas = []
-        if "/espm" in data_idioma:
-            idiomas.append("CAST")
-        if "/latinom" in data_idioma:
-            idiomas.append("LAT")
-        if "/vosemi" in data_idioma:
-            idiomas.append("VOSE")
-
-        titulo = "%s [%s]" % (scrapedtitle, calidad)
-        if idiomas:
-            titulo += " [%s]" % "/".join(idiomas)
-
-        scrapedthumbnail = scrapedthumbnail.replace("-160x242", "")
-        infolabels = {'year': year}
-        itemlist.append(Item(channel=item.channel, action="findvideos", url=scrapedurl, title=titulo,
-                             contentTitle=scrapedtitle, infoLabels=infolabels, text_color=color2,
-                             thumbnail=scrapedthumbnail, contentType="movie", fulltitle=scrapedtitle,
-                             language=idiomas, quality=calidad))
-
-    if not item.from_newest:
-        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-    if not item.extra and len(matches) > 15:
-        itemlist.append(item.clone(title=">> Página Siguiente", extra="next", text_color=color3))
-    elif item.extra == "next":
-        next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"')
-        if next_page:
-            itemlist.append(item.clone(title=">> Página Siguiente", url=next_page, text_color=color3, extra=""))
-
-    return itemlist
-
-
-def listado(item):
-    logger.info()
-    itemlist = []
-
-    data = httptools.downloadpage(item.url).data
-    patron = '<td class="MvTbImg">.*?href="([^"]+)".*?src="([^"]+)".*?<strong>([^<]+)<.*?' \
-             '.*?<td>(\d+).*?class="Qlty">([^<]+)<'
-    matches = scrapertools.find_multiple_matches(data, patron)
-    if item.extra == "next":
-        matches_ = matches[15:]
-    else:
-        matches_ = matches[:15]
-    for scrapedurl, scrapedthumbnail, scrapedtitle, year, calidad in matches_:
-        titulo = "%s [%s]" % (scrapedtitle, calidad)
-        scrapedthumbnail = scrapedthumbnail.replace("-55x85", "")
-        infolabels = {'year': year}
-        itemlist.append(Item(channel=item.channel, action="findvideos", url=scrapedurl, title=titulo,
-                             contentTitle=scrapedtitle, infoLabels=infolabels, text_color=color2,
-                             thumbnail=scrapedthumbnail, contentType="movie", fulltitle=scrapedtitle))
-
-    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-    if not item.extra and len(matches) > 15:
-        itemlist.append(item.clone(title=">> Página Siguiente", extra="next", text_color=color3))
-    elif item.extra == "next":
-        next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"')
-        if next_page:
-            itemlist.append(item.clone(title=">> Página Siguiente", url=next_page, text_color=color3, extra=""))
-
-    return itemlist
-
-
-def indices(item):
-    logger.info()
-    itemlist = []
-
-    data = httptools.downloadpage(host).data
-    if "letra" in item.title:
-        action = "listado"
-        bloque = scrapertools.find_single_match(data, '<ul class="AZList">(.*?)</ul>')
-    else:
-        action = "entradas"
-        bloque = scrapertools.find_single_match(data, 'Géneros</a>(.*?)</ul>')
-    matches = scrapertools.find_multiple_matches(bloque, '<li.*?<a href="([^"]+)">([^<]+)</a>')
-    for scrapedurl, scrapedtitle in matches:
-        itemlist.append(item.clone(action=action, url=scrapedurl, title=scrapedtitle))
-
-    return itemlist
-
-
-def findvideos(item):
-    logger.info()
-    itemlist = []
-
-    tmdb.set_infoLabels_item(item, __modo_grafico__)
-    data = httptools.downloadpage(item.url).data
-
-    if not item.infoLabels["plot"]:
-        item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="Description">.*?<p>(.*?)</p>')
-    fanart = scrapertools.find_single_match(data, '<img class="TPostBg" src="([^"]+)"')
-    if not item.fanart and fanart:
-        item.fanart = fanart
-
-    patron = '<li class="Button STPb.*?data-tipo="([^"]+)" data-playersource="([^"]+)".*?><span>.*?<span>(.*?)</span>'
-    matches = scrapertools.find_multiple_matches(data, patron)
-    for tipo, source, title in matches:
-        if tipo == "trailer":
-            continue
-        post = "source=%s&action=obtenerurl" % urllib.quote(source)
-        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': item.url}
-        data_url = httptools.downloadpage(host + 'wp-admin/admin-ajax.php', post, headers=headers).data
-        url = jsontools.load(data_url).get("url")
-
-        if 'openload' in url:
-            url = url + '|' + item.url
-        extra_info = title.split(' - ')
-        title = "%s - %s" % ('%s', title)
-        itemlist.append(Item(channel=item.channel, action="play", url=url, title=title, language=extra_info[0],
-                             quality=extra_info[1], text_color=color3))
-
-    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
-
-    if item.extra != "findvideos" and config.get_videolibrary_support():
-        itemlist.append(
-            item.clone(title="Añadir película a la videoteca", action="add_pelicula_to_library", extra="findvideos",
-                       text_color="green"))
-
-    return itemlist
-
-
-def play(item):
-    logger.info()
-    itemlist = []
-    if "drive.php?v=" in item.url:
-        if not item.url.startswith("http:") and not item.url.startswith("https:"):
-            item.url = "http:" + item.url
-        data = httptools.downloadpage(item.url, add_referer=True).data.replace("\\", "")
-
-        subtitulo = scrapertools.find_single_match(data, "var subtitulo='([^']+)'")
-        patron = '"label":\s*"([^"]+)","type":\s*"video/([^"]+)","(?:src|file)":\s*"([^"]+)"'
-        matches = scrapertools.find_multiple_matches(data, patron)
-        for calidad, extension, url in matches:
-            url = url.replace(",", "%2C")
-            title = ".%s %s [directo]" % (extension, calidad)
-            itemlist.append([title, url, 0, subtitulo])
-        try:
-            itemlist.sort(key=lambda it: int(it[0].split(" ")[1].split("p")[0]))
-        except:
-            pass
-    elif "metiscs" in item.url:
-        import base64
-        from lib import jsunpack
-
-        item.url = item.url.replace("https:", "http:")
-        if not item.url.startswith("http:"):
-            item.url = "http:" + item.url
-
-        data = httptools.downloadpage(item.url, add_referer=True).data
-        str_encode = scrapertools.find_multiple_matches(data, '(?:\+|\()"([^"]+)"')
-        data = base64.b64decode("".join(str_encode))
-        packed = scrapertools.find_single_match(data, '(eval\(function.*?)(?:</script>|\}\)\))')
-        if not packed:
-            packed = data
-        data_js = jsunpack.unpack(packed)
-
-        subtitle = scrapertools.find_single_match(data_js, 'tracks:\[\{"file":"([^"]+)"')
-        patron = '{"file":\s*"([^"]+)","label":\s*"([^"]+)","type":\s*"video/([^"]+)"'
-        matches = scrapertools.find_multiple_matches(data_js, patron)
-        for url, calidad, extension in matches:
-            url = url.replace(",", "%2C")
-            title = ".%s %s [directo]" % (extension, calidad)
-            itemlist.insert(0, [title, url, 0, subtitle])
-    else:
-        return [item]
-
-    return itemlist
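
In play() above, each entry's display title looks like ".mp4 720p [directo]", and the sort lambda digs the numeric resolution out of that label with two chained splits. The mechanics are easier to see in isolation (sample labels and URLs invented):

    # Each entry is [title, url, subtitle_pos, subtitle].
    itemlist = [
        [".mp4 1080p [directo]", "https://example/a.mp4", 0, ""],
        [".mp4 360p [directo]", "https://example/b.mp4", 0, ""],
        [".mp4 720p [directo]", "https://example/c.mp4", 0, ""],
    ]

    # ".mp4 720p [directo]".split(" ")[1] -> "720p"; .split("p")[0] -> "720".
    itemlist.sort(key=lambda it: int(it[0].split(" ")[1].split("p")[0]))
    print([it[0] for it in itemlist])  # 360p, then 720p, then 1080p
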
@@ -10,6 +10,7 @@ from core import scrapertoolsV2
 from core import servertools
 from core.item import Item
 from platformcode import config, logger
+from core import tmdb
 from channels import autoplay

@@ -108,7 +109,7 @@ def extract_series_from_data(item, data):
             context.extend(context2)

         itemlist.append(item.clone(title=name, url=urlparse.urljoin(HOST, url),
-                                   action=action, show=name,
+                                   action=action, show=name, contentSerieName=name,
                                    thumbnail=img,
                                    context=context))

@@ -121,6 +122,7 @@ def extract_series_from_data(item, data):
         # logger.debug("Adding previous page item")
         itemlist.append(item.clone(title="<< Anterior", extra=item.extra - 1))

+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
     return itemlist

@@ -189,7 +191,8 @@ def search(item, texto):
         for url, img, title in shows:
             title = title.strip()
             itemlist.append(item.clone(title=title, url=urlparse.urljoin(HOST, url), action="episodios", show=title,
-                                       thumbnail=img, context=filtertools.context(item, list_idiomas, CALIDADES)))
+                                       thumbnail=img, context=filtertools.context(item, list_idiomas, CALIDADES),
+                                       contentSerieName=title))

    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
    except:
@@ -222,12 +225,18 @@ def episodios(item):
                               re.findall("banderas/([^\.]+)", flags, re.MULTILINE)])
         filter_lang = idiomas.replace("[", "").replace("]", "").split(" ")
         display_title = "%s - %s %s" % (item.show, title, idiomas)
+
+        season_episode = scrapertoolsV2.get_season_and_episode(title).split('x')
+        item.infoLabels['season'] = season_episode[0]
+        item.infoLabels['episode'] = season_episode[1]
         # logger.debug("Episode found %s: %s" % (display_title, urlparse.urljoin(HOST, url)))
         itemlist.append(item.clone(title=display_title, url=urlparse.urljoin(HOST, url),
                                    action="findvideos", plot=plot, fanart=fanart, language=filter_lang))

+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
     itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES)

     if config.get_videolibrary_support() and len(itemlist) > 0:
         itemlist.append(
             item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios"))
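
The new lines lean on scrapertoolsV2.get_season_and_episode() returning a "2x05"-style token that a single split('x') turns into season and episode numbers. Roughly equivalent standalone logic, with a plain regex standing in for the helper (the title string is invented):

    import re

    title = "Temporada 2 Capitulo 5 - 2x05"
    # Stand-in for scrapertoolsV2.get_season_and_episode(): grab the "2x05" token.
    season, episode = re.search(r'(\d+)x(\d+)', title).group(0).split('x')
    print("season %s episode %s" % (season, episode))  # season 2 episode 05
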
@@ -1258,13 +1258,13 @@ class Tmdb(object):
-            self.temporada[numtemporada] = {"status_code": 15, "status_message": "Failed"}
+            self.temporada[numtemporada] = {"episodes": {}}

-        # if "status_code" in self.temporada[numtemporada]:
-        #     # Se ha producido un error
-        #     msg = "La busqueda de " + buscando + " no dio resultados."
-        #     msg += "\nError de tmdb: %s %s" % (
-        #         self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"])
-        #     logger.debug(msg)
-        #     self.temporada[numtemporada] = {"episodes": {}}
+        if "status_code" in self.temporada[numtemporada]:
+            # Se ha producido un error
+            msg = "La busqueda de " + buscando + " no dio resultados."
+            msg += "\nError de tmdb: %s %s" % (
+                self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"])
+            logger.debug(msg)
+            self.temporada[numtemporada] = {"episodes": {}}

         return self.temporada[numtemporada]

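
After this change the failure paths converge: a download error yields an empty season directly, while an error payload from the API (TMDB reports failures as JSON carrying a status_code field) gets logged and then normalized to the same empty shape. A condensed sketch of that flow, with invented payloads:

    def normalize_season(payload):
        # TMDB signals errors with "status_code"/"status_message" in the body;
        # callers only ever want an "episodes" mapping back.
        if "status_code" in payload:
            print("tmdb error: %s %s" % (payload["status_code"], payload.get("status_message")))
            return {"episodes": {}}
        return payload

    print(normalize_season({"status_code": 34, "status_message": "Not found"}))
    print(normalize_season({"episodes": {1: {"name": "Pilot"}}}))
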
@@ -4,7 +4,7 @@
     "ignore_urls": [],
     "patterns": [
       {
-        "pattern": "(http://biter.tv/v/[A-z0-9]+)",
+        "pattern": "(http://b.ter.tv/v/[A-z0-9]+)",
         "url": "\\1"
       }
    ]

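
These server JSON entries drive link detection: each "pattern" is a regex run against page data, and "url" is a backreference template that rebuilds the canonical link from the captured group. A self-contained sketch of how such an entry resolves (the page snippet is invented; re approximates the addon's matcher):

    import re

    # Entry as in the JSON above: a regex pattern plus a "\1" URL template.
    pattern = r"(http://b.ter.tv/v/[A-z0-9]+)"
    url_template = r"\1"

    page = '<iframe src="http://b.ter.tv/v/abc123"></iframe>'
    m = re.search(pattern, page)
    if m:
        video_url = m.expand(url_template)
        print(video_url)  # http://b.ter.tv/v/abc123
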
@@ -35,11 +35,17 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     data = httptools.downloadpage(page_url).data
     patron = 'https://www.rapidvideo.com/e/[^"]+'
     match = scrapertools.find_multiple_matches(data, patron)
-    for url1 in match:
-        res = scrapertools.find_single_match(url1, '=(\w+)')
-        data = httptools.downloadpage(url1).data
-        url = scrapertools.find_single_match(data, 'source src="([^"]+)')
-        ext = scrapertools.get_filename_from_url(url)[-4:]
-        video_urls.append(['%s %s [rapidvideo]' % (ext, res), url])
+    if match:
+        for url1 in match:
+            res = scrapertools.find_single_match(url1, '=(\w+)')
+            data = httptools.downloadpage(url1).data
+            url = scrapertools.find_single_match(data, 'source src="([^"]+)')
+            ext = scrapertools.get_filename_from_url(url)[-4:]
+            video_urls.append(['%s %s [rapidvideo]' % (ext, res), url])
+    else:
+        patron = 'data-setup.*?src="([^"]+)".*?'
+        patron += 'type="([^"]+)"'
+        match = scrapertools.find_multiple_matches(data, patron)
+        for url, ext in match:
+            video_urls.append(['%s [rapidvideo]' % (ext), url])
     return video_urls
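
Both branches build video_urls entries whose first element is a display label: the primary branch derives the resolution from a query parameter in the embed URL and the extension from the tail of the stream's file name. A standalone sketch of those two extractions (the URLs are invented; string slicing stands in for get_filename_from_url):

    import re

    # Hypothetical embed URL and resolved stream, shaped like those the code scrapes.
    url1 = "https://www.rapidvideo.com/e/ABCDEF?q=720p"
    url = "https://cdn.example/stream/video.mp4"

    res = re.search(r'=(\w+)', url1).group(1)  # "720p", same '=(\w+)' pattern as above
    ext = url.split("/")[-1][-4:]              # ".mp4", stands in for get_filename_from_url()[-4:]
    print('%s %s [rapidvideo]' % (ext, res))   # .mp4 720p [rapidvideo]
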