Merge remote-tracking branch 'alfa-addon/master'

This commit is contained in:
unknown
2018-02-22 12:13:35 -03:00
15 changed files with 296 additions and 1766 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.4.16" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.4.18" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,8 +19,9 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» userscloud » hdfull
» peliculasgratis
» seriesblanco » rapidvideo
» kbagi » bitertv
» doomtv » miltorrents
¤ arreglos internos
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

View File

@@ -7,6 +7,7 @@ import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
@@ -219,23 +220,38 @@ def newest(categoria):
return itemlist
def get_vip(item, url):
    """Extract the direct MP4 links from a v.d0stream.com VIP player page.

    Fetches ``url``/videocontent, pulls the embedded video id, then queries the
    d0stream videoinfo JSON API and returns one playable clone of ``item`` per
    CDN quality found under sources['mp4_cdn'].
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(url+'/videocontent').data
    # Strip quotes/whitespace/entities so the regexes below match a flat string
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    video_id = scrapertools.find_single_match(data, 'id=videoInfo ><span >(.*?)</span>')
    new_url = 'https://v.d0stream.com/api/videoinfo/%s?src-url=https://Fv.d0stream.com' % video_id
    json_data = httptools.downloadpage(new_url).data
    dict_data = jsontools.load(json_data)
    sources = dict_data['sources']
    for vip_item in sources['mp4_cdn']:
        vip_url= vip_item['url']
        vip_quality = vip_item['label']
        title ='%s [%s]' % (item.title, vip_quality)
        # server='directo' marks the URL as directly playable (no resolver needed)
        itemlist.append(item.clone(title = title, url=vip_url, action='play', quality=vip_quality, server='directo'))
    return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
player_vip = scrapertools.find_single_match(data, 'src=(https:\/\/content.jwplatform.com\/players.*?js)')
data_m3u8 = httptools.downloadpage(player_vip, headers= {'referer':item.url}).data
data_m3u8 = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data_m3u8)
url_m3u8 = scrapertools.find_single_match(data_m3u8,',sources:.*?file: (.*?),')
itemlist.append(item.clone(url=url_m3u8, action='play'))
player_vip = scrapertools.find_single_match(data, 'class=movieplay><iframe src=(https://v.d0stream.com.*?) frameborder')
itemlist.extend(get_vip(item, player_vip))
patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, urls in matches:
quality = scrapertools.find_single_match(data, '<div class=les-content><a href=#%s>(.*?)<\/a><\/div>'%option)
title = '%s (%s)' % (item.title, quality)
if 'content' in urls:
urls = '%s%s'%('http:',urls)
hidden_data = httptools.downloadpage(urls).data
@@ -248,20 +264,18 @@ def findvideos(item):
new_item = Item(
channel = item.channel,
url = videoitem,
title = title,
title = item.title,
contentTitle = item.title,
action = 'play',
quality = quality
)
itemlist.append(new_item)
else:
new_item = Item(
channel=item.channel,
url=urls,
title=title,
title=item.title,
contentTitle=item.title,
action='play',
quality = quality
)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist)

View File

@@ -134,14 +134,14 @@ def listado(item):
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
patron = '<div class="size">(.*?)</div></div></div>'
patron = 'data-file-id(.*?)</div></div></li>'
bloques = scrapertools.find_multiple_matches(data, patron)
for block in bloques:
if "adult_info" in block and not adult_content:
continue
size = scrapertools.find_single_match(block, '<p>([^<]+)</p>')
scrapedurl, scrapedtitle = scrapertools.find_single_match(block,
'<div class="name"><a href="([^"]+)".*?>([^<]+)<')
size = scrapertools.find_single_match(block, '<p.*?>([^<]+)</p>')
patron = 'class="name"><a href="([^"]+)".*?>([^<]+)<'
scrapedurl, scrapedtitle = scrapertools.find_single_match(block, patron)
scrapedthumbnail = scrapertools.find_single_match(block, "background-image:url\('([^']+)'")
if scrapedthumbnail:
try:
@@ -161,7 +161,6 @@ def listado(item):
else:
scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png"
scrapedurl = item.extra + scrapedurl
title = "%s (%s)" % (scrapedtitle, size)
if "adult_info" in block:
@@ -186,7 +185,7 @@ def listado(item):
itemlist.append(new_item)
next_page = scrapertools.find_single_match(data, '<div class="pageSplitterBorder" data-nextpage-number="([^"]+)"')
next_page = scrapertools.find_single_match(data, 'class="pageSplitter" data-nextpage-number="([^"]+)"')
if next_page:
if item.post:
post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page, item.post)

File diff suppressed because it is too large Load Diff

View File

@@ -1,76 +0,0 @@
{
"id": "peliculasnu",
"name": "Peliculas.Nu",
"language": ["cast", "lat"],
"active": true,
"adult": false,
"thumbnail": "http://i.imgur.com/2iupwXE.png",
"banner": "peliculasnu.png",
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 2,
"enabled": true,
"visible": true,
"llvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -1,284 +0,0 @@
# -*- coding: utf-8 -*-
import urllib
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
# Channel-level user settings: extra TMDB artwork lookup and color profile index.
__modo_grafico__ = config.get_setting("modo_grafico", "peliculasnu")
__perfil__ = config.get_setting("perfil", "peliculasnu")
# Set the color profile: each row is (color1, color2, color3) hex ARGB values
# selected by the "perfil" setting above.
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
color1, color2, color3 = perfil[__perfil__]
host = "http://peliculas.nu/"
def mainlist(item):
    """Build the channel's root menu.

    Entries are grouped by color: listings (color1), language filters
    (color2), and indexes plus utility entries (color3 / gold).
    """
    logger.info()
    fan = "http://i.imgur.com/c3HS8kj.png"
    entries = []

    item.text_color = color1
    for label, path in (("Novedades", ""),
                        ("Más Vistas", "mas-vistas"),
                        ("Mejor Valoradas", "mejor-valoradas")):
        entries.append(item.clone(title=label, action="entradas",
                                  url=host + path, fanart=fan))

    item.text_color = color2
    for lang in ("Español", "Latino", "VOSE"):
        entries.append(item.clone(title="En " + lang, action="entradas",
                                  url=host + "?s=" + lang, fanart=fan))

    item.text_color = color3
    entries.append(item.clone(title="Por género", action="indices", fanart=fan))
    entries.append(item.clone(title="Por letra", action="indices", fanart=fan))
    entries.append(item.clone(title="", action=""))
    entries.append(item.clone(title="Buscar...", action="search"))
    entries.append(item.clone(action="configuracion", title="Configurar canal...",
                              text_color="gold", folder=False))
    return entries
def configuracion(item):
    """Show the channel settings dialog, then refresh the current listing."""
    from platformcode import platformtools
    result = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return result
def search(item, texto):
    """Search the site for ``texto`` and return the matching entries.

    Any exception is caught and logged so a failing channel never breaks
    the global search aggregator.
    """
    logger.info()
    query = texto.replace(" ", "+")
    try:
        item.url = "%s?s=%s" % (host, query)
        item.action = "entradas"
        return entradas(item)
    except:
        # Log the traceback pieces instead of propagating the error
        import sys
        for trace_part in sys.exc_info():
            logger.error("%s" % trace_part)
        return []
def newest(categoria):
    """Return the newest entries for the global "Novedades" section.

    ``categoria`` selects the listing URL ("peliculas", "terror",
    "castellano" or "latino"). Failures are logged and return [] so a
    broken channel does not interrupt the aggregated news listing.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == "peliculas":
            item.url = host
        elif categoria == "terror":
            item.url = host+"terror/"
        elif categoria == 'castellano':
            item.url = host + "?s=Español"
        elif categoria == 'latino':
            item.url = host + "?s=Latino"
        # from_newest skips the TMDB lookup inside entradas() for speed
        item.from_newest = True
        item.action = "entradas"
        itemlist = entradas(item)
        # Drop the trailing ">> Página Siguiente" item added by entradas()
        if itemlist[-1].action == "entradas":
            itemlist.pop()
    # Catch everything so a failing channel does not break the news section
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return itemlist
def entradas(item):
    """Scrape a grid listing page and return movie items (15 per page).

    Pagination: the first 15 regex matches are shown initially; a clone with
    extra="next" shows matches 15+ of the same page, and only then does the
    real "nextpostslink" pager advance to the following page.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Groups: url, thumbnail, title, year, quality, language-flags blob
    patron = '<li class="TPostMv">.*?href="([^"]+)".*?src="([^"]+)".*?class="Title">([^<]+)<.*?' \
             '.*?"Date AAIco-date_range">(\d+).*?class="Qlty">([^<]+)<.*?<p class="Idioma(.*?)</p>'
    matches = scrapertools.find_multiple_matches(data, patron)
    if item.extra == "next":
        matches_ = matches[15:]
    else:
        matches_ = matches[:15]
    for scrapedurl, scrapedthumbnail, scrapedtitle, year, calidad, data_idioma in matches_:
        # Language flags are inferred from flag-image names in the Idioma blob
        idiomas = []
        if "/espm" in data_idioma:
            idiomas.append("CAST")
        if "/latinom" in data_idioma:
            idiomas.append("LAT")
        if "/vosemi" in data_idioma:
            idiomas.append("VOSE")
        titulo = "%s [%s]" % (scrapedtitle, calidad)
        if idiomas:
            titulo += " [%s]" % "/".join(idiomas)
        # Strip the -160x242 suffix to get the full-size poster
        scrapedthumbnail = scrapedthumbnail.replace("-160x242", "")
        infolabels = {'year': year}
        itemlist.append(Item(channel=item.channel, action="findvideos", url=scrapedurl, title=titulo,
                             contentTitle=scrapedtitle, infoLabels=infolabels, text_color=color2,
                             thumbnail=scrapedthumbnail, contentType="movie", fulltitle=scrapedtitle,
                             language=idiomas, quality=calidad))
    # Skip the TMDB enrichment when called from the "newest" aggregator
    if not item.from_newest:
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    if not item.extra and len(matches) > 15:
        itemlist.append(item.clone(title=">> Página Siguiente", extra="next", text_color=color3))
    elif item.extra == "next":
        next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"')
        if next_page:
            itemlist.append(item.clone(title=">> Página Siguiente", url=next_page, text_color=color3, extra=""))
    return itemlist
def listado(item):
    """Scrape a table-style listing page (A-Z index) and return movie items.

    Same half-page pagination scheme as entradas(): first 15 matches, then
    extra="next" for the remainder, then the "nextpostslink" pager.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Groups: url, thumbnail, title, year, quality
    patron = '<td class="MvTbImg">.*?href="([^"]+)".*?src="([^"]+)".*?<strong>([^<]+)<.*?' \
             '.*?<td>(\d+).*?class="Qlty">([^<]+)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    if item.extra == "next":
        matches_ = matches[15:]
    else:
        matches_ = matches[:15]
    for scrapedurl, scrapedthumbnail, scrapedtitle, year, calidad in matches_:
        titulo = "%s [%s]" % (scrapedtitle, calidad)
        # Strip the -55x85 suffix to get the full-size poster
        scrapedthumbnail = scrapedthumbnail.replace("-55x85", "")
        infolabels = {'year': year}
        itemlist.append(Item(channel=item.channel, action="findvideos", url=scrapedurl, title=titulo,
                             contentTitle=scrapedtitle, infoLabels=infolabels, text_color=color2,
                             thumbnail=scrapedthumbnail, contentType="movie", fulltitle=scrapedtitle))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    if not item.extra and len(matches) > 15:
        itemlist.append(item.clone(title=">> Página Siguiente", extra="next", text_color=color3))
    elif item.extra == "next":
        next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"')
        if next_page:
            itemlist.append(item.clone(title=">> Página Siguiente", url=next_page, text_color=color3, extra=""))
    return itemlist
def indices(item):
    """List either the A-Z letter index or the genre index from the home page.

    The branch is chosen from the menu title: "Por letra" entries point at
    the listado action, genre entries at entradas.
    """
    logger.info()
    page = httptools.downloadpage(host).data
    if "letra" in item.title:
        target_action = "listado"
        section = scrapertools.find_single_match(page, '<ul class="AZList">(.*?)</ul>')
    else:
        target_action = "entradas"
        section = scrapertools.find_single_match(page, 'Géneros</a>(.*?)</ul>')
    links = scrapertools.find_multiple_matches(section, '<li.*?<a href="([^"]+)">([^<]+)</a>')
    return [item.clone(action=target_action, url=link_url, title=link_title)
            for link_url, link_title in links]
def findvideos(item):
    """Resolve the playable sources for a movie page.

    Reads the player buttons, asks the site's admin-ajax endpoint for each
    real URL, and returns play items (plus an "add to library" entry when
    supported). Titles carry a '%s' placeholder filled with the server name
    by get_servers_itemlist.
    """
    logger.info()
    itemlist = []
    tmdb.set_infoLabels_item(item, __modo_grafico__)
    data = httptools.downloadpage(item.url).data
    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="Description">.*?<p>(.*?)</p>')
    fanart = scrapertools.find_single_match(data, '<img class="TPostBg" src="([^"]+)"')
    if not item.fanart and fanart:
        item.fanart = fanart
    # Groups: player type, opaque source token, "language - quality" label
    patron = '<li class="Button STPb.*?data-tipo="([^"]+)" data-playersource="([^"]+)".*?><span>.*?<span>(.*?)</span>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for tipo, source, title in matches:
        if tipo == "trailer":
            continue
        # The real URL is obtained via an AJAX POST with the source token
        post = "source=%s&action=obtenerurl" % urllib.quote(source)
        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': item.url}
        data_url = httptools.downloadpage(host + 'wp-admin/admin-ajax.php', post, headers=headers).data
        url = jsontools.load(data_url).get("url")
        # openload needs the referer appended after '|' for playback
        if 'openload' in url:
            url = url + '|' + item.url
        extra_info = title.split(' - ')
        # Leave a literal '%s' at the front; filled in with the server name below
        title = "%s - %s" % ('%s', title)
        itemlist.append(Item(channel=item.channel, action="play", url=url, title=title, language=extra_info[0],
                             quality=extra_info[1],text_color=color3))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    if item.extra != "findvideos" and config.get_videolibrary_support():
        itemlist.append(
            item.clone(title="Añadir película a la videoteca", action="add_pelicula_to_library", extra="findvideos",
                       text_color="green"))
    return itemlist
def play(item):
    """Resolve a final media URL for the two hosted-player variants.

    - "drive.php?v=": JSON-like sources embedded in the page, sorted by the
      numeric quality in the title.
    - "metiscs": base64 + packed-JS player; unpacked with jsunpack to read
      the file list.
    Anything else is returned unchanged for the generic server resolver.
    Each resolved entry is [title, url, 0, subtitle].
    """
    logger.info()
    itemlist = []
    if "drive.php?v=" in item.url:
        if not item.url.startswith("http:") and not item.url.startswith("https:"):
            item.url = "http:" + item.url
        # Unescape the page so the quoted JSON fields match the regex below
        data = httptools.downloadpage(item.url, add_referer=True).data.replace("\\", "")
        subtitulo = scrapertools.find_single_match(data, "var subtitulo='([^']+)'")
        patron = '"label":\s*"([^"]+)","type":\s*"video/([^"]+)","(?:src|file)":\s*"([^"]+)"'
        matches = scrapertools.find_multiple_matches(data, patron)
        for calidad, extension, url in matches:
            # Commas would break Kodi's URL parsing
            url = url.replace(",", "%2C")
            title = ".%s %s [directo]" % (extension, calidad)
            itemlist.append([title, url, 0, subtitulo])
        # Sort by numeric quality ("720p" -> 720); ignore unparsable labels
        try:
            itemlist.sort(key=lambda it: int(it[0].split(" ")[1].split("p")[0]))
        except:
            pass
    elif "metiscs" in item.url:
        import base64
        from lib import jsunpack
        item.url = item.url.replace("https:", "http:")
        if not item.url.startswith("http:"):
            item.url = "http:" + item.url
        data = httptools.downloadpage(item.url, add_referer=True).data
        # The payload is split across several quoted base64 chunks
        str_encode = scrapertools.find_multiple_matches(data, '(?:\+|\()"([^"]+)"')
        data = base64.b64decode("".join(str_encode))
        packed = scrapertools.find_single_match(data, '(eval\(function.*?)(?:</script>|\}\)\))')
        if not packed:
            packed = data
        data_js = jsunpack.unpack(packed)
        subtitle = scrapertools.find_single_match(data_js, 'tracks:\[\{"file":"([^"]+)"')
        patron = '{"file":\s*"([^"]+)","label":\s*"([^"]+)","type":\s*"video/([^"]+)"'
        matches = scrapertools.find_multiple_matches(data_js, patron)
        for url, calidad, extension in matches:
            url = url.replace(",", "%2C")
            title = ".%s %s [directo]" % (extension, calidad)
            # insert(0, ...) keeps the highest-quality-last source first
            itemlist.insert(0, [title, url, 0, subtitle])
    else:
        return [item]
    return itemlist

View File

@@ -19,9 +19,9 @@
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
"default": true,
"enabled": true,
"visible": true
}
]
}
}

View File

@@ -7,14 +7,19 @@ import re
import urllib
from platformcode import logger
from platformcode import config
from core import jsontools
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
host = 'http://pelisplus.co'
CHANNEL_HEADERS = [
["Host", host.replace("http://","")],
["X-Requested-With", "XMLHttpRequest"]
]
def mainlist(item):
logger.info()
@@ -55,8 +60,53 @@ def movie_menu(item):
seccion='anios'
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + "/suggest/?query=",
type="m",
seccion='buscar'
))
return itemlist
def search(item, texto):
    """Entry point for the channel search; delegates to sub_search.

    When invoked from the global search, item has no type/url yet, so the
    movie defaults are filled in first. The query text is appended to the
    suggest-API URL.
    NOTE(review): the nesting of the two default assignments under the
    `if not item.type` guard is reconstructed from context — confirm against
    the upstream file.
    """
    logger.info()
    if not item.type:
        item.type = "m"
        item.url = host + "/suggest/?query="
    item.url = item.url + texto
    if texto != '':
        return sub_search(item)
    else:
        return []
def sub_search(item):
    """Run a suggest-API search and map each JSON result to a menu item.

    ``item.url`` must already contain the query text. ``item.type`` selects
    the result bucket in the JSON payload: "m" (movies, action findvideos)
    or anything else (series, action seasons).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url, add_referer=True).data
    dict_data = jsontools.load(data)
    # Renamed from `list`/`dict`, which shadowed the builtins of the same name
    results = dict_data["data"][item.type]
    if item.type == "m":
        action = "findvideos"
    else:
        action = "seasons"
    for entry in results:
        itemlist.append(item.clone(channel=item.channel,
                                   action=action,
                                   fulltitle=entry["title"],
                                   show=entry["title"],
                                   infoLabels={"year": entry["release_year"]},
                                   thumbnail="http://static.pelisfox.tv/static/movie/" + entry["cover"],
                                   title=entry["title"] + " (" + entry["release_year"] + ")",
                                   url=host + entry["slug"]))
    tmdb.set_infoLabels(itemlist)
    return itemlist
def series_menu(item):
logger.info()
@@ -69,6 +119,13 @@ def series_menu(item):
type='serie'
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + "/suggest/?query=",
type="s",
seccion='buscar'
))
return itemlist
@@ -82,40 +139,34 @@ def get_source(url):
def list_all (item):
logger.info ()
itemlist = []
if item.type not in ['normal', 'seccion', 'serie']:
post = {'page':item.page, 'type':item.type,'id':item.id}
post = urllib.urlencode(post)
data =httptools.downloadpage(item.url, post=post).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
else:
data = get_source(item.url)
if item.type == 'serie' or item.type == 'recents':
if item.type in ['serie','recents']:
contentType = 'serie'
action = 'seasons'
else:
contentType = 'pelicula'
action = 'findvideos'
patron = 'item-%s><a href=(.*?)><figure><img.*?data-src=(.*?) alt=.*?<p>(.*?)<\/p><span>(\d{4})<\/span>'%contentType
matches = re.compile(patron,re.DOTALL).findall(data)
if item.type not in ['normal', 'seccion', 'serie']:
post = {'page':item.page, 'type':item.type,'slug':item.slug,'id':item.id}
post = urllib.urlencode(post)
data =httptools.downloadpage(item.url, post=post, headers=CHANNEL_HEADERS).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron ='<a href=(.*?)><figure><img.*?src=(.*?) alt=.*?<p>(.*?)<\/p><span>(\d{4})<\/span>'
else:
data = get_source(item.url)
patron = 'item-%s><a href=(.*?)><figure><img.*?data-src=(.*?) alt=.*?<p>(.*?)<\/p><span>(\d{4})</span>'%contentType
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
url = host+scrapedurl+'p001/'
thumbnail = scrapedthumbnail
plot= ''
contentTitle=scrapedtitle
title = contentTitle
year = scrapedyear
fanart =''
new_item=item.clone(action=action,
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=fanart,
plot="",
fanart="",
infoLabels ={'year':year}
)
if contentType =='serie':
@@ -141,13 +192,15 @@ def list_all (item):
else:
id =''
else:
if not item.page:
item.page = "1"
page = str(int(item.page)+1)
id = item.id
if type =='recents':
type_pagination = '/series/pagination'
type_pagination = '/series/pagination/'
else:
type_pagination = '/pagination'
type_pagination = '/pagination/'
url = host+type_pagination
@@ -164,29 +217,35 @@ def seccion(item):
logger.info()
itemlist = []
data = get_source(item.url)
page = "1"
if item.seccion == 'generos':
patron = '<li><a href=(.*?)><i class=ion-cube><\/i>(.*?)<\/span>'
type = 'genre'
pat = 'genero/'
elif item.seccion == 'anios':
patron = '<li><a href=(\/peliculas.*?)>(\d{4})<\/a>'
type = 'year'
matches = re.compile(patron, re.DOTALL).findall(data)
pat = 'peliculas-'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
if item.seccion == 'generos':
cant = re.sub(r'.*?<span class=cant-genre>','',scrapedtitle)
only_title = re.sub(r'<.*','',scrapedtitle).rstrip()
title = only_title+' (%s)'%cant
url = host+scrapedurl
slug = scrapertools.find_single_match(scrapedurl, "%s(.*?)/" %pat)
if item.seccion in ['generos', 'anios']:
url = host + "/pagination/"
itemlist.append(
Item(channel=item.channel,
action="list_all",
title=title,
Item(action="list_all",
channel=item.channel,
fulltitle=item.title,
url=url,
type = 'seccion'
page = "1",
slug = slug,
title=title,
type = type,
url=url
))
# Paginacion

View File

@@ -10,6 +10,7 @@ from core import scrapertoolsV2
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
from channels import autoplay
@@ -108,7 +109,7 @@ def extract_series_from_data(item, data):
context.extend(context2)
itemlist.append(item.clone(title=name, url=urlparse.urljoin(HOST, url),
action=action, show=name,
action=action, show=name, contentSerieName=name,
thumbnail=img,
context=context))
@@ -121,6 +122,7 @@ def extract_series_from_data(item, data):
# logger.debug("Adding previous page item")
itemlist.append(item.clone(title="<< Anterior", extra=item.extra - 1))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -189,7 +191,8 @@ def search(item, texto):
for url, img, title in shows:
title = title.strip()
itemlist.append(item.clone(title=title, url=urlparse.urljoin(HOST, url), action="episodios", show=title,
thumbnail=img, context=filtertools.context(item, list_idiomas, CALIDADES)))
thumbnail=img, context=filtertools.context(item, list_idiomas, CALIDADES),
contentSerieName=title))
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
@@ -222,12 +225,18 @@ def episodios(item):
re.findall("banderas/([^\.]+)", flags, re.MULTILINE)])
filter_lang = idiomas.replace("[", "").replace("]", "").split(" ")
display_title = "%s - %s %s" % (item.show, title, idiomas)
season_episode = scrapertoolsV2.get_season_and_episode(title).split('x')
item.infoLabels['season']= season_episode[0]
item.infoLabels['episode'] = season_episode[1]
# logger.debug("Episode found %s: %s" % (display_title, urlparse.urljoin(HOST, url)))
itemlist.append(item.clone(title=display_title, url=urlparse.urljoin(HOST, url),
action="findvideos", plot=plot, fanart=fanart, language=filter_lang))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios"))

View File

@@ -1258,13 +1258,13 @@ class Tmdb(object):
self.temporada[numtemporada] = {"status_code": 15, "status_message": "Failed"}
self.temporada[numtemporada] = {"episodes": {}}
# if "status_code" in self.temporada[numtemporada]:
# # Se ha producido un error
# msg = "La busqueda de " + buscando + " no dio resultados."
# msg += "\nError de tmdb: %s %s" % (
# self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"])
# logger.debug(msg)
# self.temporada[numtemporada] = {"episodes": {}}
if "status_code" in self.temporada[numtemporada]:
#Se ha producido un error
msg = "La busqueda de " + buscando + " no dio resultados."
msg += "\nError de tmdb: %s %s" % (
self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"])
logger.debug(msg)
self.temporada[numtemporada] = {"episodes": {}}
return self.temporada[numtemporada]

View File

@@ -4,19 +4,23 @@
# -*- By the Alfa Develop Group -*
import os
import xbmc
from core import httptools
from core.item import Item
from platformcode.platformtools import logger, config
from core import jsontools
from core.item import Item
from platformcode import config
from platformcode import logger
from threading import Thread
client_id = "c40ba210716aee87f6a9ddcafafc56246909e5377b623b72c15909024448e89d"
client_secret = "999164f25832341f0214453bb11c915adb18e9490d6b5e9a707963a5a1bee43e"
def auth_trakt():
item = Item()
folder = (config.get_platform() == "plex")
item.folder=folder
item.folder = folder
# Autentificación de cuenta Trakt
headers = {'Content-Type': 'application/json', 'trakt-api-key': client_id, 'trakt-api-version': '2'}
try:
@@ -48,7 +52,6 @@ def auth_trakt():
def token_trakt(item):
from platformcode import platformtools
headers = {'Content-Type': 'application/json', 'trakt-api-key': client_id, 'trakt-api-version': '2'}
@@ -63,8 +66,6 @@ def token_trakt(item):
data = jsontools.load(data)
elif item.action == "token_trakt":
url = "http://api-v2launch.trakt.tv/oauth/device/token"
post = {'code': item.device_code, 'client_id': client_id, 'client_secret': client_secret}
post = jsontools.dump(post)
post = "code=%s&client_id=%s&client_secret=%s" % (item.device_code, client_id, client_secret)
data = httptools.downloadpage(url, post, headers, replace_headers=True).data
data = jsontools.load(data)
@@ -72,7 +73,8 @@ def token_trakt(item):
import time
dialog_auth = platformtools.dialog_progress("Sincronizar con Trakt. No cierres esta ventana",
"1. Entra en la siguiente url: %s" % item.verify_url,
"2. Ingresa este código en la página y acepta: %s" % item.user_code,
"2. Ingresa este código en la página y acepta: %s"
% item.user_code,
"3. Espera a que se cierre esta ventana")
# Generalmente cada 5 segundos se intenta comprobar si el usuario ha introducido el código
@@ -80,7 +82,7 @@ def token_trakt(item):
time.sleep(item.intervalo)
try:
if dialog_auth.iscanceled():
config.set_setting("trakt_sync", 'false' )
config.set_setting("trakt_sync", False)
return
url = "http://api-v2launch.trakt.tv/oauth/device/token"
@@ -128,7 +130,6 @@ def token_trakt(item):
def get_trakt_watched(id_type, mediatype, update=False):
logger.info()
id_list = []
@@ -151,35 +152,36 @@ def get_trakt_watched(id_type, mediatype, update=False):
if token_auth:
try:
token_auth = config.get_setting("token_trakt", "trakt")
headers = [['Content-Type', 'application/json'], ['trakt-api-key', client_id], ['trakt-api-version', '2']]
headers = [['Content-Type', 'application/json'], ['trakt-api-key', client_id],
['trakt-api-version', '2']]
if token_auth:
headers.append(['Authorization', "Bearer %s" % token_auth])
url = "https://api.trakt.tv/sync/watched/%s" % mediatype
data = httptools.downloadpage(url, headers=headers, replace_headers=True).data
watched_dict = jsontools.load(data)
data = httptools.downloadpage(url, headers=headers, replace_headers=True).data
watched_dict = jsontools.load(data)
if mediatype == 'shows':
if mediatype == 'shows':
dict_show = dict()
for item in watched_dict:
temp =[]
id = str(item['show']['ids']['tmdb'])
season_dict=dict()
for season in item['seasons']:
ep=[]
number = str(season['number'])
#season_dict = dict()
for episode in season['episodes']:
ep.append(str(episode['number']))
season_dict[number]=ep
temp.append(season_dict)
dict_show[id] = season_dict
id_dict=dict_show
return id_dict
dict_show = dict()
for item in watched_dict:
temp = []
id_ = str(item['show']['ids']['tmdb'])
season_dict = dict()
for season in item['seasons']:
ep = []
number = str(season['number'])
# season_dict = dict()
for episode in season['episodes']:
ep.append(str(episode['number']))
season_dict[number] = ep
temp.append(season_dict)
dict_show[id_] = season_dict
id_dict = dict_show
return id_dict
elif mediatype == 'movies':
for item in watched_dict:
id_list.append(str(item['movie']['ids'][id_type]))
elif mediatype == 'movies':
for item in watched_dict:
id_list.append(str(item['movie']['ids'][id_type]))
except:
pass
@@ -188,14 +190,14 @@ def get_trakt_watched(id_type, mediatype, update=False):
def trakt_check(itemlist):
id_result = ''
#check = u'\u221a'
# check = u'\u221a'
check = 'v'
get_sync_from_file()
try:
for item in itemlist:
info = item.infoLabels
if info != '' and info['mediatype'] in ['movie', 'episode'] and item.channel !='videolibrary':
if info != '' and info['mediatype'] in ['movie', 'episode'] and item.channel != 'videolibrary':
mediatype = 'movies'
id_type = 'tmdb'
@@ -206,12 +208,12 @@ def trakt_check(itemlist):
if id_result == '':
id_result = get_trakt_watched(id_type, mediatype)
if info['mediatype'] == 'movie':
if info[id_type+'_id'] in id_result:
item.title ='[COLOR limegreen][%s][/COLOR] %s' % (check, item.title)
if info[id_type + '_id'] in id_result:
item.title = '[COLOR limegreen][%s][/COLOR] %s' % (check, item.title)
elif info['mediatype']=='episode':
if info[id_type+'_id'] in id_result:
id= info[id_type+'_id']
elif info['mediatype'] == 'episode':
if info[id_type + '_id'] in id_result:
id = info[id_type + '_id']
if info['season'] != '' and info['episode'] != '':
season = str(info['season'])
@@ -223,38 +225,40 @@ def trakt_check(itemlist):
if episode in season_watched:
item.title = '[B][COLOR limegreen][[I]%s[/I]][/COLOR][/B] %s' % (check,
item.title)
item.title)
else:
break
except:
pass
pass
return itemlist
def get_sync_from_file():
logger.info()
sync_path = os.path.join(config.get_data_path(),'settings_channels' ,'trakt')
sync_path = os.path.join(config.get_data_path(), 'settings_channels', 'trakt')
trakt_node = {}
if os.path.exists(sync_path):
trakt_node = jsontools.get_node_from_file('trakt', "TRAKT")
trakt_node['movies']=get_trakt_watched('tmdb', 'movies')
trakt_node['shows']=get_trakt_watched('tmdb', 'shows')
trakt_node['movies'] = get_trakt_watched('tmdb', 'movies')
trakt_node['shows'] = get_trakt_watched('tmdb', 'shows')
jsontools.update_node(trakt_node, 'trakt', 'TRAKT')
def update_trakt_data(mediatype, trakt_data):
    """Persist the watched-list for one media type into the trakt node file.

    ``mediatype`` is 'movies' or 'shows' (used as the node key);
    ``trakt_data`` is the structure returned by get_trakt_watched.
    Existing node content is loaded first (if the file exists) so the other
    media type's data is preserved.
    """
    logger.info()
    sync_path = os.path.join(config.get_data_path(), 'settings_channels', 'trakt')
    trakt_node = {}
    if os.path.exists(sync_path):
        trakt_node = jsontools.get_node_from_file('trakt', "TRAKT")
    trakt_node[mediatype] = trakt_data
    jsontools.update_node(trakt_node, 'trakt', 'TRAKT')
def ask_install_script():
logger.info()
import xbmc
from platformcode import platformtools
respuesta = platformtools.dialog_yesno("Alfa", "Puedes instalar el script de Trakt a continuacíon, "
@@ -265,5 +269,24 @@ def ask_install_script():
xbmc.executebuiltin("InstallAddon(script.trakt)")
return
else:
config.set_setting('install_trakt','false')
config.set_setting('install_trakt', False)
return
def wait_for_update_trakt():
    """Kick off a background refresh of the Trakt watched cache.

    Fix: the callable must be passed as ``target=``; positionally it binds
    to ``Thread``'s first parameter (``group``), which raises at
    construction time, so the refresh never ran.
    """
    logger.info()
    t = Thread(target=update_all)
    # Daemon thread so it never blocks Kodi shutdown
    t.setDaemon(True)
    t.start()
def update_all():
    """Background worker: refresh the Trakt watched cache for both media types.

    Waits 20s after startup and keeps waiting while Kodi is playing, so the
    network refresh never competes with playback.
    """
    from time import sleep
    logger.info()
    sleep(20)
    while xbmc.Player().isPlaying():
        sleep(20)
    for mediatype in ['movies', 'shows']:
        # True forces a fresh fetch instead of the cached data
        trakt_data = get_trakt_watched('tmdb', mediatype, True)
        update_trakt_data(mediatype, trakt_data)

View File

@@ -63,56 +63,10 @@ def run(item=None):
elif item.action == "getmainlist":
import channelselector
# # Check for updates only on first screen
# if config.get_setting("check_for_plugin_updates") == True:
# logger.info("Check for plugin updates enabled")
# from core import updater
#
# try:
# config.set_setting("plugin_updates_available", 0)
# new_published_version_tag, number_of_updates = updater.get_available_updates()
#
# config.set_setting("plugin_updates_available", number_of_updates)
# itemlist = channelselector.getmainlist()
#
# if new_published_version_tag != "":
# platformtools.dialog_notification(new_published_version_tag + " disponible",
# "Ya puedes descargar la nueva versión del plugin\n"
# "desde el listado principal")
#
# itemlist = channelselector.getmainlist()
# itemlist.insert(0, Item(title="Descargar version " + new_published_version_tag,
# version=new_published_version_tag, channel="updater",
# action="update",
# thumbnail=channelselector.get_thumb("update.png")))
# except:
# import traceback
# logger.error(traceback.format_exc())
# platformtools.dialog_ok("No se puede conectar", "No ha sido posible comprobar",
# "si hay actualizaciones")
# logger.error("Fallo al verificar la actualización")
# config.set_setting("plugin_updates_available", 0)
# itemlist = channelselector.getmainlist()
#
# else:
# logger.info("Check for plugin updates disabled")
# config.set_setting("plugin_updates_available", 0)
# itemlist = channelselector.getmainlist()
itemlist = channelselector.getmainlist()
platformtools.render_items(itemlist, item)
# # Action for updating plugin
# elif item.action == "update":
#
# from core import updater
# updater.update(item)
# config.set_setting("plugin_updates_available", 0)
#
# import xbmc
# xbmc.executebuiltin("Container.Refresh")
# Action for channel types on channelselector: movies, series, etc.
elif item.action == "getchanneltypes":
import channelselector
@@ -277,11 +231,6 @@ def run(item=None):
else:
logger.info("Executing channel '%s' method" % item.action)
itemlist = getattr(channel, item.action)(item)
# if item.start:
# menu_icon = get_thumb('menu.png')
# menu = Item(channel="channelselector", action="getmainlist", viewmode="movie", thumbnail=menu_icon,
# title='Menu')
# itemlist.insert(0, menu)
if config.get_setting('trakt_sync'):
token_auth = config.get_setting("token_trakt", "trakt")
if not token_auth:
@@ -293,7 +242,7 @@ def run(item=None):
trakt_tools.ask_install_script()
itemlist = trakt_tools.trakt_check(itemlist)
else:
config.set_setting('install_trakt', 'true')
config.set_setting('install_trakt', True)
platformtools.render_items(itemlist, item)

View File

@@ -16,31 +16,21 @@ import config
import xbmc
import xbmcgui
import xbmcplugin
from core.item import Item
from core import scrapertools
from core import httptools
from core import jsontools
from platformcode import logger
from channelselector import get_thumb
from core import trakt_tools
from core.item import Item
from platformcode import logger
class XBMCPlayer( xbmc.Player ):
class XBMCPlayer(xbmc.Player):
def __init__( self, *args ):
def __init__(self, *args):
pass
def onPlaybackEnded(self):
logger.info()
from time import sleep
sleep(20)
for mediatype in ['movies', 'shows']:
trakt_data = trakt_tools.get_trakt_watched('tmdb', mediatype, True)
trakt_tools.update_trakt_data(mediatype, trakt_data)
xbmc_player = XBMCPlayer()
def dialog_ok(heading, line1, line2="", line3=""):
dialog = xbmcgui.Dialog()
return dialog.ok(heading, line1, line2, line3)
@@ -116,7 +106,6 @@ def render_items(itemlist, parent_item):
"""
# Si el itemlist no es un list salimos
if not type(itemlist) == list:
return
if parent_item.start:
@@ -176,7 +165,6 @@ def render_items(itemlist, parent_item):
listitem.setThumbnailImage(item.thumbnail)
listitem.setProperty('fanart_image', fanart)
# No need it, use fanart instead
# xbmcplugin.setPluginFanart(int(sys.argv[1]), os.path.join(config.get_runtime_path(), "fanart.jpg"))
@@ -226,12 +214,11 @@ def render_items(itemlist, parent_item):
if config.get_setting("forceview"):
viewmode_id = get_viewmode_id(parent_item)
xbmc.executebuiltin("Container.SetViewMode(%s)" % viewmode_id)
if parent_item.mode in ['silent', 'get_cached', 'set_cache','finish']:
if parent_item.mode in ['silent', 'get_cached', 'set_cache', 'finish']:
xbmc.executebuiltin("Container.SetViewMode(500)")
def get_viewmode_id(parent_item):
# viewmode_json habria q guardarlo en un archivo y crear un metodo para q el user fije sus preferencias en:
# user_files, user_movies, user_tvshows, user_season y user_episodes.
viewmode_json = {'skin.confluence': {'default_files': 50,
@@ -388,7 +375,7 @@ def set_context_commands(item, parent_item):
"XBMC.RunScript(script.extendedinfo,info=seasoninfo,%s)" % param))
elif item.contentType == "tvshow" and (item.infoLabels['tmdb_id'] or item.infoLabels['tvdb_id'] or
item.infoLabels['imdb_id'] or item.contentSerieName):
item.infoLabels['imdb_id'] or item.contentSerieName):
param = "id =%s,tvdb_id=%s,imdb_id=%s,name=%s" \
% (item.infoLabels['tmdb_id'], item.infoLabels['tvdb_id'], item.infoLabels['imdb_id'],
item.contentSerieName)
@@ -396,14 +383,14 @@ def set_context_commands(item, parent_item):
"XBMC.RunScript(script.extendedinfo,info=extendedtvinfo,%s)" % param))
elif item.contentType == "movie" and (item.infoLabels['tmdb_id'] or item.infoLabels['imdb_id'] or
item.contentTitle):
item.contentTitle):
param = "id =%s,imdb_id=%s,name=%s" \
% (item.infoLabels['tmdb_id'], item.infoLabels['imdb_id'], item.contentTitle)
context_commands.append(("ExtendedInfo",
"XBMC.RunScript(script.extendedinfo,info=extendedinfo,%s)" % param))
# InfoPlus
if config.get_setting("infoplus") == True:
if config.get_setting("infoplus"):
if item.infoLabels['tmdb_id'] or item.infoLabels['imdb_id'] or item.infoLabels['tvdb_id'] or \
(item.contentTitle and item.infoLabels["year"]) or item.contentSerieName:
context_commands.append(("InfoPlus", "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(
@@ -423,11 +410,11 @@ def set_context_commands(item, parent_item):
(sys.argv[0], item.clone(channel="favorites", action="addFavourite",
from_channel=item.channel,
from_action=item.action).tourl())))
#Buscar en otros canales
if item.contentType in ['movie','tvshow']and item.channel != 'search':
# Buscar en otros canales
if item.contentType in ['movie', 'tvshow'] and item.channel != 'search':
# Buscar en otros canales
if item.contentSerieName!='':
item.wanted=item.contentSerieName
if item.contentSerieName != '':
item.wanted = item.contentSerieName
else:
item.wanted = item.contentTitle
context_commands.append(("[COLOR yellow]Buscar en otros canales[/COLOR]",
@@ -437,14 +424,14 @@ def set_context_commands(item, parent_item):
from_channel=item.channel,
contextual=True).tourl())))
#Definir como Pagina de inicio
# Definir como Pagina de inicio
if config.get_setting('start_page'):
if item.action not in ['findvideos', 'play']:
context_commands.insert(0, ("[COLOR 0xffccff00]Definir como pagina de inicio[/COLOR]",
"XBMC.RunPlugin(%s?%s)" % (
sys.argv[0], Item(channel='side_menu',
action="set_custom_start",
parent=item.tourl()).tourl())))
sys.argv[0], Item(channel='side_menu',
action="set_custom_start",
parent=item.tourl()).tourl())))
if item.channel != "videolibrary":
# Añadir Serie a la videoteca
@@ -505,21 +492,17 @@ def set_context_commands(item, parent_item):
context_commands.append(("Super Favourites Menu",
"XBMC.RunScript(special://home/addons/plugin.program.super.favourites/LaunchSFMenu.py)"))
context_commands = sorted(context_commands, key=lambda comand: comand[0])
# Menu Rapido
context_commands.insert(0,("[COLOR 0xffccff00]<Menú Rápido>[/COLOR]",
"XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(channel='side_menu',
action="open_menu",
parent=parent_item.tourl()).tourl(
context_commands.insert(0, ("[COLOR 0xffccff00]<Menú Rápido>[/COLOR]",
"XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(channel='side_menu',
action="open_menu",
parent=parent_item.tourl()).tourl(
))))
))))
return context_commands
def is_playing():
return xbmc_player.isPlaying()
@@ -600,10 +583,7 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
set_player(item, xlistitem, mediaurl, view, strm)
def stop_video():
from time import sleep
xbmc_player.stop()
@@ -787,7 +767,7 @@ def set_opcion(item, seleccion, opciones, video_urls):
listitem = xbmcgui.ListItem(item.title)
if config.get_platform(True)['num_version'] >= 16.0:
listitem.setArt({'icon':"DefaultVideo.png", 'thumb': item.thumbnail})
listitem.setArt({'icon': "DefaultVideo.png", 'thumb': item.thumbnail})
else:
listitem.setIconImage("DefaultVideo.png")
listitem.setThumbnailImage(item.thumbnail)
@@ -816,20 +796,6 @@ def set_opcion(item, seleccion, opciones, video_urls):
favorites.addFavourite(item)
salir = True
# "Añadir a videoteca":
elif opciones[seleccion] == config.get_localized_string(30161):
titulo = item.fulltitle
if titulo == "":
titulo = item.title
new_item = item.clone(title=titulo, action="play_from_library", category="Cine",
fulltitle=item.fulltitle, channel=item.channel)
from core import videolibrarytools
videolibrarytools.add_movie(new_item)
salir = True
# "Buscar Trailer":
elif opciones[seleccion] == config.get_localized_string(30162):
config.set_setting("subtitulo", False)
@@ -911,11 +877,11 @@ def set_player(item, xlistitem, mediaurl, view, strm):
playlist.add(mediaurl, xlistitem)
# Reproduce
#xbmc_player = xbmc_player
# xbmc_player = xbmc_player
xbmc_player.play(playlist, xlistitem)
while xbmc_player.isPlaying():
xbmc.sleep(200)
xbmc_player.onPlaybackEnded()
if config.get_setting('trakt_sync'):
trakt_tools.wait_for_update_trakt()
# elif config.get_setting("player_mode") == 1 or item.isPlayable:
elif config.get_setting("player_mode") == 1:
logger.info("mediaurl :" + mediaurl)
@@ -1067,7 +1033,7 @@ def play_torrent(item, xlistitem, mediaurl):
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
playlist.add(videourl, xlistitem)
#xbmc_player = xbmc_player
# xbmc_player = xbmc_player
xbmc_player.play(playlist)
# Marcamos como reproducido para que no se vuelva a iniciar

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(http://biter.tv/v/[A-z0-9]+)",
"pattern": "(http://b.ter.tv/v/[A-z0-9]+)",
"url": "\\1"
}
]

View File

@@ -35,11 +35,17 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = httptools.downloadpage(page_url).data
patron = 'https://www.rapidvideo.com/e/[^"]+'
match = scrapertools.find_multiple_matches(data, patron)
for url1 in match:
res = scrapertools.find_single_match(url1, '=(\w+)')
data = httptools.downloadpage(url1).data
url = scrapertools.find_single_match(data, 'source src="([^"]+)')
ext = scrapertools.get_filename_from_url(url)[-4:]
video_urls.append(['%s %s [rapidvideo]' % (ext, res), url])
if match:
for url1 in match:
res = scrapertools.find_single_match(url1, '=(\w+)')
data = httptools.downloadpage(url1).data
url = scrapertools.find_single_match(data, 'source src="([^"]+)')
ext = scrapertools.get_filename_from_url(url)[-4:]
video_urls.append(['%s %s [rapidvideo]' % (ext, res), url])
else:
patron = 'data-setup.*?src="([^"]+)".*?'
patron += 'type="([^"]+)"'
match = scrapertools.find_multiple_matches(data, patron)
for url, ext in match:
video_urls.append(['%s [rapidvideo]' % (ext), url])
return video_urls