@@ -198,17 +198,16 @@ def findvideos(item):
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
|
||||
if itemlist:
|
||||
if itemlist and item.contentChannel != "videolibrary":
|
||||
itemlist.append(Item(channel = item.channel))
|
||||
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
|
||||
text_color="magenta"))
|
||||
# Opción "Añadir esta película a la biblioteca de KODI"
|
||||
if item.extra != "library":
|
||||
if config.get_videolibrary_support():
|
||||
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
|
||||
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
|
||||
contentTitle = item.contentTitle
|
||||
))
|
||||
if config.get_videolibrary_support():
|
||||
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
|
||||
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
|
||||
contentTitle = item.contentTitle
|
||||
))
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -25,6 +25,19 @@
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostrar enlaces en idioma...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"No filtrar",
|
||||
"VOSE",
|
||||
"LAT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -10,12 +10,24 @@ from core import servertools
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
|
||||
|
||||
IDIOMAS = {'LAT': 'LAT','SUB': 'VOSE'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['directo', 'rapidvideo', 'streamango', 'yourupload', 'mailru', 'netutv', 'okru']
|
||||
list_quality = ['default']
|
||||
|
||||
|
||||
HOST = "https://animeflv.net/"
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
|
||||
itemlist = list()
|
||||
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios", url=HOST))
|
||||
itemlist.append(Item(channel=item.channel, action="novedades_anime", title="Últimos animes", url=HOST))
|
||||
@@ -31,6 +43,9 @@ def mainlist(item):
|
||||
itemlist.append(Item(channel=item.channel, action="search_section", title=" Estado", url=HOST + "browse",
|
||||
extra="status"))
|
||||
itemlist = renumbertools.show_option(item.channel, itemlist)
|
||||
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -188,32 +203,38 @@ def episodios(item):
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
from core import jsontools
|
||||
itemlist = []
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)
|
||||
list_videos = scrapertools.find_multiple_matches(data, 'video\[\d\]\s=\s\'<iframe.+?src="([^"]+)"')
|
||||
download_list = scrapertools.find_multiple_matches(data, 'video\[\d+\] = \'<iframe .*?src="(.*?)"')
|
||||
for url in download_list:
|
||||
data = httptools.downloadpage(url).data
|
||||
if 'izanagi' in url:
|
||||
new_url = url.replace('embed', 'check')
|
||||
new_data = httptools.downloadpage(new_url).data
|
||||
url = scrapertools.find_single_match(new_data, '"file":"(.*?)"')
|
||||
else:
|
||||
url = scrapertools.find_single_match(data, 'var redir = "(.*?)"')
|
||||
if url != '':
|
||||
url = url.replace("\\","")
|
||||
itemlist.append(item.clone(title='%s', url=url, action='play'))
|
||||
videos = scrapertools.find_single_match(data, 'var videos = (.*?);')
|
||||
videos_json = jsontools.load(videos)
|
||||
for video_lang in videos_json.items():
|
||||
language = video_lang[0]
|
||||
matches = scrapertools.find_multiple_matches(str(video_lang[1]), 'src="([^"]+)"')
|
||||
for source in matches:
|
||||
new_data = httptools.downloadpage(source).data
|
||||
if 'redirector' in source:
|
||||
|
||||
url = scrapertools.find_single_match(new_data, 'window.location.href = "([^"]+)"')
|
||||
elif 'embed' in source:
|
||||
source = source.replace('embed', 'check')
|
||||
new_data = httptools.downloadpage(source).data
|
||||
json_data = jsontools.load(new_data)
|
||||
try:
|
||||
url = json_data['file']
|
||||
except:
|
||||
continue
|
||||
|
||||
itemlist.append(Item(channel=item.channel, url=url, title='%s', action='play', language=language))
|
||||
|
||||
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
|
||||
|
||||
# Requerido para FilterTools
|
||||
|
||||
itemlist = filtertools.get_links(itemlist, item, list_language)
|
||||
|
||||
# Requerido para AutoPlay
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
if item.video_urls:
|
||||
for it in item.video_urls:
|
||||
title = ".%s %sp [directo]" % (it[1].replace("video/", ""), it[0])
|
||||
itemlist.append([title, it[2]])
|
||||
return itemlist
|
||||
else:
|
||||
return [item]
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
{
|
||||
"id": "animeflv_me",
|
||||
"name": "Animeflv.ME",
|
||||
"active": false,
|
||||
"adult": false,
|
||||
"language": ["cast", "lat"],
|
||||
"thumbnail": "http://i.imgur.com/x9AdvBx.png",
|
||||
"banner": "http://i.imgur.com/dTZwCPq.png",
|
||||
"categories": [
|
||||
"anime"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
45
plugin.video.alfa/channels/animeflv_ru.json
Normal file
45
plugin.video.alfa/channels/animeflv_ru.json
Normal file
@@ -0,0 +1,45 @@
|
||||
{
|
||||
"id": "animeflv_ru",
|
||||
"name": "AnimeFLV.RU",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["cast", "lat"],
|
||||
"thumbnail": "http://i.imgur.com/5nRR9qq.png",
|
||||
"banner": "animeflv_ru.png",
|
||||
"compatible": {
|
||||
"python": "2.7.9"
|
||||
},
|
||||
"categories": [
|
||||
"anime"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Episodios de anime",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostrar enlaces en idioma...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"No filtrar",
|
||||
"VOSE"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -5,10 +5,18 @@ import urlparse
|
||||
|
||||
from channels import renumbertools
|
||||
from core import httptools
|
||||
from core import servertools
|
||||
from core import jsontools
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
from channels import autoplay
|
||||
|
||||
IDIOMAS = {'VOSE': 'VOSE'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['directo']
|
||||
list_quality = ['default']
|
||||
|
||||
|
||||
HOST = "https://animeflv.ru/"
|
||||
|
||||
@@ -16,25 +24,25 @@ HOST = "https://animeflv.ru/"
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = list()
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
|
||||
itemlist = list()
|
||||
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios", url=HOST))
|
||||
itemlist.append(Item(channel=item.channel, action="novedades_anime", title="Últimos animes", url=HOST))
|
||||
itemlist.append(Item(channel=item.channel, action="listado", title="Animes", url=HOST + "animes/nombre/lista"))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title="Buscar por:"))
|
||||
itemlist.append(Item(channel=item.channel, action="search", title=" Título"))
|
||||
itemlist.append(Item(channel=item.channel, action="search_section", title=" Género", url=HOST + "animes",
|
||||
extra="genre"))
|
||||
|
||||
itemlist = renumbertools.show_option(item.channel, itemlist)
|
||||
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def clean_title(title):
|
||||
year_pattern = r'\([\d -]+?\)'
|
||||
|
||||
return re.sub(year_pattern, '', title).strip()
|
||||
|
||||
|
||||
@@ -45,32 +53,26 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
post = "value=%s" % texto
|
||||
data = httptools.downloadpage(item.url, post=post).data
|
||||
|
||||
try:
|
||||
dict_data = jsontools.load(data)
|
||||
|
||||
for e in dict_data:
|
||||
title = clean_title(scrapertools.htmlclean(e["name"]))
|
||||
url = e["url"]
|
||||
plot = e["description"]
|
||||
thumbnail = HOST + e["thumb"]
|
||||
thumbnail = e["thumb"]
|
||||
new_item = item.clone(action="episodios", title=title, url=url, plot=plot, thumbnail=thumbnail)
|
||||
|
||||
if "Pelicula" in e["genre"]:
|
||||
new_item.contentType = "movie"
|
||||
new_item.contentTitle = title
|
||||
else:
|
||||
new_item.show = title
|
||||
new_item.context = renumbertools.context(item)
|
||||
|
||||
itemlist.append(new_item)
|
||||
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -79,41 +81,32 @@ def search_section(item):
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
|
||||
|
||||
patron = 'id="%s_filter"[^>]+><div class="inner">(.*?)</div></div>' % item.extra
|
||||
data = scrapertools.find_single_match(data, patron)
|
||||
matches = re.compile('<a href="([^"]+)"[^>]+>(.*?)</a>', re.DOTALL).findall(data)
|
||||
|
||||
for url, title in matches:
|
||||
url = "%s/nombre/lista" % url
|
||||
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url,
|
||||
context=renumbertools.context(item)))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
itemlist = []
|
||||
|
||||
if categoria == 'anime':
|
||||
itemlist = novedades_episodios(Item(url=HOST))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def novedades_episodios(item):
|
||||
logger.info()
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
|
||||
data = scrapertools.find_single_match(data, '<ul class="ListEpisodios[^>]+>(.*?)</ul>')
|
||||
|
||||
matches = re.compile('href="([^"]+)"[^>]+>.+?<img src="([^"]+)".+?"Capi">(.*?)</span>'
|
||||
'<strong class="Title">(.*?)</strong>', re.DOTALL).findall(data)
|
||||
itemlist = []
|
||||
|
||||
for url, thumbnail, str_episode, show in matches:
|
||||
|
||||
try:
|
||||
episode = int(str_episode.replace("Ep. ", ""))
|
||||
except ValueError:
|
||||
@@ -121,42 +114,31 @@ def novedades_episodios(item):
|
||||
episode = 1
|
||||
else:
|
||||
season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
|
||||
|
||||
title = "%s: %sx%s" % (show, season, str(episode).zfill(2))
|
||||
url = urlparse.urljoin(HOST, url)
|
||||
thumbnail = urlparse.urljoin(HOST, thumbnail)
|
||||
|
||||
new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, thumbnail=thumbnail,
|
||||
fulltitle=title)
|
||||
|
||||
itemlist.append(new_item)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def novedades_anime(item):
|
||||
logger.info()
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
|
||||
data = scrapertools.find_single_match(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
|
||||
|
||||
matches = re.compile('<img src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data)
|
||||
itemlist = []
|
||||
|
||||
for thumbnail, url, title in matches:
|
||||
url = urlparse.urljoin(HOST, url)
|
||||
thumbnail = urlparse.urljoin(HOST, thumbnail)
|
||||
title = clean_title(title)
|
||||
|
||||
new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
|
||||
fulltitle=title)
|
||||
|
||||
new_item.show = title
|
||||
new_item.context = renumbertools.context(item)
|
||||
|
||||
itemlist.append(new_item)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -166,19 +148,16 @@ def listado(item):
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
|
||||
url_pagination = scrapertools.find_single_match(data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">')
|
||||
data = scrapertools.find_single_match(data, '</div><div class="full">(.*?)<div class="pagination')
|
||||
|
||||
matches = re.compile('<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?'
|
||||
'<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>',
|
||||
re.DOTALL).findall(data)
|
||||
itemlist = []
|
||||
for thumbnail, url, title, genres, plot in matches:
|
||||
|
||||
title = clean_title(title)
|
||||
url = urlparse.urljoin(HOST, url)
|
||||
thumbnail = urlparse.urljoin(HOST, thumbnail)
|
||||
new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
|
||||
fulltitle=title, plot=plot)
|
||||
|
||||
if "Pelicula Anime" in genres:
|
||||
new_item.contentType = "movie"
|
||||
new_item.contentTitle = title
|
||||
@@ -189,7 +168,6 @@ def listado(item):
|
||||
if url_pagination:
|
||||
url = urlparse.urljoin(HOST, url_pagination)
|
||||
title = ">> Pagina Siguiente"
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))
|
||||
return itemlist
|
||||
|
||||
@@ -203,7 +181,6 @@ def episodios(item):
|
||||
item.plot = scrapertools.find_single_match(data, 'Description[^>]+><p>(.*?)</p>')
|
||||
data = scrapertools.find_single_match(data, '<div class="Sect Episodes full">(.*?)</div>')
|
||||
matches = re.compile('<a href="([^"]+)"[^>]+>(.+?)</a', re.DOTALL).findall(data)
|
||||
|
||||
for url, title in matches:
|
||||
title = title.strip()
|
||||
url = urlparse.urljoin(item.url, url)
|
||||
@@ -224,27 +201,27 @@ def episodios(item):
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
_id = scrapertools.find_single_match(item.url, 'https://animeflv.ru/ver/([^/]+)/')
|
||||
post = "embed_id=%s" % _id
|
||||
data = httptools.downloadpage("https://animeflv.ru/get_video_info", post=post).data
|
||||
dict_data = jsontools.load(data)
|
||||
headers = dict()
|
||||
headers["Referer"] = item.url
|
||||
data = httptools.downloadpage("https:" + dict_data["value"], headers=headers).data
|
||||
dict_data = jsontools.load(data)
|
||||
if not dict_data:
|
||||
return itemlist
|
||||
list_videos = dict_data["playlist"][0]
|
||||
if isinstance(list_videos, list):
|
||||
for video in list_videos:
|
||||
itemlist.append(Item(channel=item.channel, action="play", url=video["file"],
|
||||
show=re.escape(item.show),
|
||||
title=item.title, plot=item.plot, fulltitle=item.title,
|
||||
thumbnail=item.thumbnail))
|
||||
else:
|
||||
for video in list_videos.values():
|
||||
video += "|User-Agent=Mozilla/5.0"
|
||||
itemlist.append(Item(channel=item.channel, action="play", url=video, show=re.escape(item.show),
|
||||
title=item.title, plot=item.plot, fulltitle=item.title,
|
||||
thumbnail=item.thumbnail))
|
||||
_id = scrapertools.find_single_match(item.url, HOST + 'ver/([^/]+)/')
|
||||
data = httptools.downloadpage(item.url).data
|
||||
bloque = scrapertools.find_single_match(data, 'atrl(.*?)choose_quality')
|
||||
matches = scrapertools.find_multiple_matches(bloque, '<option value="([^"]+)')
|
||||
headers = {"Referer" : item.url}
|
||||
for url in matches:
|
||||
post = "embed_id=%s" % _id
|
||||
xserver = scrapertools.find_single_match(url, 's=(\w+)')
|
||||
data = httptools.downloadpage(HOST + "get_video_info_v2?s=%s" %xserver, post=post).data
|
||||
dict_data = jsontools.load(data)
|
||||
data = httptools.downloadpage(dict_data["value"], headers=headers).data
|
||||
matches = scrapertools.find_multiple_matches(data, '"file":"([^"]+)"')
|
||||
for url in matches:
|
||||
url = url.replace("\\","")
|
||||
itemlist.append(item.clone(action="play", url=url, title='%s',
|
||||
fulltitle=item.title, language='VOSE'
|
||||
))
|
||||
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
|
||||
|
||||
# Requerido para AutoPlay
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
|
||||
return itemlist
|
||||
|
||||
@@ -244,8 +244,8 @@ def findvideos(item):
|
||||
url = scrapertools.find_single_match(new_data, "src='([^']+)'")
|
||||
url = get_url(url.replace('\\/', '/'))
|
||||
if url:
|
||||
itemlist.append(Item(channel=item.channel, title ='%s'+title, url=url, action='play', quality=item.quality,
|
||||
language=IDIOMAS[language], infoLabels=item.infoLabels))
|
||||
itemlist.append(item.clone(title ='%s'+title, url=url, action='play',
|
||||
language=IDIOMAS[language], text_color = ""))
|
||||
patron = "<a class='optn' href='([^']+)'.*?<img src='.*?>([^<]+)<.*?<img src='.*?>([^<]+)<"
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for hidden_url, quality, language in matches:
|
||||
@@ -258,10 +258,23 @@ def findvideos(item):
|
||||
url = get_url(url.replace('\\/', '/'))
|
||||
if url:
|
||||
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', quality=quality,
|
||||
language=IDIOMAS[language], infoLabels=item.infoLabels))
|
||||
language=IDIOMAS[language], infoLabels=item.infoLabels, text_color = ""))
|
||||
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
|
||||
itemlist.sort(key=lambda it: (it.language, it.server, it.quality))
|
||||
tmdb.set_infoLabels(itemlist, __modo_grafico__)
|
||||
# Requerido para FilterTools
|
||||
itemlist = filtertools.get_links(itemlist, item, list_language)
|
||||
|
||||
# Requerido para AutoPlay
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
if itemlist:
|
||||
if item.contentChannel != "videolibrary":
|
||||
if config.get_videolibrary_support():
|
||||
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
|
||||
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
|
||||
contentTitle = item.contentTitle
|
||||
))
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
{
|
||||
"id": "peliculasdk",
|
||||
"name": "PeliculasDK",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["cast"],
|
||||
"thumbnail": "http://s29.postimg.cc/wzw749oon/pldklog.jpg",
|
||||
"banner": "peliculasdk.png",
|
||||
"categories": [
|
||||
"movie",
|
||||
"vos",
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_castellano",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Castellano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verificar si los enlaces existen",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Número de enlaces a verificar",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,293 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core.scrapertools import decodeHtmlentities as dhe
|
||||
from platformcode import logger
|
||||
from platformcode import config
|
||||
from core import tmdb
|
||||
from channelselector import get_thumb
|
||||
|
||||
from channels import filtertools
|
||||
from channels import autoplay
|
||||
|
||||
|
||||
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'peliculasdk')
|
||||
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'peliculasdk')
|
||||
__adult_mode__ = config.get_setting("adult_mode")
|
||||
|
||||
host = "http://www.peliculasdk.com"
|
||||
|
||||
|
||||
IDIOMAS = {'es': 'Español', 'la': 'Latino', 'su': 'Subtitulado', 'in': 'Inglés'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_quality = ['HD-1080', 'HD-720', 'HD-320', 'BR-R', 'BR-S', 'DVD-R', 'DVD-S', 'TS-HQ', 'TS', 'CAM'] # -R:Rip, -S:Screener
|
||||
list_servers = ['directo', 'streamango', 'powvideo', 'datoporn', 'gamovideo', 'streamplay', 'okru', 'rapidvideo', 'openload']
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
|
||||
itemlist = []
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title='Estrenos', action='peliculas', url= host + "/ver/estrenos/",
|
||||
thumbnail=get_thumb('newest', auto=True), type='movies'))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title='Por géneros', action='section',
|
||||
thumbnail=get_thumb('genres', auto=True), type='movies'))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title='Por calidades', action='section',
|
||||
thumbnail=get_thumb('quality', auto=True), type='movies'))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title='Por idiomas', action='section',
|
||||
thumbnail=get_thumb('language', auto=True), type='movies'))
|
||||
|
||||
if __adult_mode__ != 0:
|
||||
itemlist.append(Item(channel=item.channel, title='Adultos +18', action='peliculas', url= host + "/genero/adultos/",
|
||||
thumbnail=get_thumb('adults', auto=True), type='movies'))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title='Buscar...', action='search',
|
||||
thumbnail=get_thumb('search', auto=True), type='movies'))
|
||||
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def section(item):
|
||||
logger.info()
|
||||
itemlist=[]
|
||||
duplicados=[]
|
||||
data = httptools.downloadpage(host).data
|
||||
|
||||
if 'Por géneros' in item.title:
|
||||
patron = '<li><a href="(\/genero\/[^"]*)">([^<]*)<\/a><\/li>' #<li><a href="/genero/accion">Acción</a></li>
|
||||
elif 'Por calidades' in item.title:
|
||||
patron = "<li><a href='(\/calidad\/[^']*)'>([^<]*)<\/a><\/li>" #<li><a href='/calidad/HD-1080/'>HD 1080</a></li>
|
||||
elif 'Por idiomas' in item.title:
|
||||
patron = "<li><a href='(\/idioma\/[^']*)'>([^<]*)<\/a><\/li>" #<li><a href='/idioma/Espanol/'>Español</a></li>
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
if scrapedtitle not in duplicados:
|
||||
itemlist.append(Item(channel=item.channel, url=host + scrapedurl, title=scrapedtitle, action='peliculas',
|
||||
type=item.type))
|
||||
duplicados.append(scrapedtitle)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
texto = texto.replace(" ", "+")
|
||||
|
||||
item.url = host + "/index.php?s=%s&x=0&y=0" % (texto)
|
||||
|
||||
try:
|
||||
return buscador(item)
|
||||
# Se captura la excepciÛn, para no interrumpir al buscador global si un canal falla
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def buscador(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| |&#.*?;", "", data)
|
||||
|
||||
patron = 'style="position:relative"> '
|
||||
patron += '<a href="([^"]+)">'
|
||||
patron += '<img src="([^"]+)" alt="([^"]+)"></a><br>'
|
||||
patron += '<div class="titulope">([^<]+)</div>.*?'
|
||||
patron += 'Audio: (.+?)</div>.*?'
|
||||
patron += 'Calidad: (.+?)</div>.*?'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtitleorig, scrapedlenguaje, scrapedcalidad in matches:
|
||||
|
||||
year = scrapertools.find_single_match(scrapedtitle, '\((\d+)\)')
|
||||
scrapedtitle = re.sub(r"\(\d+\)", "", scrapedtitle).strip()
|
||||
|
||||
audios = scrapertools.find_multiple_matches(scrapedlenguaje, '<a href="[^"]*" rel="[^"]*">([^<]*)</a>')
|
||||
calidad = scrapertools.find_single_match(scrapedcalidad, '<a href="[^"]*" rel="[^"]*">([^<]*)</a>')
|
||||
|
||||
titulo = '%s [%s][%s]' % (scrapedtitle, ','.join([a[:3] for a in audios]), calidad)
|
||||
|
||||
# Parece que las pelis de adultos se mezclan en la búsqueda y lo único que las diferencia es que no tienen Calidad y Audios
|
||||
if (calidad and audios) or __adult_mode__ != 0:
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
action="findvideos", url=scrapedurl,
|
||||
title=titulo, contentTitle=scrapedtitle,
|
||||
thumbnail=scrapedthumbnail,
|
||||
language=audios,
|
||||
quality=calidad,
|
||||
infoLabels={'year':year}
|
||||
))
|
||||
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
|
||||
# Paginación
|
||||
url_next_page = scrapertools.find_single_match(data,'<a href="([^"]*)">Siguiente »</a>')
|
||||
if url_next_page:
|
||||
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| |&#.*?;", "", data)
|
||||
|
||||
patron = 'style="position:relative"> '
|
||||
patron += '<a href="([^"]+)">'
|
||||
patron += '<img src="([^"]+)" alt="([^"]+)"></a><br>'
|
||||
patron += '<div class="titulope">([^<]+)</div>.*?'
|
||||
patron += 'Audio: (.+?)</div>.*?'
|
||||
patron += 'Calidad: (.+?)</div>.*?'
|
||||
patron += 'Género: (.+?)</div>'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtitleorig, scrapedlenguaje, scrapedcalidad, scrapedgenero in matches:
|
||||
|
||||
year = scrapertools.find_single_match(scrapedtitle, '\((\d+)\)')
|
||||
scrapedtitle = re.sub(r"\(\d+\)", "", scrapedtitle).strip()
|
||||
|
||||
audios = scrapertools.find_multiple_matches(scrapedlenguaje, '<a href="[^"]*" rel="[^"]*">([^<]*)</a>')
|
||||
calidad = scrapertools.find_single_match(scrapedcalidad, '<a href="[^"]*" rel="[^"]*">([^<]*)</a>')
|
||||
generos = scrapertools.find_multiple_matches(scrapedgenero, '<a href="[^"]*" rel="[^"]*">([^<]*)</a>')
|
||||
|
||||
titulo = '%s [%s][%s]' % (scrapedtitle, ','.join([a[:3] for a in audios]), calidad)
|
||||
|
||||
if 'Adultos' not in generos or __adult_mode__ != 0:
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
action="findvideos", url=scrapedurl,
|
||||
title=titulo, contentTitle=scrapedtitle,
|
||||
thumbnail=scrapedthumbnail,
|
||||
language=audios,
|
||||
quality=calidad,
|
||||
infoLabels={'year':year}
|
||||
))
|
||||
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
|
||||
# Paginación
|
||||
url_next_page = scrapertools.find_single_match(data,'<a href="([^"]*)">Siguiente »</a>')
|
||||
if url_next_page:
|
||||
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
tmdb.set_infoLabels(item, True) # para refrescar infolabels y obtener más datos en "segunda pasada"
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
patron = '<li><a href="#(tab\d+)"><span class="re">\d<\/span><span class="([^"]+)"><\/span><span class=.*?>([^<]+)<\/span>'
|
||||
check = re.compile(patron, re.DOTALL).findall(data)
|
||||
if not check:
|
||||
patron = '<li><a href="#(tab\d+)">'
|
||||
check = re.compile(patron, re.DOTALL).findall(data)
|
||||
for i, valor in enumerate(check):
|
||||
check[i] = [valor, '', '']
|
||||
|
||||
patron = '<div id="(tab\d+)" class="tab_content">(.*?)</div>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
servers_data_list = []
|
||||
for i, match in enumerate(matches):
|
||||
if match[0] == check[i][0]:
|
||||
if '<iframe' in match[1]:
|
||||
src = scrapertools.find_single_match(match[1], ' src="([^"]*)"')
|
||||
servers_data_list.append([check[i][1], check[i][2], 'iframe', src]) # idioma, calidad, 'iframe', src
|
||||
|
||||
elif '<script' in match[1]:
|
||||
src = scrapertools.find_single_match(match[1], '<script>(.*?)<\/script>')
|
||||
if src:
|
||||
func, parm = scrapertools.find_single_match(src, '(.*?)\("([^"]*)"\)')
|
||||
servers_data_list.append([check[i][1], check[i][2], func, parm ]) # idioma, calidad, func, parm
|
||||
|
||||
data = httptools.downloadpage(host + '/Js/videod.js').data
|
||||
patron = 'function (\w+)\(id\){(.*?)}'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for idioma, calidad, func, parm in servers_data_list:
|
||||
if func == 'iframe':
|
||||
title = "Ver en: %s [" + idioma + "][" + calidad + "]"
|
||||
itemlist.append(
|
||||
item.clone(title=title, url=parm, action="play",
|
||||
thumbnail=item.category,
|
||||
language=idioma, quality=calidad))
|
||||
|
||||
else:
|
||||
for funcion, contenido in matches:
|
||||
if funcion == func:
|
||||
if '<script' in contenido: continue
|
||||
if '<iframe' in contenido:
|
||||
src = scrapertools.find_single_match(contenido, 'src="([^"]*)"')
|
||||
else:
|
||||
src = scrapertools.find_single_match(contenido, 'href="([^"]*)"')
|
||||
if "'+codigo+'" not in src: continue
|
||||
src = src.replace("'+codigo+'", parm)
|
||||
|
||||
title = "Ver en: %s [" + idioma + "][" + calidad + "]"
|
||||
itemlist.append(
|
||||
item.clone(title=title, url=src, action="play",
|
||||
thumbnail=item.category,
|
||||
language=idioma, quality=calidad))
|
||||
break
|
||||
|
||||
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
|
||||
|
||||
if __comprueba_enlaces__:
|
||||
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
|
||||
|
||||
if item.library and config.get_videolibrary_support() and len(itemlist) > 0:
|
||||
infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
|
||||
'title': item.fulltitle}
|
||||
itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca",
|
||||
action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels,
|
||||
text_color="0xFFff6666",
|
||||
thumbnail='http://imgur.com/0gyYvuC.png'))
|
||||
|
||||
itemlist = filtertools.get_links(itemlist, item, list_language)
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
item = Item()
|
||||
try:
|
||||
if categoria == 'castellano':
|
||||
item.url = host + "idioma/Espanol/"
|
||||
item.action = "peliculas"
|
||||
itemlist = peliculas(item)
|
||||
if itemlist[-1].action == "peliculas":
|
||||
itemlist.pop()
|
||||
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []
|
||||
return itemlist
|
||||
@@ -137,11 +137,11 @@ def peliculas(item):
|
||||
# logger.info(data)
|
||||
|
||||
# img, title
|
||||
patron = '(?is)movie-img img-box.*?alt="([^"]+).*?'
|
||||
patron += 'src="([^"]+).*?'
|
||||
patron += 'href="([^"]+).*?'
|
||||
patron += 'fechaestreno">([^<]+).*?'
|
||||
patron += 'quality">([^<]+)'
|
||||
patron = '(?is)movie-img img-box.*?alt="([^"]+)".*?'
|
||||
patron += 'src="([^"]+)".*?'
|
||||
patron += 'href="([^"]+)".*?'
|
||||
patron += 'fechaestreno">([^<]+)<.*?'
|
||||
patron += 'quality">([^<]+)<'
|
||||
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
|
||||
@@ -176,20 +176,19 @@ def genresYears(item):
|
||||
if item.title == "Estrenos":
|
||||
patron_todas = 'ESTRENOS</a>(.*?)</i> Géneros'
|
||||
else:
|
||||
patron_todas = '(?is)genres falsescroll(.*?)</div> </aside'
|
||||
patron_todas = '(?is)data-label="CATEGORIAS">(.*?)show-bigmenu'
|
||||
# logger.error(texto='***********uuuuuuu*****' + patron_todas)
|
||||
|
||||
data = scrapertools.find_single_match(data, patron_todas)
|
||||
# logger.error(texto='***********uuuuuuu*****' + data)
|
||||
patron = '<a href="([^"]+)">([^<]+)</a> <i>([^<]+)</i>' # url, title, videos
|
||||
patron = '<a href="([^"]+)".*?title="([^"]+)"' # url, title
|
||||
# patron = '<a href="([^"]+)">([^<]+)</a>' # url, title
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
|
||||
for scrapedurl, scrapedtitle, videos_num in matches:
|
||||
title = '%s (%s)' % (scrapedtitle, videos_num.replace('.', ','))
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
title = '%s' % (scrapedtitle)
|
||||
title = title.replace("Peliculas de ","").replace(" Online","")
|
||||
itemlist.append(item.clone(title=title, url=scrapedurl, action="peliculas"))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -1,65 +1,39 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re
|
||||
import urllib
|
||||
|
||||
from channelselector import get_thumb
|
||||
from core import httptools
|
||||
from core import jsontools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core import tmdb
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
|
||||
host = "http://peliscity.com"
|
||||
host = "https://www.pelisvips.com"
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(host).data
|
||||
patron = 'cat-item.*?span>([^<]+)'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
can = 0
|
||||
for cantidad in matches:
|
||||
can += int(cantidad.replace(".", ""))
|
||||
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, title="Películas: (%s)" %can, text_bold=True))
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, title=" Últimas agregadas", action="agregadas", url= host,
|
||||
viewmode="movie_with_plot"))
|
||||
itemlist.append(Item(channel=item.channel, title=" Peliculas HD", action="agregadas",
|
||||
url= host + "/calidad/hd-real-720", viewmode="movie_with_plot"))
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, title=" Listado por género", action="porGenero", url= host))
|
||||
itemlist.append(Item(channel=item.channel, title=" Idioma", action="porIdioma", url= host))
|
||||
itemlist.append(Item(channel=item.channel, title=" Buscar", action="search", url= host + "/?s="))
|
||||
itemlist.append(Item(channel=item.channel, title="Ultimas", action="agregadas",
|
||||
url= host, viewmode="movie_with_plot", thumbnail = get_thumb("last", auto = True)))
|
||||
itemlist.append(Item(channel=item.channel, title="Género", action="porGenero_Idioma", tipo = "g", url= host, thumbnail = get_thumb("genres", auto = True)))
|
||||
itemlist.append(Item(channel=item.channel, title="Audio", action="porGenero_Idioma", tipo = "a", url= host, thumbnail = get_thumb("audio", auto = True)))
|
||||
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url= host + "/?s=", thumbnail = get_thumb("search", auto = True)))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def porIdioma(item):
    """Build the language submenu (Castellano / VOSE / Latino)."""
    idiomas = [
        ("Castellano", "/idioma/espanol-castellano/"),
        ("VOSE", "/idioma/subtitulada/"),
        ("Latino", "/idioma/espanol-latino/"),
    ]
    return [Item(channel=item.channel, title=titulo, action="agregadas",
                 url=host + ruta, viewmode="movie_with_plot")
            for titulo, ruta in idiomas]
|
||||
|
||||
|
||||
def porGenero(item):
|
||||
def porGenero_Idioma(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = 'cat-item.*?href="([^"]+).*?>(.*?)<.*?span>([^<]+)'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for urlgen, genero, cantidad in matches:
|
||||
cantidad = cantidad.replace(".", "")
|
||||
titulo = genero + " (" + cantidad + ")"
|
||||
bloque = scrapertools.find_single_match(data, 'culas por %s(.*?)slidebar-item' %item.tipo)
|
||||
patron = 'href="([^"]+).*?span>([^<]+)'
|
||||
matches = scrapertools.find_multiple_matches(bloque, patron)
|
||||
for urlgen, titulo in matches:
|
||||
itemlist.append(Item(channel=item.channel, action="agregadas", title=titulo, url=urlgen, folder=True,
|
||||
viewmode="movie_with_plot"))
|
||||
|
||||
@@ -80,35 +54,47 @@ def search(item, texto):
|
||||
return []
|
||||
|
||||
|
||||
def listaBuscar(item):
    """Parse a search-results page into one playable Item per hit."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = ('<li class="itemlist".*?href="([^"]+)".*?'
              'src="([^"]+)".*?'
              'title="([^"]+)".*?'
              'text-list">([^<]+)<')
    return [
        Item(channel=item.channel, action="findvideos", title=scrapedtitle + " ",
             fulltitle=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumb,
             show=scrapedtitle, plot=scrapedplot)
        for scrapedurl, scrapedthumb, scrapedtitle, scrapedplot
        in scrapertools.find_multiple_matches(data, patron)
    ]
|
||||
|
||||
|
||||
def agregadas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r'\n|\r|\t|\s{2}| |"', "", data)
|
||||
patron = scrapertools.find_multiple_matches (data,'<divclass=col-mt-5 postsh>.*?Duración')
|
||||
for element in patron:
|
||||
info = scrapertools.find_single_match(element,
|
||||
"calidad>(.*?)<.*?ahref=(.*?)>.*?'reflectMe' src=(.*?)\/>.*?<h2>(.*?)"
|
||||
"<\/h2>.*?sinopsis>(.*?)<\/div>.*?Año:<\/span>(.*?)<\/li>")
|
||||
quality = info[0]
|
||||
url = info[1]
|
||||
thumbnail = info[2]
|
||||
title = info[3]
|
||||
plot = info[4]
|
||||
year = info[5].strip()
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
action='findvideos',
|
||||
contentType = "movie",
|
||||
contentTitle = title,
|
||||
fulltitle = title,
|
||||
infoLabels={'year':year},
|
||||
quality=quality,
|
||||
thumbnail=thumbnail,
|
||||
title=title,
|
||||
url=url
|
||||
bloque = scrapertools.find_single_match(data, '<div id="movie-list"(.*?)<div class="pagination movie-pagination')
|
||||
patron = '(?is)href="([^"]+)".*?'
|
||||
patron += 'class="_format"> <span class=".*?>([^<]+)<.*?'
|
||||
patron += 'src="([^"]+)".*?'
|
||||
patron += 'alt="([^"]+)".*?'
|
||||
patron += '<div class="_audio">(.*?)/div.*?'
|
||||
patron += 'label_year">([^ ]+) '
|
||||
matches = scrapertools.find_multiple_matches(bloque, patron)
|
||||
for scrapedurl, scrapedquality, scrapedthumbnail, scrapedtitle, scrapedaudio, scrapedyear in matches:
|
||||
title = scrapedtitle + " (%s)" %scrapedyear
|
||||
itemlist.append(Item(channel = item.channel,
|
||||
action = 'findvideos',
|
||||
contentTitle = scrapedtitle,
|
||||
fulltitle = scrapedtitle,
|
||||
infoLabels = {'year':scrapedyear},
|
||||
quality = scrapedquality,
|
||||
thumbnail = scrapedthumbnail,
|
||||
title = title,
|
||||
url = scrapedurl
|
||||
))
|
||||
tmdb.set_infoLabels(itemlist, True)
|
||||
next_page = scrapertools.find_single_match(data,'tima>.*?href=(.*?) ><i')
|
||||
next_page = scrapertools.find_single_match(data, "next'.*?href='([^']+)'")
|
||||
itemlist.append(Item(channel=item.channel, action="agregadas", title='Pagina Siguiente >>',
|
||||
url=next_page.strip(),
|
||||
viewmode="movie_with_plot"))
|
||||
@@ -116,23 +102,6 @@ def agregadas(item):
|
||||
return itemlist
|
||||
|
||||
|
||||
def listaBuscar(item):
    """Scrape search results (page flattened to one line) into Items."""
    logger.info()
    data = re.sub(r"\n", " ", httptools.downloadpage(item.url).data)
    patron = 'class="row"> <a.*?="([^"]+).*?src="([^"]+).*?title="([^"]+).*?class="text-list">(.*?)</p>'
    itemlist = []
    for scrapedurl, scrapedthumb, scrapedtitle, scrapedplot in scrapertools.find_multiple_matches(data, patron):
        new_item = Item(channel=item.channel, action="findvideos",
                        title=scrapedtitle + " ", fulltitle=scrapedtitle,
                        url=scrapedurl, thumbnail=scrapedthumb,
                        show=scrapedtitle, plot=scrapedplot)
        itemlist.append(new_item)
    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
@@ -144,6 +113,16 @@ def findvideos(item):
|
||||
title = "%s [" + scrapedcalidad + "][" + scrapedidioma +"]"
|
||||
quality = scrapedcalidad
|
||||
language = scrapedidioma
|
||||
if "pelisup.com" in scrapedurl:
|
||||
scrapedurl = scrapedurl.replace("/v/","/api/source/")
|
||||
post = urllib.urlencode({"r":item.url,"d":"www.pelisup.com"})
|
||||
data1 = httptools.downloadpage(scrapedurl, post=post).data
|
||||
json_data = jsontools.load(data1)
|
||||
for dataj in json_data["data"]:
|
||||
itemlist.append(
|
||||
item.clone(channel=item.channel, action="play", title=title + " - %s" %dataj["label"], fulltitle=item.title, url="https://www.pelisup.com" + dataj["file"],
|
||||
quality= quality, language=language, extra = item.thumbnail))
|
||||
scrapedurl = "omina.farlante1" # para ya no agregar al itemlist
|
||||
if not ("omina.farlante1" in scrapedurl or "404" in scrapedurl):
|
||||
itemlist.append(
|
||||
item.clone(channel=item.channel, action="play", title=title, fulltitle=item.title, url=scrapedurl,
|
||||
@@ -151,7 +130,7 @@ def findvideos(item):
|
||||
tmdb.set_infoLabels(itemlist, True)
|
||||
itemlist=servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
|
||||
# Opción "Añadir esta película a la biblioteca de KODI"
|
||||
if item.extra != "library":
|
||||
if itemlist and item.contentChannel != "videolibrary":
|
||||
if config.get_videolibrary_support():
|
||||
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
|
||||
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
|
||||
|
||||
@@ -139,7 +139,7 @@ def lista(item):
|
||||
|
||||
if itemlist != []:
|
||||
actual_page_url = item.url
|
||||
next_page = scrapertools.find_single_match(data, '<a class=nextpostslink rel=next href=(.*?)>')
|
||||
next_page = scrapertools.find_single_match(data, 'rel=next href=(.*?) /')
|
||||
if next_page != '':
|
||||
itemlist.append(item.clone(action="lista",
|
||||
title='Siguiente >>>',
|
||||
@@ -192,7 +192,6 @@ def findvideos(item):
|
||||
data = data.replace("<","<").replace(""",'"').replace(">",">").replace("&","&").replace('\"',"")
|
||||
patron = '<div class=TPlayerTb.*?id=(.*?)>.*?src=(.*?) frameborder'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
headers = {'referer':item.url}
|
||||
for opt, urls_page in matches:
|
||||
language = scrapertools.find_single_match (data,'TPlayerNv>.*?tplayernv=%s><span>Opción.*?<span>(.*?)</span>' % opt)
|
||||
if 'trembed' in urls_page:
|
||||
@@ -201,6 +200,8 @@ def findvideos(item):
|
||||
urls_page = scrapertools.find_single_match(sub_data, 'src="([^"]+)" ')
|
||||
if "repro.live" in urls_page:
|
||||
server_repro(urls_page)
|
||||
if "repros.live" in urls_page:
|
||||
server_repros(urls_page)
|
||||
if "itatroniks.com" in urls_page:
|
||||
server_itatroniks(urls_page)
|
||||
for url in new_data:
|
||||
@@ -233,6 +234,20 @@ def server_itatroniks(urls_page):
|
||||
new_data.append(urls_page)
|
||||
|
||||
|
||||
def server_repros(urls_page):
    """Resolve repros.live embeds and append their final URLs to new_data.

    NOTE(review): relies on the module-level `new_data` list as an output
    accumulator, mirroring the sibling server_* helpers.
    """
    logger.info()
    page_headers = {"Referer": host}
    ajax_headers = {"X-Requested-With": "XMLHttpRequest"}
    embed_page = httptools.downloadpage(urls_page, headers=page_headers).data
    for embed_id in scrapertools.find_multiple_matches(embed_page, 'data-embed="([^"]+)"'):
        payload = urllib.urlencode({"codigo": embed_id})
        response = httptools.downloadpage("https://repros.live/player/ajaxdata",
                                          post=payload, headers=ajax_headers).data
        new_data.append(jsontools.load(response)["url"])
|
||||
|
||||
|
||||
def server_repro(urls_page):
|
||||
logger.info()
|
||||
headers = {"Referer":urls_page}
|
||||
|
||||
@@ -1,85 +0,0 @@
|
||||
{
|
||||
"id": "pordede",
|
||||
"name": "Pordede",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["cast"],
|
||||
"thumbnail": "pordede.png",
|
||||
"banner": "pordede.png",
|
||||
"categories": [
|
||||
"movie",
|
||||
"tvshow"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "pordedeuser",
|
||||
"type": "text",
|
||||
"label": "@30014",
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "pordedepassword",
|
||||
"type": "text",
|
||||
"hidden": true,
|
||||
"label": "@30015",
|
||||
"enabled": "!eq(-1,'')",
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": "!eq(-1,'') + !eq(-2,'')",
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "pordedesortlinks",
|
||||
"type": "list",
|
||||
"label": "Ordenar enlaces",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": "!eq(-2,'') + !eq(-3,'')",
|
||||
"lvalues": [
|
||||
"No",
|
||||
"Por no Reportes",
|
||||
"Por Idioma",
|
||||
"Por Calidad",
|
||||
"Por Idioma y Calidad",
|
||||
"Por Idioma y no Reportes",
|
||||
"Por Idioma, Calidad y no Reportes"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "pordedeshowlinks",
|
||||
"type": "list",
|
||||
"label": "Mostrar enlaces",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": "!eq(-3,'') + !eq(-4,'')",
|
||||
"lvalues": [
|
||||
"Todos",
|
||||
"Ver online",
|
||||
"Descargar"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "pordedenumberlinks",
|
||||
"type": "list",
|
||||
"label": "Limitar número de enlaces",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": "!eq(-4,'') + !eq(-5,'')",
|
||||
"lvalues": [
|
||||
"No",
|
||||
"5",
|
||||
"10",
|
||||
"15",
|
||||
"20",
|
||||
"25",
|
||||
"30"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,665 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import urlparse
|
||||
|
||||
from core import channeltools
|
||||
from core import httptools
|
||||
from core import jsontools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from platformcode import platformtools
|
||||
|
||||
def login():
    """Authenticate against pordede.com.

    Returns True when the session is already active (username visible on
    the landing page) or when the credential POST succeeds, else False.
    """
    landing = httptools.downloadpage("http://www.pordede.com").data
    user = config.get_setting("pordedeuser", "pordede")
    # Already logged in: the username shows up in the landing page.
    if user in landing:
        return True
    auth_url = ("http://www.pordede.com/api/login/auth?response_type=code"
                "&client_id=appclient&redirect_uri=http%3A%2F%2Fwww.pordede.com"
                "%2Fapi%2Flogin%2Freturn&state=none")
    credentials = "username=%s&password=%s&authorized=autorizar" % (
        user, config.get_setting("pordedepassword", "pordede"))
    response = httptools.downloadpage(auth_url, credentials).data
    return '"ok":true' in response
|
||||
|
||||
def mainlist(item):
    """Top-level menu: prompt for credentials, or show the main sections."""
    logger.info()
    itemlist = []
    if not config.get_setting("pordedeuser", "pordede"):
        # No account configured yet: only offer the settings dialog.
        itemlist.append(Item(channel=item.channel, title="Habilita tu cuenta en la configuración...",
                             action="settingCanal", url=""))
    else:
        if not login():
            itemlist.append(Item(channel=item.channel, action="mainlist",
                                 title="Login fallido. Volver a intentar..."))
            return itemlist
        itemlist.append(Item(channel=item.channel, action="menuseries", title="Series", url=""))
        itemlist.append(Item(channel=item.channel, action="menupeliculas", title="Películas y documentales", url=""))
        itemlist.append(Item(channel=item.channel, action="listas_sigues", title="Listas que sigues",
                             url="http://www.pordede.com/lists/following"))
        itemlist.append(Item(channel=item.channel, action="tus_listas", title="Tus listas",
                             url="http://www.pordede.com/lists/yours"))
        itemlist.append(Item(channel=item.channel, action="listas_sigues", title="Top listas",
                             url="http://www.pordede.com/lists"))
        itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url=""))
    return itemlist
|
||||
|
||||
def settingCanal(item):
    """Open the channel's settings dialog."""
    return platformtools.show_channel_settings()
|
||||
|
||||
def menuseries(item):
    """Build the 'Series' submenu."""
    logger.info()
    return [
        Item(channel=item.channel, action="peliculas", title="Novedades",
             url="http://www.pordede.com/series/loadmedia/offset/0/showlist/hot"),
        Item(channel=item.channel, action="generos", title="Por géneros",
             url="http://www.pordede.com/series"),
        Item(channel=item.channel, action="peliculas", title="Siguiendo",
             url="http://www.pordede.com/series/following"),
        Item(channel=item.channel, action="siguientes", title="Siguientes Capítulos",
             url="http://www.pordede.com/main/index", viewmode="movie"),
        Item(channel=item.channel, action="peliculas", title="Favoritas",
             url="http://www.pordede.com/series/favorite"),
        Item(channel=item.channel, action="peliculas", title="Pendientes",
             url="http://www.pordede.com/series/pending"),
        Item(channel=item.channel, action="peliculas", title="Terminadas",
             url="http://www.pordede.com/series/seen"),
        Item(channel=item.channel, action="peliculas", title="Recomendadas",
             url="http://www.pordede.com/series/recommended"),
        Item(channel=item.channel, action="search", title="Buscar...",
             url="http://www.pordede.com/series"),
    ]
|
||||
|
||||
def menupeliculas(item):
    """Build the 'Películas y documentales' submenu."""
    logger.info()
    return [
        Item(channel=item.channel, action="peliculas", title="Novedades",
             url="http://www.pordede.com/pelis/loadmedia/offset/0/showlist/hot"),
        Item(channel=item.channel, action="generos", title="Por géneros",
             url="http://www.pordede.com/pelis"),
        Item(channel=item.channel, action="peliculas", title="Favoritas",
             url="http://www.pordede.com/pelis/favorite"),
        Item(channel=item.channel, action="peliculas", title="Pendientes",
             url="http://www.pordede.com/pelis/pending"),
        Item(channel=item.channel, action="peliculas", title="Vistas",
             url="http://www.pordede.com/pelis/seen"),
        Item(channel=item.channel, action="peliculas", title="Recomendadas",
             url="http://www.pordede.com/pelis/recommended"),
        Item(channel=item.channel, action="search", title="Buscar...",
             url="http://www.pordede.com/pelis"),
    ]
|
||||
|
||||
def generos(item):
    """List available genres for either the movies or the series section."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    page = scrapertools.find_single_match(page, '<div class="section genre">(.*?)</div>')
    patron = ('<a class="mediaFilterLink" data-value="([^"]+)" href="([^"]+)">'
              '([^<]+)<span class="num">\((\d+)\)</span></a>')
    # Movies and series share the endpoint layout; only the section differs.
    seccion = "pelis" if "/pelis" in item.url else "series"
    itemlist = []
    for textid, scrapedurl, scrapedtitle, cuantos in re.compile(patron, re.DOTALL).findall(page):
        title = scrapedtitle.strip() + " (" + cuantos + ")"
        url = ("http://www.pordede.com/" + seccion + "/loadmedia/offset/0/genre/"
               + textid.replace(" ", "%20") + "/showlist/all")
        itemlist.append(Item(channel=item.channel, action="peliculas", title=title,
                             url=url, thumbnail="", plot="", fulltitle=title))
    return itemlist
|
||||
|
||||
def search(item, texto):
    """Global-search entry point; builds the query URL and runs buscar().

    Any scraping error is logged and an empty list returned so the global
    search never crashes on this channel.
    """
    logger.info()
    if item.url == "":
        item.url = "http://www.pordede.com/pelis"
    query = texto.replace(" ", "-")
    item.extra = item.url
    item.url = item.url + "/loadmedia/offset/0/query/" + query + "/years/1950/on/undefined/showlist/all"
    try:
        return buscar(item)
    except:
        import sys
        for trace_line in sys.exc_info():
            logger.error("%s" % trace_line)
        return []
|
||||
|
||||
def buscar(item):
    """Fetch the AJAX search payload and parse its embedded HTML."""
    logger.info()
    ajax_headers = {"X-Requested-With": "XMLHttpRequest"}
    raw = httptools.downloadpage(item.url, headers=ajax_headers).data
    html = jsontools.load(raw)["html"]
    return parse_mixed_results(item, html)
|
||||
|
||||
def parse_mixed_results(item, data):
    """Turn a block of pordede result HTML into movie/docu/series Items.

    Movie and documentary links are rewritten to their /links/view/ URL and
    routed to findvideos(); anything else is treated as a series and routed
    to episodios(). Appends a pagination Item when a loadingBar is present.
    """
    patron = ('<a class="defaultLink extended" href="([^"]+)"[^<]+'
              '<div class="coverMini shadow tiptip" title="([^"]+)"[^<]+'
              '<img class="centeredPic.*?src="([^"]+)"'
              '[^<]+<img[^<]+<div class="extra-info">'
              '<span class="year">([^<]+)</span>'
              '<span class="value"><i class="icon-star"></i>([^<]+)</span>')
    itemlist = []
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedvalue in re.compile(patron, re.DOTALL).findall(data):
        title = scrapertools.htmlclean(scrapedtitle)
        if scrapedyear != '':
            title += " (" + scrapedyear + ")"
        # fulltitle keeps the year but not the rating suffix.
        fulltitle = title
        if scrapedvalue != '':
            title += " (" + scrapedvalue + ")"
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        fanart = thumbnail.replace("mediathumb", "mediabigcover")
        if "/peli/" in scrapedurl or "/docu/" in scrapedurl:
            sectionStr = "peli" if "/peli/" in scrapedurl else "docu"
            referer = urlparse.urljoin(item.url, scrapedurl)
            url = referer.replace("/{0}/".format(sectionStr), "/links/view/slug/") + "/what/{0}".format(sectionStr)
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer,
                                 url=url, thumbnail=thumbnail, plot="", fulltitle=fulltitle, fanart=fanart,
                                 contentTitle=scrapedtitle, contentType="movie", context=["buscar_trailer"]))
        else:
            itemlist.append(Item(channel=item.channel, action="episodios", title=title, extra=item.url,
                                 url=urlparse.urljoin(item.url, scrapedurl), thumbnail=thumbnail, plot="",
                                 fulltitle=fulltitle, show=title, fanart=fanart,
                                 contentTitle=scrapedtitle, contentType="tvshow", context=["buscar_trailer"]))
    next_page = scrapertools.find_single_match(data, '<div class="loadingBar" data-url="([^"]+)"')
    if next_page != "":
        itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente",
                             extra=item.extra, url=urlparse.urljoin("http://www.pordede.com", next_page)))
    # Sorting hints only exist inside Kodi; ignore failures elsewhere.
    try:
        import xbmcplugin
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
    except:
        pass
    return itemlist
|
||||
|
||||
def siguientes(item):
    """List the next pending episode of every followed series.

    Fix: the original computed an unused `bloque` slice of the page
    (find_single_match on '<h2>Siguiendo</h2>…<div class="box">') and then
    ran the regex over the full page anyway; the dead work is removed.
    Matching behaviour is unchanged.
    """
    logger.info()
    data = httptools.downloadpage(item.url).data
    # Cover + "NxM" link per followed series.
    patron = '<div class="coverMini shadow tiptip" title="([^"]+)">[^<]+'
    patron += '<img class="centeredPic centeredPicFalse" onerror="[^"]+" src="([^"]+)"[^<]+'
    patron += '<img src="/images/loading-mini.gif" class="loader"/>[^<]+'
    patron += '<div class="extra-info"><span class="year">[^<]+'
    patron += '</span><span class="value"><i class="icon-star"></i>[^<]+'
    patron += '</span></div>[^<]+'
    patron += '</div>[^<]+'
    patron += '</a>[^<]+'
    patron += '<a class="userepiinfo defaultLink" href="([^"]+)">(\d+)x(\d+)'
    itemlist = []
    for scrapedtitle, scrapedthumbnail, scrapedurl, scrapedsession, scrapedepisode in re.compile(patron, re.DOTALL).findall(data):
        session = scrapertools.htmlclean(scrapedsession)
        episode = scrapertools.htmlclean(scrapedepisode)
        title = session + "x" + episode + " - " + scrapertools.htmlclean(scrapedtitle)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        fanart = thumbnail.replace("mediathumb", "mediabigcover")
        url = urlparse.urljoin(item.url, scrapedurl)
        # extra carries "session|episode" so episodio() can locate the entry.
        itemlist.append(Item(channel=item.channel, action="episodio", title=title, url=url,
                             thumbnail=thumbnail, plot="", fulltitle=title, show=title,
                             fanart=fanart, extra=session + "|" + episode))
    return itemlist
|
||||
|
||||
def episodio(item):
    """Resolve one followed episode (item.extra = 'session|episode') and
    return its video links directly via findvideos()."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    session, episode = [str(int(part)) for part in item.extra.split("|")[:2]]
    patrontemporada = '<div class="checkSeason"[^>]+>Temporada '+session+'<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>'
    for bloque_episodios in re.compile(patrontemporada, re.DOTALL).findall(data):
        # Match only the requested episode number inside its season block.
        patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">'+episode+' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
        for scrapedurl, scrapedtitle, info, visto in re.compile(patron, re.DOTALL).findall(bloque_episodios):
            marca = "[visto] " if visto.strip() == "active" else ""
            title = marca + session + "x" + episode + " " + scrapertools.htmlclean(scrapedtitle)
            epid = scrapertools.find_single_match(scrapedurl, "id/(\d+)")
            url = "http://www.pordede.com/links/viewepisode/id/" + epid
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
                                 thumbnail="", plot="", fulltitle=title, fanart=item.fanart,
                                 show=item.show))
    # NOTE(review): only the links of the LAST matched episode are returned —
    # the loop overwrites itemlist2 each pass, mirroring the original code.
    itemlist2 = []
    for capitulo in itemlist:
        itemlist2 = findvideos(capitulo)
    return itemlist2
|
||||
|
||||
def peliculas(item):
    """Fetch a media listing page via the AJAX endpoint and parse it."""
    logger.info()
    ajax_headers = {"X-Requested-With": "XMLHttpRequest"}
    raw = httptools.downloadpage(item.url, headers=ajax_headers).data
    return parse_mixed_results(item, jsontools.load(raw)["html"])
|
||||
|
||||
def episodios(item):
    """List every episode of a series, plus videolibrary/marking actions."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patrontemporada = '<div class="checkSeason"[^>]+>([^<]+)<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>'
    matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
    idserie = scrapertools.find_single_match(data, '<div id="layout4" class="itemProfile modelContainer" data-model="serie" data-id="(\d+)"')
    # Episode pattern is season-independent; hoisted out of the loop.
    patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">([^<]+)</span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
    compiled = re.compile(patron, re.DOTALL)
    for nombre_temporada, bloque_episodios in matchestemporadas:
        for scrapedurl, numero, scrapedtitle, info, visto in compiled.findall(bloque_episodios):
            marca = "[visto] " if visto.strip() == "active" else ""
            temporada = nombre_temporada.replace("Temporada ", "").replace("Extras", "Extras 0")
            title = marca + temporada + "x" + numero + " " + scrapertools.htmlclean(scrapedtitle)
            epid = scrapertools.find_single_match(scrapedurl, "id/(\d+)")
            url = "http://www.pordede.com/links/viewepisode/id/" + epid
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
                                 thumbnail=item.thumbnail, plot="", fulltitle=title,
                                 fanart=item.fanart, show=item.show))
    if config.get_videolibrary_support():
        # Strip "(year) (rating)" suffixes from the show name.
        show = re.sub(r"\s\(\d+\)\s\(\d+\.\d+\)", "", item.show)
        itemlist.append(Item(channel='pordede', title="Añadir esta serie a la biblioteca de XBMC",
                             url=item.url, action="add_serie_to_library", extra="episodios###", show=show))
        itemlist.append(Item(channel='pordede', title="Descargar todos los episodios de la serie",
                             url=item.url, action="download_all_episodes", extra="episodios", show=show))
        marcas = [("Marcar como Pendiente", "1"),
                  ("Marcar como Siguiendo", "2"),
                  ("Marcar como Finalizada", "3"),
                  ("Marcar como Favorita", "4"),
                  ("Quitar marca", "0")]
        for titulo_marca, valor in marcas:
            itemlist.append(Item(channel='pordede', title=titulo_marca, tipo="serie",
                                 idtemp=idserie, valor=valor, action="pordede_check", show=show))
    return itemlist
|
||||
|
||||
def parse_listas(item, patron):
    """Shared scraper for list pages (followed lists, own lists, top lists).

    `patron` must capture (url, title, user, ficha-count) per list entry.
    """
    logger.info()
    ajax_headers = {"X-Requested-With": "XMLHttpRequest"}
    raw = httptools.downloadpage(item.url, headers=ajax_headers).data
    data = jsontools.load(raw)["html"]
    itemlist = []
    for scrapedurl, scrapedtitle, scrapeduser, scrapedfichas in re.compile(patron, re.DOTALL).findall(data):
        title = scrapertools.htmlclean(scrapedtitle + ' (' + scrapedfichas + ' fichas, por ' + scrapeduser + ')')
        url = urlparse.urljoin(item.url, scrapedurl) + "/offset/0/loadmedia"
        itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url))
    nextpage = scrapertools.find_single_match(data, 'data-url="(/lists/loadlists/offset/[^"]+)"')
    if nextpage != '':
        itemlist.append(Item(channel=item.channel, action="listas_sigues", title=">> Página siguiente",
                             extra=item.extra, url=urlparse.urljoin(item.url, nextpage)))
    # Sorting hints only exist inside Kodi; ignore failures elsewhere.
    try:
        import xbmcplugin
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
    except:
        pass
    return itemlist
|
||||
|
||||
def listas_sigues(item):
    """Lists the user follows: build the entry regex and delegate to parse_listas()."""
    logger.info()

    patron = (
        '<div class="clearfix modelContainer" data-model="lista"[^<]+'
        '<span class="title"><span class="name"><a class="defaultLink" href="([^"]+)">([^<]+)</a>'
        '</span>[^<]+<a[^>]+>([^<]+)</a></span>\s+<div[^<]+<div[^<]+</div>\s+<div class="info">\s+<p>([0-9]+)'
    )
    return parse_listas(item, patron)
|
||||
|
||||
def tus_listas(item):
    """The user's own lists: same parsing, but the markup includes edit buttons."""
    logger.info()

    patron = (
        '<div class="clearfix modelContainer" data-model="lista"[^<]+'
        '<div class="right"[^<]+'
        '<button[^<]+</button[^<]+'
        '<button[^<]+</button[^<]+'
        '</div[^<]+'
        '<span class="title"><span class="name"><a class="defaultLink" href="([^"]+)">([^<]+)</a>'
        '</span>[^<]+<a[^>]+>([^<]+)</a></span>\s+<div[^<]+<div[^<]+</div>\s+<div class="info">\s+<p>([0-9]+)'
    )
    return parse_listas(item, patron)
|
||||
|
||||
def lista(item):
    """Open one list: fetch its AJAX fragment and render the mixed results."""
    logger.info()

    # The endpoint only answers with the fragment for XHR requests.
    response = httptools.downloadpage(
        item.url, headers={"X-Requested-With": "XMLHttpRequest"}).data

    # JSON envelope; the actual markup is stored under the "html" key.
    html = jsontools.load(response)["html"]

    return parse_mixed_results(item, html)
|
||||
|
||||
def findvideos(item, verTodos=False):
    # Build the list of playable/downloadable links for a film or episode page.
    # verTodos=True disables the "pordedenumberlinks" truncation (see
    # findallvideos); links can be filtered and sorted via channel settings.
    logger.info()

    # Download the page; it also embeds the session token play() needs later.
    data = httptools.downloadpage(item.url).data
    logger.info(data)

    sesion = scrapertools.find_single_match(data,'SESS = "([^"]+)";')

    # Each "aporteLink" anchor is one user-contributed link.
    patron = '<a target="_blank" class="a aporteLink(.*?)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    itemlist = []

    # Internal id of the film, reused below for the status-mark actions.
    idpeli = scrapertools.find_single_match(data,'<div class="buttons"><button class="defaultPopup onlyLogin" href="/links/create/ref_id/(\d+)/ref_model/4">Añadir enlace')

    # The INFO/SINOPSIS entry is only added on Kodi/XBMC and for film pages.
    if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")) and "/what/peli" in item.url:
        itemlist.append( Item(channel=item.channel, action="infosinopsis" , title="INFO / SINOPSIS" , url=item.url, thumbnail=item.thumbnail, fanart=item.fanart, folder=False ))

    itemsort = []
    # Channel settings: sort criterion and which kind of links to show.
    sortlinks = config.get_setting("pordedesortlinks",item.channel)
    showlinks = config.get_setting("pordedeshowlinks",item.channel)

    # Settings may come back as '' or "No"; normalise to an int (0 = off).
    if sortlinks != '' and sortlinks !="No":
        sortlinks = int(sortlinks)
    else:
        sortlinks = 0

    if showlinks != '' and showlinks !="No":
        showlinks = int(showlinks)
    else:
        showlinks = 0

    for match in matches:
        # The "jdownloader" marker distinguishes download links from streams.
        jdown = scrapertools.find_single_match(match,'<div class="jdownloader">[^<]+</div>')
        # showlinks: 1 = streams only (skip downloads), 2 = downloads only.
        if (showlinks == 1 and jdown != '') or (showlinks == 2 and jdown == ''):
            continue

        # Language flags: one or two per link; the second one appears to be
        # the subtitle track (cf. the ' SUB' handling in valora_idioma) —
        # not confirmed from this page alone.
        idiomas = re.compile('<div class="flag([^"]+)">([^<]+)</div>',re.DOTALL).findall(match)
        idioma_0 = (idiomas[0][0].replace(" ","").strip() + " " + idiomas[0][1].replace(" ","").strip()).strip()
        if len(idiomas) > 1:
            idioma_1 = (idiomas[1][0].replace(" ","").strip() + " " + idiomas[1][1].replace(" ","").strip()).strip()
            idioma = idioma_0 + ", " + idioma_1
        else:
            idioma_1 = ''
            idioma = idioma_0

        calidad_video = scrapertools.find_single_match(match,'<div class="linkInfo quality"><i class="icon-facetime-video"></i>([^<]+)</div>')
        calidad_audio = scrapertools.find_single_match(match,'<div class="linkInfo qualityaudio"><i class="icon-headphones"></i>([^<]+)</div>')

        # The hoster is identified through its popup thumbnail file name.
        thumb_servidor = scrapertools.find_single_match(match,'<div class="hostimage"[^<]+<img\s*src="([^"]+)">')
        nombre_servidor = scrapertools.find_single_match(thumb_servidor,"popup_([^\.]+)\.png")

        if jdown != '':
            title = "Download "+nombre_servidor+" ("+idioma+") (Calidad "+calidad_video.strip()+", audio "+calidad_audio.strip()+")"
        else:
            title = "Ver en "+nombre_servidor+" ("+idioma+") (Calidad "+calidad_video.strip()+", audio "+calidad_audio.strip()+")"

        # User feedback counters: value/1 = positive, value/2 = negative,
        # plus reports. Positive votes raise the score, the rest lower it.
        cuenta = []
        valoracion = 0
        for idx, val in enumerate(['1', '2', 'report']):
            nn = scrapertools.find_single_match(match,'<span\s+data-num="([^"]+)"\s+class="defaultPopup"\s+href="/likes/popup/value/'+val+'/')
            if nn != '0' and nn != '':
                cuenta.append(nn + ' ' + ['ok', 'ko', 'rep'][idx])

                if val == '1':
                    valoracion += int(nn)
                else:
                    valoracion += -int(nn)

        if len(cuenta) > 0:
            title += ' (' + ', '.join(cuenta) + ')'

        url = urlparse.urljoin( item.url , scrapertools.find_single_match(match,'href="([^"]+)"') )
        thumbnail = thumb_servidor
        plot = ""

        if sortlinks > 0:
            # Sort key per the configured criterion; the composite modes
            # weight language over quality over user score via multipliers.
            if sortlinks == 1:
                orden = valoracion
            elif sortlinks == 2:
                orden = valora_idioma(idioma_0, idioma_1)
            elif sortlinks == 3:
                orden = valora_calidad(calidad_video, calidad_audio)
            elif sortlinks == 4:
                orden = (valora_idioma(idioma_0, idioma_1) * 100) + valora_calidad(calidad_video, calidad_audio)
            elif sortlinks == 5:
                orden = (valora_idioma(idioma_0, idioma_1) * 1000) + valoracion
            elif sortlinks == 6:
                orden = (valora_idioma(idioma_0, idioma_1) * 100000) + (valora_calidad(calidad_video, calidad_audio) * 1000) + valoracion

            # orden1 sorts stream ("Ver en") links ahead of downloads.
            itemsort.append({'action': "play", 'title': title, 'url':url, 'thumbnail':thumbnail, 'fanart':item.fanart, 'plot':plot, 'extra':sesion+"|"+item.url, 'fulltitle':item.fulltitle, 'orden1': (jdown == ''), 'orden2':orden})
        else:
            itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, fanart= item.fanart, plot=plot, extra=sesion+"|"+item.url, fulltitle=item.fulltitle))

    if sortlinks > 0:
        # Optionally truncate the sorted list to the configured number.
        numberlinks = config.get_setting("pordedenumberlinks",item.channel)

        if numberlinks != '' and numberlinks !="No":
            numberlinks = int(numberlinks)
        else:
            numberlinks = 0

        if numberlinks == 0:
            verTodos = True

        itemsort = sorted(itemsort, key=lambda k: (k['orden1'], k['orden2']), reverse=True)
        for i, subitem in enumerate(itemsort):
            if verTodos == False and i >= numberlinks:
                # Offer the full, untruncated view via findallvideos().
                itemlist.append(Item(channel=item.channel, action='findallvideos' , title='Ver todos los enlaces', url=item.url, extra=item.extra ))
                break

            itemlist.append( Item(channel=item.channel, action=subitem['action'] , title=subitem['title'] , url=subitem['url'] , thumbnail=subitem['thumbnail'] , fanart= subitem['fanart'], plot=subitem['plot'] , extra=subitem['extra'] , fulltitle=subitem['fulltitle'] ))

    # Status-mark shortcuts, shown for films and documentaries only.
    if "/what/peli" in item.url or "/what/docu" in item.url:
        itemlist.append( Item(channel=item.channel, action="pordede_check" , tipo="peli", title="Marcar como Pendiente" , valor="1", idtemp=idpeli))
        itemlist.append( Item(channel=item.channel, action="pordede_check" , tipo="peli", title="Marcar como Vista" , valor="3", idtemp=idpeli))
        itemlist.append( Item(channel=item.channel, action="pordede_check" , tipo="peli", title="Marcar como Favorita" , valor="4", idtemp=idpeli))
        itemlist.append( Item(channel=item.channel, action="pordede_check" , tipo="peli", title="Quitar Marca" , valor="0", idtemp=idpeli))

    return itemlist
|
||||
|
||||
def findallvideos(item):
    """Variant of findvideos() that never truncates the link list."""
    return findvideos(item, verTodos=True)
|
||||
|
||||
def play(item):
    """Resolve one link into playable video items.

    item.extra carries "<session-token>|<page-url>" as set by findvideos().
    """
    sesion = item.extra.split("|")[0]
    pagina = item.extra.split("|")[1]

    # Mark the film/episode as seen before resolving the link.
    checkseen(pagina)

    data = httptools.downloadpage(item.url, post="_s=" + sesion,
                                  headers={'Referer': pagina}).data
    url = scrapertools.find_single_match(
        data, '<p class="nicetry links">\s+<a href="([^"]+)" target="_blank"')
    url = urlparse.urljoin(item.url, url)

    # The intermediate URL answers with a redirect to the real media page;
    # grab the Location header instead of following it.
    media_url = httptools.downloadpage(url, headers={'Referer': item.url},
                                       follow_redirects=False).headers.get("location")

    itemlist = servertools.find_video_items(data=media_url)
    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel

    return itemlist
|
||||
|
||||
def checkseen(item):
    """Notify pordede that *item* (a page URL string) has been watched."""
    logger.info(item)

    if "/viewepisode/" in item:
        # Episode pages carry the id as the last path segment.
        episode_id = item.split("/")[-1]
        httptools.downloadpage("http://www.pordede.com/ajax/action",
                               post="model=episode&id=" + episode_id + "&action=seen&value=1")

    if "/what/peli" in item:
        # Films need an extra request to discover their internal id first.
        data = httptools.downloadpage(item).data

        movie_id = scrapertools.find_single_match(data, 'href="/links/create/ref_id/([0-9]+)/ref_model/')
        httptools.downloadpage("http://www.pordede.com/ajax/mediaaction",
                               post="model=peli&id=" + movie_id + "&action=status&value=3")

    return True
|
||||
|
||||
def infosinopsis(item):
    """Scrape the film page and display title/plot/cast in a modal text box."""
    logger.info()

    # Map the links URL back onto the plain film page before downloading.
    url_aux = item.url.replace("/links/view/slug/", "/peli/").replace("/what/peli", "")
    data = httptools.downloadpage(url_aux).data

    raw_title = scrapertools.find_single_match(data, '<h1>([^<]+)</h1>')
    raw_value = scrapertools.find_single_match(data, '<span class="puntuationValue" data-value="([^"]+)"')
    raw_year = scrapertools.find_single_match(data, '<h2 class="info">[^<]+</h2>\s*<p class="info">([^<]+)</p>')
    # Same pattern, second occurrence (index 1) holds the duration.
    raw_duration = scrapertools.find_single_match(data, '<h2 class="info">[^<]+</h2>\s*<p class="info">([^<]+)</p>', 1)
    raw_plot = scrapertools.find_single_match(data, '<div class="info text"[^>]+>([^<]+)</div>')
    genres = re.compile('href="/pelis/index/genre/[^"]+">([^<]+)</a>', re.DOTALL).findall(data)
    casting = re.compile('href="/star/[^"]+">([^<]+)</a><br/><span>([^<]+)</span>', re.DOTALL).findall(data)

    title = scrapertools.htmlclean(raw_title)
    plot = "Año: [B]" + raw_year + "[/B]"
    plot += " , Duración: [B]" + raw_duration + "[/B]"
    plot += " , Puntuación usuarios: [B]" + raw_value + "[/B]"
    plot += "\nGéneros: " + ", ".join(genres)
    plot += "\n\nSinopsis:\n" + scrapertools.htmlclean(raw_plot)
    plot += "\n\nCasting:\n"
    for actor, papel in casting:
        plot += actor + " (" + papel + "). "

    # Show everything in the skinned modal dialog, then release it.
    tbd = TextBox("DialogTextViewer.xml", os.getcwd(), "Default")
    tbd.ask(title, plot)

    del tbd
    return
|
||||
|
||||
# xbmcgui only exists inside Kodi; outside it the dialog class is simply
# not defined (infosinopsis is only wired up on xbmc/kodi platforms).
try:
    import xbmcgui

    class TextBox(xbmcgui.WindowXML):
        """Skinned read-only text viewer window."""

        def __init__(self, *args, **kwargs):
            pass

        def onInit(self):
            # Controls 5 and 1 are the text area and the heading in the skin.
            try:
                self.getControl(5).setText(self.text)
                self.getControl(1).setLabel(self.title)
            except:
                pass

        def onClick(self, controlId):
            pass

        def onFocus(self, controlId):
            pass

        def onAction(self, action):
            # Action id 7 dismisses the dialog.
            if action == 7:
                self.close()

        def ask(self, title, text):
            # Stash what onInit() will render, then block until closed.
            self.title = title
            self.text = text
            self.doModal()
except:
    pass
|
||||
|
||||
def valora_calidad(video, audio):
    """Score a link by video and audio quality for sorting (higher is better).

    The video quality dominates (worth tens); the audio quality acts as a
    units-level tie-breaker, mirroring valora_idioma(). Unknown labels get
    a middling default so they rank below the best known options.
    """
    prefs_video = [ 'hdmicro', 'hd1080', 'hd720', 'hdrip', 'dvdrip', 'rip', 'tc-screener', 'ts-screener' ]
    prefs_audio = [ 'dts', '5.1', 'rip', 'line', 'screener' ]

    # Normalise labels: drop all whitespace and lowercase ("HD 1080" -> "hd1080").
    video = ''.join(video.split()).lower()
    if video in prefs_video:
        pts = (9 - prefs_video.index(video)) * 10
    else:
        pts = (9 - 1) * 10

    audio = ''.join(audio.split()).lower()
    # Bug fix: this branch used to reassign pts (pts = (9 - ...) * 10),
    # discarding the video score entirely. Accumulate the audio score as a
    # units-level tie-breaker instead, consistent with valora_idioma().
    if audio in prefs_audio:
        pts += 9 - prefs_audio.index(audio)
    else:
        pts += 9 - 1

    return pts
|
||||
|
||||
def valora_idioma(idioma_0, idioma_1):
    """Score a link's language combination for sorting (higher is better).

    The primary language is worth tens; the secondary language is a
    units-level tie-breaker, and having no secondary at all scores best.
    Unknown languages get a middling default.
    """
    prefs = [ 'spanish', 'spanish LAT', 'catalan', 'english', 'french' ]

    # Primary language: tens.
    pts = (9 - prefs.index(idioma_0)) * 10 if idioma_0 in prefs else (9 - 1) * 10

    # Secondary language: units. An absent secondary outranks any present one.
    if idioma_1 == '':
        pts += 9
    else:
        secundario = idioma_1.replace(' SUB', '')
        pts += (8 - prefs.index(secundario)) if secundario in prefs else (8 - 1)

    return pts
|
||||
|
||||
def pordede_check(item):
    """Send a status mark (pending/seen/favourite/clear...) for *item* to pordede."""
    payload = "model=" + item.tipo + "&id=" + item.idtemp + "&action=status&value=" + item.valor
    httptools.downloadpage("http://www.pordede.com/ajax/mediaaction", post=payload)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"id": "seodiv",
|
||||
"name": "Seodiv",
|
||||
"active": true,
|
||||
"active": false,
|
||||
"adult": false,
|
||||
"language": ["lat"],
|
||||
"thumbnail": "https://s32.postimg.cc/gh8lhbkb9/seodiv.png",
|
||||
|
||||
@@ -22,25 +22,25 @@ def mainlist(item):
|
||||
|
||||
itemlist.append(Item(channel=CHANNELNAME, title="", action="", folder=False, thumbnail=get_thumb("setting_0.png")))
|
||||
|
||||
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60536), action="", folder=False,
|
||||
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60536) + ":", text_bold=True, action="", folder=False,
|
||||
thumbnail=get_thumb("setting_0.png")))
|
||||
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60537), action="menu_channels", folder=True,
|
||||
itemlist.append(Item(channel=CHANNELNAME, title=" " + config.get_localized_string(60537), action="menu_channels", folder=True,
|
||||
thumbnail=get_thumb("channels.png")))
|
||||
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60538), action="menu_servers", folder=True,
|
||||
itemlist.append(Item(channel=CHANNELNAME, title=" " + config.get_localized_string(60538), action="menu_servers", folder=True,
|
||||
thumbnail=get_thumb("channels.png")))
|
||||
itemlist.append(Item(channel="news", title=config.get_localized_string(60539), action="menu_opciones",
|
||||
itemlist.append(Item(channel="news", title=" " + config.get_localized_string(60539), action="menu_opciones",
|
||||
folder=True, thumbnail=get_thumb("news.png")))
|
||||
itemlist.append(Item(channel="search", title=config.get_localized_string(60540), action="opciones", folder=True,
|
||||
itemlist.append(Item(channel="search", title=" " + config.get_localized_string(60540), action="opciones", folder=True,
|
||||
thumbnail=get_thumb("search.png")))
|
||||
#itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60541), action="channel_config",
|
||||
# config="downloads", folder=True, thumbnail=get_thumb("downloads.png")))
|
||||
itemlist.append(Item(channel=CHANNELNAME, title=" " + config.get_localized_string(60541), action="channel_config",
|
||||
config="downloads", folder=True, thumbnail=get_thumb("downloads.png")))
|
||||
|
||||
if config.get_videolibrary_support():
|
||||
itemlist.append(Item(channel="videolibrary", title=config.get_localized_string(60542), action="channel_config",
|
||||
itemlist.append(Item(channel="videolibrary", title=" " + config.get_localized_string(60542), action="channel_config",
|
||||
folder=True, thumbnail=get_thumb("videolibrary.png")))
|
||||
|
||||
if config.is_xbmc():
|
||||
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(70253), action="setting_torrent",
|
||||
itemlist.append(Item(channel=CHANNELNAME, title=" " + config.get_localized_string(70253), action="setting_torrent",
|
||||
folder=True, thumbnail=get_thumb("channels_torrent.png")))
|
||||
|
||||
itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False, thumbnail=get_thumb("setting_0.png")))
|
||||
|
||||
@@ -270,13 +270,6 @@
|
||||
"@60649"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "lab_1",
|
||||
"type": "label",
|
||||
"label": "@60650",
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "lowerize_title",
|
||||
"type": "list",
|
||||
@@ -287,6 +280,13 @@
|
||||
"No"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "lab_1",
|
||||
"type": "label",
|
||||
"label": "@60650",
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "scraper_movies",
|
||||
"type": "list",
|
||||
|
||||
@@ -2550,7 +2550,7 @@ msgid "Show option \"All Seasons\"."
|
||||
msgstr ""
|
||||
|
||||
msgctxt "#60643"
|
||||
msgid "Do not combine the seasons of the series""
|
||||
msgid "Do not combine the seasons of the series"
|
||||
msgstr ""
|
||||
|
||||
msgctxt "#60644"
|
||||
|
||||
@@ -3358,8 +3358,16 @@ msgid "Press to 'Clear cache' saved"
|
||||
msgstr "Pulse para 'Borrar caché' guardada"
|
||||
|
||||
msgctxt "#70164"
|
||||
msgid "Free First|Premium First|Debriders First"
|
||||
msgstr "Free primero|Premium primero|Debriders primero"
|
||||
msgid "Free First"
|
||||
msgstr "Free primero"
|
||||
|
||||
msgctxt "#70165"
|
||||
msgid "Premium First"
|
||||
msgstr "Premium primero"
|
||||
|
||||
msgctxt "#70166"
|
||||
msgid "Debriders First"
|
||||
msgstr "Debriders primero"
|
||||
|
||||
msgctxt "#70167"
|
||||
msgid "Titles Options"
|
||||
@@ -3674,7 +3682,7 @@ msgid "Reorder"
|
||||
msgstr "Reordenar"
|
||||
|
||||
msgctxt "#70246"
|
||||
msgid " - Lingua preferita"
|
||||
msgid " - Preferred language"
|
||||
msgstr " - Idioma preferido"
|
||||
|
||||
msgctxt "#70247"
|
||||
|
||||
@@ -3358,8 +3358,16 @@ msgid "Press to 'Clear cache' saved"
|
||||
msgstr "Pulse para 'Borrar caché' guardada"
|
||||
|
||||
msgctxt "#70164"
|
||||
msgid "Free First|Premium First|Debriders First"
|
||||
msgstr "Free primero|Premium primero|Debriders primero"
|
||||
msgid "Free First"
|
||||
msgstr "Free primero"
|
||||
|
||||
msgctxt "#70165"
|
||||
msgid "Premium First"
|
||||
msgstr "Premium primero"
|
||||
|
||||
msgctxt "#70166"
|
||||
msgid "Debriders First"
|
||||
msgstr "Debriders primero"
|
||||
|
||||
msgctxt "#70167"
|
||||
msgid "Titles Options"
|
||||
@@ -3674,7 +3682,7 @@ msgid "Reorder"
|
||||
msgstr "Reordenar"
|
||||
|
||||
msgctxt "#70246"
|
||||
msgid " - Lingua preferita"
|
||||
msgid " - Preferred language"
|
||||
msgstr " - Idioma preferido"
|
||||
|
||||
msgctxt "#70247"
|
||||
|
||||
@@ -3682,7 +3682,7 @@ msgid "Reorder"
|
||||
msgstr "Reordenar"
|
||||
|
||||
msgctxt "#70246"
|
||||
msgid " - Lingua preferita"
|
||||
msgid " - Preferred language"
|
||||
msgstr " - Idioma preferido"
|
||||
|
||||
msgctxt "#70247"
|
||||
|
||||
Reference in New Issue
Block a user