Fixes

- Dilo: new channel
- Goovie: fix for the site's new page structure
- PelisIpad: fix for thumbnails
- SeriesBlanco: fix for the site's new page structure
- platformtools: generic thumbnails for video library and next-page items
This commit is contained in:
Unknown
2018-09-19 15:19:23 -03:00
parent 0caa11ef11
commit f3cf2e518b
7 changed files with 427 additions and 136 deletions

View File

@@ -0,0 +1,37 @@
{
    "id": "dilo",
    "name": "Dilo",
    "active": true,
    "adult": false,
    "language": [],
    "thumbnail": "https://s22.postimg.cc/u6efsniqp/dilo.png",
    "banner": "",
    "categories": [
        "tvshow",
        "vos"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "CAST",
                "LAT",
                "VOSE"
            ]
        }
    ]
}
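The settings block above drives the channel's per-channel options. As a rough illustration (assuming Alfa's config.get_setting(name, channel) helper, and that a "list" setting returns the selected index into "lvalues"), a channel would read the language filter like this:

# Minimal sketch, not part of the commit: reading the "filter_languages"
# setting declared in dilo.json. Index 0 ("No filtrar") means no filtering.
from platformcode import config

def get_language_filter(channel='dilo'):
    lvalues = ["No filtrar", "CAST", "LAT", "VOSE"]
    idx = config.get_setting('filter_languages', channel)
    return None if idx in (0, None) else lvalues[idx]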

View File

@@ -0,0 +1,297 @@
# -*- coding: utf-8 -*-
# -*- Channel Dilo -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'https://www.dilo.nu/'
IDIOMAS = {'Español': 'CAST', 'Latino': 'LAT', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'streamango', 'powvideo', 'clipwatching', 'streamplay', 'streamcherry', 'gamovideo']
def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data

def mainlist(item):
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Nuevos capitulos", action="latest_episodes", url=host,
                         thumbnail=get_thumb('new episodes', auto=True)))
    itemlist.append(Item(channel=item.channel, title="Ultimas", action="latest_shows", url=host,
                         thumbnail=get_thumb('last', auto=True)))
    itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'catalogue',
                         thumbnail=get_thumb('all', auto=True)))
    itemlist.append(Item(channel=item.channel, title="Generos", action="section",
                         url=host + 'catalogue', thumbnail=get_thumb('genres', auto=True)))
    itemlist.append(Item(channel=item.channel, title="Por Años", action="section", url=host + 'catalogue',
                         thumbnail=get_thumb('year', auto=True)))
    itemlist.append(Item(channel=item.channel, title='Buscar', action="search", url=host + 'search?s=',
                         thumbnail=get_thumb('search', auto=True)))
    autoplay.show_option(item.channel, itemlist)
    return itemlist

def list_all(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<div class="col-lg-2 col-md-3 col-6 mb-3"><a href="([^"]+)".*?<img src="([^"]+)".*?'
    patron += 'font-weight-500">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        url = scrapedurl
        thumbnail = scrapedthumbnail
        new_item = Item(channel=item.channel, title=scrapedtitle, url=url,
                        thumbnail=thumbnail)
        new_item.contentSerieName = scrapedtitle
        new_item.action = 'seasons'
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Paginacion
    if itemlist != []:
        page_base = host + 'catalogue'
        next_page = scrapertools.find_single_match(data, '<a href="([^ ]+)" aria-label="Netx">')
        if next_page != '':
            itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
                                 url=page_base + next_page, thumbnail=get_thumb("more.png"),
                                 type=item.type))
    return itemlist
def section(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    if item.title == 'Generos':
        data = scrapertools.find_single_match(data, '>Todos los generos</button>.*?<button class')
    elif 'Años' in item.title:
        data = scrapertools.find_single_match(data, '>Todos los años</button>.*?<button class')
    patron = 'input" id="([^"]+)".*?name="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for id, name in matches:
        url = '%s?%s=%s' % (item.url, name, id)
        title = id.capitalize()
        itemlist.append(Item(channel=item.channel, title=title, url=url, action='list_all'))
    return itemlist

def latest_episodes(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<a class="media" href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?'
    patron += 'width: 97%">([^<]+)</div><div>(\d+x\d+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedcontent, scrapedep in matches:
        title = '%s)' % (scrapedtitle.replace(' Online ', ' ('))
        contentSerieName = scrapedcontent
        itemlist.append(Item(channel=item.channel, action='findvideos', url=scrapedurl, thumbnail=scrapedthumbnail,
                             title=title, contentSerieName=contentSerieName, type='episode'))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist

def latest_shows(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    data = scrapertools.find_single_match(data, '>Nuevas series</div>.*?text-uppercase"')
    patron = '<div class="col-lg-3 col-md-4 col-6 mb-3"><a href="([^"]+)".*?src="([^"]+)".*?weight-500">([^<]+)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        title = scrapedtitle
        contentSerieName = scrapedtitle
        itemlist.append(Item(channel=item.channel, action='seasons', url=scrapedurl, thumbnail=scrapedthumbnail,
                             title=title, contentSerieName=contentSerieName))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def seasons(item):
    import urllib
    logger.info()
    itemlist = []
    data = get_source(item.url)
    serie_id = scrapertools.find_single_match(data, '{"item_id": (\d+)}')
    post = {'item_id': serie_id}
    post = urllib.urlencode(post)
    seasons_url = '%sapi/web/seasons.php' % host
    headers = {'Referer': item.url}
    data = jsontools.load(httptools.downloadpage(seasons_url, post=post, headers=headers).data)
    infoLabels = item.infoLabels
    for season_info in data:
        season = season_info['number']
        if season != '0':
            infoLabels['season'] = season
            title = 'Temporada %s' % season
            itemlist.append(Item(channel=item.channel, url=item.url, title=title, action='episodesxseason',
                                 contentSeasonNumber=season, id=serie_id, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
    return itemlist
def episodesxseason(item):
    import urllib
    logger.info()
    itemlist = []
    season = item.infoLabels['season']
    post = {'item_id': item.id, 'season_number': season}
    post = urllib.urlencode(post)
    episodes_url = '%sapi/web/episodes.php' % host
    headers = {'Referer': item.url}
    data = jsontools.load(httptools.downloadpage(episodes_url, post=post, headers=headers).data)
    infoLabels = item.infoLabels
    for episode_info in data:
        episode = episode_info['number']
        epi_name = episode_info['name']
        title = '%sx%s - %s' % (season, episode, epi_name)
        url = '%s%s/' % (host, episode_info['permalink'])
        infoLabels['episode'] = episode
        itemlist.append(Item(channel=item.channel, title=title, action='findvideos', url=url,
                             contentEpisodeNumber=episode, id=item.id, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist

def episodios(item):
    logger.info()
    itemlist = []
    templist = seasons(item)
    for tempitem in templist:
        itemlist += episodesxseason(tempitem)
    return itemlist
def findvideos(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'data-link="([^"]+)">.*?500">([^<]+)<.*?>Reproducir en ([^<]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for enc_url, server, language in matches:
        if not config.get_setting('unify'):
            title = ' [%s]' % language
        else:
            title = ''
        itemlist.append(Item(channel=item.channel, title='%s' + title, url=enc_url, action='play',
                             language=IDIOMAS[language], server=server, infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    return itemlist

def decode_link(enc_url):
    logger.info()
    url = enc_url  # fall back to the encoded URL if decoding fails
    try:
        new_data = get_source(enc_url)
        new_enc_url = scrapertools.find_single_match(new_data, 'src="([^"]+)"')
        try:
            url = httptools.downloadpage(new_enc_url, follow_redirects=False).headers['location']
        except:
            if not 'jquery' in new_enc_url:
                url = new_enc_url
    except:
        pass
    return url

def play(item):
    logger.info()
    item.url = decode_link(item.url)
    itemlist = [item]
    return itemlist
def search(item, texto):
    logger.info()
    itemlist = []
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        try:
            return list_all(item)
        except:
            itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
    return itemlist
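For reference, the season and episode listings above do not scrape HTML: seasons() and episodesxseason() POST the scraped item_id to small JSON endpoints. A minimal sketch of that exchange, reusing the repo's httptools/jsontools helpers as in the code above (the sample id is made up):

# Hedged sketch, not part of the commit: querying Dilo's episodes endpoint
# directly. Field names and the endpoint path come from the channel code.
import urllib
from core import httptools, jsontools

host = 'https://www.dilo.nu/'

def fetch_episodes(serie_id, season, referer):
    post = urllib.urlencode({'item_id': serie_id, 'season_number': season})
    url = '%sapi/web/episodes.php' % host
    data = httptools.downloadpage(url, post=post, headers={'Referer': referer}).data
    # Returns a list of dicts with 'number', 'name' and 'permalink' keys
    return jsontools.load(data)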

View File

@@ -17,10 +17,10 @@ from channels import autoplay
 from platformcode import config, logger
-IDIOMAS = {'1':'Cast', '2':'Lat', '3':'VOSE', '4':'VO'}
+IDIOMAS = {'EspaL':'Cast', 'LatinoL':'Lat', 'SubL':'VOSE', 'OriL':'VO'}
 list_language = IDIOMAS.values()
-CALIDADES = {'1':'1080','2':'720','3':'480','4':'360'}
+CALIDADES = {'1080p':'1080','720p':'720','480p':'480','360p':'360'}
 list_quality = ['1080', '720', '480', '360']
@@ -89,17 +89,20 @@ def section(item):
     logger.info()
     itemlist=[]
     data = get_source(host+item.type)
     if 'Genero' in item.title:
-        data = scrapertools.find_single_match(data, 'genero.*?</ul>')
+        data = scrapertools.find_single_match(data, 'Generos.*?</ul>')
     elif 'Año' in item.title:
-        data = scrapertools.find_single_match(data, 'año.*?</ul>')
-    patron = '<a href=(.*?) >(.*?)</a>'
+        data = scrapertools.find_single_match(data, 'Años.*?</ul>')
+    patron = "<li onclick=filter\(this, '([^']+)', \d+\);>"
     matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl, scrapedtitle in matches:
+    for scrapedtitle in matches:
         title = scrapedtitle
-        itemlist.append(Item(channel=item.channel, url=scrapedurl, title=title, action='list_all',
+        if r'\d+' in scrapedtitle:
+            url = '%s%s/filtro/,/%s,' % (host, item.type, title)
+        else:
+            url = '%s%s/filtro/%s,/,' % (host, item.type, title)
+        itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all',
                              type=item.type))
     return itemlist
@@ -109,46 +112,33 @@ def list_all(item):
     itemlist = []
     data = get_source(item.url)
-    #logger.debug(data)
-    #return
-    if item.type == 'peliculas':
-        patron = '<article class=Items>.*?<img src=(.*?) />.*?<a href=(.*?)><h2>(.*?)</h2>.*?'
-        patron += "<p>(.*?)</p><span>(\d{4}) /.*?</span>.*?'(\d+)'"
-        matches = re.compile(patron, re.DOTALL).findall(data)
+    patron = '<article class=Item><a href=([^>]+)><div class=Poster>'
+    patron += '<img src=(.+?)(?:>|alt).*?<h2>([^>]+)</h2>.*?</article>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
-        for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedplot, year, video_id in matches:
+    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
-            title = '%s [%s]' % (scrapedtitle, year)
-            contentTitle = scrapedtitle
-            thumbnail = scrapedthumbnail
-            url = scrapedurl
+        title = scrapedtitle
+        thumbnail = scrapedthumbnail.strip()
+        url = scrapedurl
+        filter_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w154", "")
+        filter_list = {"poster_path": filter_thumb}
+        filter_list = filter_list.items()
+        new_item = Item(channel=item.channel,
+                        title=title,
+                        url=url,
+                        thumbnail=thumbnail,
+                        plot=thumbnail,
+                        infoLabels={'filtro':filter_list})
-            itemlist.append(item.clone(action='findvideos',
-                                       title=title,
-                                       url=url,
-                                       thumbnail=thumbnail,
-                                       contentTitle=contentTitle,
-                                       video_id=video_id,
-                                       infoLabels={'year':year}))
+        if item.type == 'peliculas':
+            new_item.action = 'findvideos'
+            new_item.contentTitle = scrapedtitle
+        else:
+            new_item.action = 'seasons'
+            new_item.contentSerieName = scrapedtitle
-    elif item.type == 'series':
-        patron = '<article class=GoItemEp>.*?<a href=(.*?)>.*?<img src=(.*?) />.*?'
-        patron +='<h2>(.*?)</h2><p>(.*?)</p><span>(\d{4}) /'
-        matches = re.compile(patron, re.DOTALL).findall(data)
-        for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, year in matches:
-            title = scrapedtitle
-            contentSerieName = scrapedtitle
-            thumbnail = scrapedthumbnail
-            url = scrapedurl
-            itemlist.append(item.clone(action='seasons',
-                                       title=title,
-                                       url=url,
-                                       thumbnail=thumbnail,
-                                       plot=scrapedplot,
-                                       contentSerieName=contentSerieName,
-                                       infoLabels={'year':year}))
+        itemlist.append(new_item)
     tmdb.set_infoLabels(itemlist, seekTmdb=True)
     # Paginación
@@ -199,21 +189,18 @@ def episodesxseasons(item):
     itemlist = []
     data=get_source(item.url)
-    logger.debug(data)
-    patron= "ViewEpisode\('(\d+)', this\)><div class=num>%s - (\d+)</div>" % item.infoLabels['season']
-    patron += ".*?src=(.*?) />.*?namep>(.*?)<span>"
+    patron= "<li><a href=([^>]+)><b>%s - (\d+)</b><h2 class=eTitle>([^>]+)</h2>" % item.infoLabels['season']
     matches = re.compile(patron, re.DOTALL).findall(data)
     infoLabels = item.infoLabels
-    for video_id, scrapedepisode, scrapedthumbnail, scrapedtitle in matches:
+    for url, scrapedepisode, scrapedtitle in matches:
         infoLabels['episode'] = scrapedepisode
         title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
-        itemlist.append(Item(channel=item.channel, title= title, url=item.url, thumbnail=scrapedthumbnail,
-                             action='findvideos', video_id=video_id, infoLabels=infoLabels))
+        itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos',
+                             infoLabels=infoLabels))
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -224,87 +211,45 @@ def findvideos(item):
     logger.info()
     from lib import jsunpack
     itemlist = []
-    headers = {'referer':item.url}
-    if item.video_id == '':
-        find_id = get_source(item.url)
-        #logger.debug(find_id)
-        #return
-        item.video_id = scrapertools.find_single_match(find_id, 'var centerClick = (\d+);')
-    url = 'https://goovie.co/api/links/%s' % item.video_id
-    data = httptools.downloadpage(url, headers=headers).data
-    video_list = jsontools.load(data)
-    for video_info in video_list:
-        logger.debug(video_info)
-        url = video_info['visor']
-        plot = 'idioma: %s calidad: %s' % (video_info['idioma'], video_info['calidad'])
+    data = get_source(item.url)
+    patron = "onclick=clickLink\(this, '([^']+)', '([^']+)', '([^']+)'\);>"
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    headers = {'referer': item.url}
+    for url, quality, language in matches:
         data = httptools.downloadpage(url, headers=headers, follow_redirects=False).data
         data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
         packed = scrapertools.find_single_match(data, '(eval\(.*?);var')
         unpacked = jsunpack.unpack(packed)
         logger.debug('unpacked %s' % unpacked)
         server = scrapertools.find_single_match(unpacked, "src:.'(http://\D+)/")
         id = scrapertools.find_single_match(unpacked, "src:.'http://\D+/.*?description:.'(.*?).'")
         if server == '':
             if 'powvideo' in unpacked:
-                id = scrapertools.find_single_match(unpacked ,",description:.'(.*?).'")
-                server= 'https://powvideo.net'
+                id = scrapertools.find_single_match(unpacked, ",description:.'(.*?).'")
+                server = 'https://powvideo.net'
         url = '%s/%s' % (server, id)
         if server != '' and id != '':
-            language = IDIOMAS[video_info['idioma']]
-            quality = CALIDADES[video_info['calidad']]
+            language = IDIOMAS[language]
+            quality = CALIDADES[quality]
             title = ' [%s] [%s]' % (language, quality)
-            itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=language,
-                                 quality=quality))
-    itmelist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
+            itemlist.append(Item(channel=item.channel, title='%s' + title, url=url, action='play', language=language,
+                                 quality=quality, infoLabels=item.infoLabels))
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
     return sorted(itemlist, key=lambda i: i.language)
 def search(item, texto):
     logger.info()
     texto = texto.replace(" ", "+")
     item.url = item.url + texto
+    item.type = 'peliculas'
     if texto != '':
-        return search_results(item)
+        return list_all(item)
     else:
         return []
-def search_results(item):
-    logger.info()
-    itemlist=[]
-    data=get_source(item.url)
-    logger.debug(data)
-    patron = '<article class=Items>.*?href=(.*?)>.*?typeContent>(.*?)<.*?'
-    patron += '<img src=(.*?) />.*?<h2>(.*?)</h2><p>(.*?)</p><span>(\d{4})<'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl, content_type ,scrapedthumb, scrapedtitle, scrapedplot, year in matches:
-        title = scrapedtitle
-        url = scrapedurl
-        thumbnail = scrapedthumb
-        plot = scrapedplot
-        if content_type != 'Serie':
-            action = 'findvideos'
-        else:
-            action = 'seasons'
-        new_item=Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, plot=plot,
-                      action=action, type=content_type, infoLabels={'year':year})
-        if new_item.action == 'findvideos':
-            new_item.contentTitle = new_item.title
-        else:
-            new_item.contentSerieName = new_item.title
-        itemlist.append(new_item)
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-    return itemlist
 def newest(categoria):
     logger.info()
     itemlist = []
@@ -313,9 +258,9 @@ def newest(categoria):
         if categoria in ['peliculas']:
             item.url = host + 'peliculas'
         elif categoria == 'infantiles':
-            item.url = host + 'peliculas/generos/animación'
+            item.url = host + 'peliculas/filtro/Animación,/,'
         elif categoria == 'terror':
-            item.url = host + 'peliculas/generos/terror'
+            item.url = host + 'peliculas/filtro/Terror,/,'
         item.type='peliculas'
         itemlist = list_all(item)
         if itemlist[-1].title == 'Siguiente >>':
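The reworked findvideos() above no longer queries the goovie.co links API; it downloads each intermediate viewer page and unpacks its packed JavaScript to recover the real server URL. A condensed sketch of that resolution step, assuming the same lib.jsunpack helper and regexes as the diff (the fallback powvideo branch is omitted):

# Hedged sketch, not part of the commit: resolving one Goovie viewer link.
import re
from core import httptools, scrapertools
from lib import jsunpack

def resolve_visor(url, referer):
    data = httptools.downloadpage(url, headers={'referer': referer},
                                  follow_redirects=False).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    packed = scrapertools.find_single_match(data, '(eval\(.*?);var')
    if not packed:
        return ''
    unpacked = jsunpack.unpack(packed)  # reverses the p,a,c,k,e,d JS packer
    server = scrapertools.find_single_match(unpacked, "src:.'(http://\D+)/")
    video_id = scrapertools.find_single_match(
        unpacked, "src:.'http://\D+/.*?description:.'(.*?).'")
    return '%s/%s' % (server, video_id) if server and video_id else ''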

View File

@@ -77,10 +77,10 @@ def submenu(item):
                          url=host % "list/ultimas-peliculas" + ext, text_color=color2,
                          thumbnail=host % "list/ultimas-peliculas/thumbnail_167x250.jpg",
                          fanart=host % "list/ultimas-peliculas/background_1080.jpg", viewmode="movie_with_plot"))
-    itemlist.append(Item(channel=item.channel, title="Destacados", action="entradas",
-                         url=host % "list/000-novedades" + ext, text_color=color2,
-                         thumbnail=host % "list/screener/thumbnail_167x250.jpg",
-                         fanart=host % "list/screener/background_1080.jpg", viewmode="movie_with_plot"))
+    # itemlist.append(Item(channel=item.channel, title="Destacados", action="entradas",
+    #                      url=host % "list/000-novedades" + ext, text_color=color2,
+    #                      thumbnail=host % "list/screener/thumbnail_167x250.jpg",
+    #                      fanart=host % "list/screener/background_1080.jpg", viewmode="movie_with_plot"))
     itemlist.append(Item(channel=item.channel, title="Más vistas", action="entradas",
                          url=host % "list/peliculas-mas-vistas" + ext, text_color=color2,
                          thumbnail=host % "list/peliculas-mas-vistas/thumbnail_167x250.jpg",
@@ -167,7 +167,7 @@ def entradas(item):
             #if child['year']:
             #    title += " (" + child['year'] + ")"
             #title += quality
+            thumbnail += "|User-Agent=%s" % httptools.get_user_agent
             video_urls = []
             for k, v in child.get("video", {}).items():
                 for vid in v:
@@ -232,6 +232,7 @@ def entradasconlistas(item):
             thumbnail = host % "list/%s/thumbnail_167x250.jpg" % child["id"]
             fanart = host % "list/%s/background_1080.jpg" % child["id"]
+            thumbnail += "|User-Agent=%s" % httptools.get_user_agent
             itemlist.append(Item(channel=item.channel, action=action, title=title,
                                  url=url, thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, show=show,
                                  infoLabels=infolabels, contentTitle=fulltitle, viewmode="movie_with_plot",
@@ -295,7 +296,7 @@ def entradasconlistas(item):
                 for vid in v:
                     video_urls.append(["http://%s.pelisipad.com/s/transcoder/%s" % (vid["server"], vid["url"]) + "?%s",
                                        vid["height"]])
+            thumbnail += "|User-Agent=%s" % httptools.get_user_agent
             itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, video_urls=video_urls,
                                  thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels,
                                  contentTitle=fulltitle, viewmode="movie_with_plot", text_color=color3))
@@ -347,6 +348,7 @@ def series(item):
             if child.get("numberOfSeasons") and "- Temporada" not in title:
                 title += " (Temps:%s)" % child['numberOfSeasons']
+            thumbnail += "|User-Agent=%s" % httptools.get_user_agent
             itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, text_color=color3,
                                  thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels,
                                  contentTitle=fulltitle, viewmode="movie_with_plot", show=fulltitle))
@@ -414,6 +416,7 @@ def episodios(item):
                 title = fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
             except:
                 title = fulltitle = child['id'].replace("-", " ")
+            thumbnail += "|User-Agent=%s" % httptools.get_user_agent
             itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
                                  fanart=fanart, fulltitle=fulltitle, contentTitle=fulltitle, viewmode="movie",
                                  show=item.show, infoLabels=infoLabels, video_urls=video_urls, extra="episodios",
@@ -491,6 +494,7 @@ def nuevos_cap(item):
             else:
                 title = fulltitle = child['name']
+            thumbnail += "|User-Agent=%s" % httptools.get_user_agent
             itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
                                  fanart=fanart, fulltitle=fulltitle, contentTitle=fulltitle, viewmode="movie",
                                  show=item.fulltitle, infoLabels=infoLabels, video_urls=video_urls, extra="nuevos",
@@ -571,6 +575,7 @@ def listas(item):
             infolabels['title'] = title
             try:
                 from core import videolibrarytools
+                thumbnail += "|User-Agent=%s" % httptools.get_user_agent
                 new_item = item.clone(title=title, url=url, fulltitle=title, fanart=fanart, extra="findvideos",
                                       thumbnail=thumbnail, infoLabels=infolabels, category="Cine")
                 videolibrarytools.add_movie(new_item)
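Every PelisIpad hunk appends a User-Agent to artwork URLs. This relies on the Kodi convention that HTTP headers can be appended to an image path after a '|' separator; Kodi strips everything after the pipe and sends it as request headers when fetching the image. A small illustration (the URL and UA string are placeholders, not the values httptools supplies):

# Hedged sketch, not part of the commit: Kodi's "path|Header=Value" artwork
# convention. Header values are usually URL-quoted when they contain spaces.
import urllib

thumbnail = "http://s1.pelisipad.com/s/thumbnail_167x250.jpg"  # hypothetical
user_agent = "Mozilla/5.0 (X11; Linux x86_64)"
thumbnail += "|User-Agent=%s" % urllib.quote(user_agent)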

View File

@@ -212,21 +212,21 @@ def new_episodes(item):
     itemlist = []
     data = get_source(item.url)
     data = scrapertools.find_single_match(data,
                                           '<center>Series Online : Capítulos estrenados recientemente</center>.*?</ul>')
-    patron = '<li><h6.*?src="([^"]+)".*?href="([^"]+)">.*?src="([^"]+)".*? data-original-title=" (\d+x\d+).*?'
+    patron = '<li><h6.*?src="([^"]+)".*?alt=" (\d+x\d+).+?".*?href="([^"]+)">.*?src="([^"]+)"'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    for lang_data, scrapedurl, scrapedthumbnail, scrapedinfo, in matches:
+    for lang_data, scrapedinfo, scrapedurl, scrapedthumbnail in matches:
-        url = host+scrapedurl
+        url = scrapedurl
         thumbnail = scrapedthumbnail
         scrapedinfo = scrapedinfo.split('x')
         season = scrapedinfo[0]
         episode = scrapedinfo[1]
-        scrapedtitle = scrapertools.find_single_match(url, 'capitulo/([^/]+)/').replace("-", " ")
-        title = '%s - %sx%s' % (scrapedtitle, season, episode )
+        scrapedtitle = scrapertools.find_single_match(url, 'capitulo/([^/]+)/')
+        url = '%scapitulos/%s' % (host, scrapedtitle)
+        title = '%s - %sx%s' % (scrapedtitle.replace('-', ' '), season, episode )
         title, language = add_language(title, lang_data)
         itemlist.append(Item(channel=item.channel,
                              action='seasons',
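The new pattern captures the language flag, the season/episode tag (now read from the alt attribute), the episode URL and the thumbnail, in that order. A quick sanity check against a hypothetical snippet imitating the new markup (the real HTML may differ):

# Hedged check, not part of the commit: exercising the reworked pattern.
import re

sample = ('<li><h6><img src="/flags/es.png"><span alt=" 2x05 estreno">'
          '</span></h6><a href="https://example.org/capitulo/breaking-bad/2/5/">'
          '<img src="/thumbs/breaking-bad.jpg"></a></li>')
patron = '<li><h6.*?src="([^"]+)".*?alt=" (\d+x\d+).+?".*?href="([^"]+)">.*?src="([^"]+)"'
for lang_data, info, url, thumb in re.findall(patron, sample, re.DOTALL):
    season, episode = info.split('x')
    print lang_data, season, episode, url, thumb

As the + lines above show, the channel then derives the show slug from the URL and rebuilds a canonical %scapitulos/%s URL from it.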

View File

@@ -154,6 +154,13 @@ def render_items(itemlist, parent_item):
                 valid_genre = True
             elif anime:
                 valid_genre = True
+        elif 'siguiente' in item.title.lower() and '>' in item.title:
+            item.thumbnail = get_thumb("next.png")
+        elif 'add' in item.action:
+            if 'pelicula' in item.action:
+                item.thumbnail = get_thumb("videolibrary_movie.png")
+            elif 'serie' in item.action:
+                item.thumbnail = get_thumb("videolibrary_tvshow.png")
         if unify_enabled and parent_item.channel != 'alfavorites':
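The hunk above is the "generic thumbs" part of the commit message: pagination and add-to-videolibrary items get shared artwork when a channel supplies none. Extracted as a stand-alone sketch (the helper name assign_generic_thumb is ours; get_thumb comes from channelselector, as elsewhere in the repo):

# Hedged sketch, not part of the commit: the same fallback logic in isolation.
from channelselector import get_thumb

def assign_generic_thumb(item):
    if 'siguiente' in item.title.lower() and '>' in item.title:
        item.thumbnail = get_thumb("next.png")          # "Siguiente >>>" pagination
    elif 'add' in (item.action or ''):
        if 'pelicula' in item.action:
            item.thumbnail = get_thumb("videolibrary_movie.png")
        elif 'serie' in item.action:
            item.thumbnail = get_thumb("videolibrary_tvshow.png")
    return item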
@@ -1071,8 +1078,8 @@ def play_torrent(item, xlistitem, mediaurl):
     #### Compatibilidad con Kodi 18: evita cuelgues/cancelaciones cuando el .torrent se lanza desde pantalla convencional
     if xbmc.getCondVisibility('Window.IsMedia'):
-        xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem)  #Preparamos el entorno para evutar error Kod1 18
-        time.sleep(1)  #Dejamos que se ejecute
+        xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem)  #Preparamos el entorno para evitar error Kodi 18
+        time.sleep(1)  #Dejamos tiempo para que se ejecute
     mediaurl = urllib.quote_plus(item.url)
     if ("quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]) and item.infoLabels['tmdb_id']:  #Llamada con más parámetros para completar el título
@@ -1083,17 +1090,17 @@
         xbmc.executebuiltin("PlayMedia(" + torrent_options[seleccion][1] % mediaurl + ")")
-    #Seleccionamos que clientes torrent soportamos para el marcado de vídeos vistos
-    if "quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]:
-        time_limit = time.time() + 150  #Marcamos el timepo máx. de buffering
-        while not is_playing() and time.time() < time_limit:  #Esperamos mientra buffera
-            time.sleep(5)  #Repetimos cada intervalo
-        #logger.debug(str(time_limit))
-        if item.strm_path and is_playing():  #Sólo si es de Videoteca
-            from platformcode import xbmc_videolibrary
-            xbmc_videolibrary.mark_auto_as_watched(item)  #Marcamos como visto al terminar
-        #logger.debug("Llamado el marcado")
+    #Seleccionamos que clientes torrent soportamos para el marcado de vídeos vistos: asumimos que todos funcionan
+    #if "quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]:
+    time_limit = time.time() + 150  #Marcamos el tiempo máx. de buffering
+    while not is_playing() and time.time() < time_limit:  #Esperamos mientras buffera
+        time.sleep(5)  #Repetimos cada intervalo
+    #logger.debug(str(time_limit))
+    if item.strm_path and is_playing():  #Sólo si es de Videoteca
+        from platformcode import xbmc_videolibrary
+        xbmc_videolibrary.mark_auto_as_watched(item)  #Marcamos como visto al terminar
+    #logger.debug("Llamado el marcado")
     if seleccion == 1:
         from platformcode import mct

Binary file not shown.

Before: 10 KiB · After: 27 KiB