@@ -127,7 +127,6 @@ def peliculas(item):

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
    logger.info(data)

    patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'  # img, title.strip()
    patron += '<span class="icon-star2"></span>(.*?)/div>.*?'  # rating
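
The patron above is assembled in two pieces and then applied over the cleaned page with re.DOTALL. A minimal, standalone sketch of that scraping step follows; the sample HTML is invented and is not taken from the real site.

# Illustrative only: applying a patron built in pieces with re.DOTALL (sample HTML invented).
import re

sample_html = ('<div class="poster"><img src="http://example.com/img.jpg" alt="Some Title">'
               '<span class="icon-star2"></span>7.5</div>')

patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'  # img, title
patron += '<span class="icon-star2"></span>(.*?)/div>.*?'            # rating

matches = re.compile(patron, re.DOTALL).findall(sample_html)
for img, title, rating in matches:
    # the rating group keeps a trailing '<' because the pattern matches "/div>" rather than "</div>"
    print(img, title.strip(), rating.rstrip('<'))
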
@@ -144,14 +143,17 @@ def peliculas(item):

        contentTitle = scrapedtitle.partition(':')[0].partition(',')[0]
        title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
            scrapedtitle, year, quality)
        thumb_id = scrapertools.find_single_match(scrapedthumbnail, '.*?\/uploads\/(.*?)-')
        thumbnail = "/%s.jpg" % thumb_id
        filtro_list = {"poster_path": thumbnail}
        filtro_list = filtro_list.items()

        itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
                                   url=scrapedurl, infoLabels={'year': year, 'rating': rating},
                                   contentTitle=contentTitle, thumbnail=scrapedthumbnail,
                                   url=scrapedurl, infoLabels={'filtro':filtro_list},
                                   contentTitle=contentTitle, thumbnail=thumbnail,
                                   title=title, context="buscar_trailer", quality = quality))

    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    if item.page + 20 < len(matches):
        itemlist.append(item.clone(page=item.page + 20,
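
The notable change in this hunk is infoLabels={'filtro':filtro_list}: instead of year and rating, each item now carries the poster filename as a filter for the TMDb lookup that tmdb.set_infoLabels_itemlist performs over the whole list. The sketch below only illustrates the general "match candidate results against filter pairs" idea; the helper name and result structure are invented for the example and are not the real tmdb module API.

# Hypothetical illustration of a 'filtro'-style lookup: pick the search result whose
# fields match every (key, value) pair. Names and data are invented for the example.
def pick_by_filtro(results, filtro_items):
    for result in results:
        if all(str(value) in str(result.get(key, '')) for key, value in filtro_items):
            return result
    return None

results = [
    {"title": "Example A", "poster_path": "/abc123.jpg"},
    {"title": "Example B", "poster_path": "/zzz999.jpg"},
]
filtro_list = {"poster_path": "/abc123.jpg"}.items()
print(pick_by_filtro(results, filtro_list))  # -> the "Example A" entry
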
@@ -689,7 +689,7 @@ def get_enlaces(item, url, type):

        if servertools.is_server_enabled(server):
            scrapedtitle = " Ver en " + server.capitalize() + " [" + idioma + "/" + calidad + "]"
            itemlist.append(item.clone(action="play", url=scrapedurl, title=scrapedtitle, text_color=color2,
                                       extra="", server=server))
                                       extra="", server=server, language=idioma))

    if len(itemlist) == 1:
        itemlist.append(item.clone(title=" No hay enlaces disponibles", action="", text_color=color2))
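
A link is only listed when its server has an enabled resolver, and the new clone also carries language=idioma so later filtering can use it. A generic, standalone illustration of that gate follows; the server names and link data are invented.

# Generic illustration of the gate above: keep only links whose server is enabled.
enabled_servers = {"streamtape", "okru"}
links = [("streamtape", "Latino", "HD"), ("badhost", "Castellano", "SD")]

playable = [
    {"server": server, "language": idioma, "quality": calidad}
    for server, idioma, calidad in links
    if server in enabled_servers
]
print(playable)  # only the streamtape entry survives
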
@@ -169,6 +169,7 @@ def findvideos(item):

        videoitem.plot = info
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.infoLabels=item.infoLabels

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
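
The appended entry itself is cut off by the hunk; what the diff shows is the guard around it. A standalone restatement of that condition with stand-in names, showing when the videolibrary entry would be offered:

# Stand-in restatement of the condition above; names are illustrative only.
def should_offer_library_entry(videolibrary_support, itemlist, extra):
    # offer the entry only with library support on, at least one link found,
    # and when findvideos was not itself triggered from the videolibrary
    return videolibrary_support and len(itemlist) > 0 and extra != 'findvideos'

print(should_offer_library_entry(True, ['some_link'], ''))            # True
print(should_offer_library_entry(True, ['some_link'], 'findvideos'))  # False
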
@@ -56,11 +56,17 @@ def peliculas(item):

    data = httptools.downloadpage(item.url).data
    patron = '<a class="Ntooltip" href="([^"]+)">([^<]+)<span><br[^<]+'
    patron += '<img src="([^"]+)"></span></a>(.*?)<br'

    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []
    for scrapedurl, scrapedtitle, scrapedthumbnail, resto in matches:
        language = []
        plot = scrapertools.htmlclean(resto).strip()
        logger.debug('plot: %s' % plot)
        languages = scrapertools.find_multiple_matches(plot, r'\((V.)\)')
        quality = scrapertools.find_single_match(plot, r'(?:\[.*?\].*?)\[(.*?)\]')
        for lang in languages:
            language.append(lang)
        logger.debug('languages: %s' % languages)
        title = scrapedtitle + " " + plot
        contentTitle = scrapedtitle
        url = item.url + scrapedurl
@@ -73,7 +79,9 @@ def peliculas(item):

                             hasContentDetails = True,
                             contentTitle = contentTitle,
                             contentType = "movie",
                             context = ["buscar_trailer"]
                             context = ["buscar_trailer"],
                             language=language,
                             quality=quality
                             ))
    return itemlist
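
The new language and quality values come from two small regexes: \((V.)\) picks up version tags such as (VO) or (VL), and (?:\[.*?\].*?)\[(.*?)\] captures the second bracketed token as the quality. A standalone demo on an invented plot string:

# Standalone demo of the language/quality extraction used above; the plot text is invented.
import re

plot = "Una pelicula de ejemplo (VO) (VL) [2019] [HD 720p] Sinopsis de prueba."

languages = re.findall(r'\((V.)\)', plot)              # -> ['VO', 'VL']
quality = re.search(r'(?:\[.*?\].*?)\[(.*?)\]', plot)  # second [...] block
quality = quality.group(1) if quality else ''

print(languages, quality)  # ['VO', 'VL'] HD 720p
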
@@ -120,40 +120,51 @@ def peliculas(item):

    if len(matches_next_page) > 0:
        url_next_page = urlparse.urljoin(item.url, matches_next_page[0])

    for scrapedthumbnail, scrapedtitle, rating, calidad, scrapedurl, year in matches:
        if 'Proximamente' not in calidad:
    for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches:
        if 'Proximamente' not in quality:
            scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace(
                'Español Latino', '').strip()
            title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, calidad)
            title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, quality)

            new_item = Item(channel=__channel__, action="findvideos", contentTitle=scrapedtitle,
                            infoLabels={'year': year, 'rating': rating}, thumbnail=scrapedthumbnail,
                            url=scrapedurl, next_page=next_page, quality=calidad, title=title)
            if year:
                tmdb.set_infoLabels_item(new_item, __modo_grafico__)
            itemlist.append(new_item)

            itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
                                 infoLabels={"year":year, "rating":rating}, thumbnail=scrapedthumbnail,
                                 url=scrapedurl, next_page=next_page, quality=quality, title=title))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    if url_next_page:
        itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
                             url=url_next_page, next_page=next_page, folder=True, text_blod=True,
                             thumbnail=get_thumb("next.png")))
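
Pagination builds the absolute next-page URL with urlparse.urljoin and then emits the "» Siguiente »" item pointing at it. A minimal standalone sketch of the joining step, with invented URLs:

# Standalone demo of resolving a relative "next page" link against the current page URL.
try:
    from urlparse import urljoin       # Python 2, as used by the channel
except ImportError:
    from urllib.parse import urljoin   # Python 3

current_url = "https://example.com/peliculas/page/1/"
next_fragment = "/peliculas/page/2/"
print(urljoin(current_url, next_fragment))  # https://example.com/peliculas/page/2/
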
    for item in itemlist:
        if item.infoLabels['plot'] == '':
            data = httptools.downloadpage(item.url).data
            data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
            # logger.info(data)
            item.fanart = scrapertools.find_single_match(data,
                                                         "<meta property='og:image' content='([^']+)' />").replace(
                'w780', 'original')
            item.plot = scrapertools.find_single_match(data, '<div itemprop="description" class="wp-content">.*?<p>(['
                                                             '^<]+)</p>')
            item.plot = scrapertools.htmlclean(item.plot)
            item.infoLabels['director'] = scrapertools.find_single_match(data,
                                                                         '<div class="name"><a href="[^"]+">([^<]+)</a>')
            item.infoLabels['rating'] = scrapertools.find_single_match(data, '<b id="repimdb"><strong>([^<]+)</strong>')
            item.infoLabels['votes'] = scrapertools.find_single_match(data, '<b id="repimdb"><strong>['
                                                                            '^<]+</strong>\s(.*?) votos</b>')
    for no_plot in itemlist:
        if no_plot.infoLabels['plot'] == '':
            thumb_id = scrapertools.find_single_match(no_plot.thumbnail, '.*?\/\d{2}\/(.*?)-')
            thumbnail = "/%s.jpg" % thumb_id
            filtro_list = {"poster_path": thumbnail}
            filtro_list = filtro_list.items()
            no_plot.infoLabels={'filtro':filtro_list}
            tmdb.set_infoLabels_item(no_plot, __modo_grafico__)

            if no_plot.infoLabels['plot'] == '':
                data = httptools.downloadpage(no_plot.url).data
                data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
                # logger.info(data)
                no_plot.fanart = scrapertools.find_single_match(data,
                                                                "<meta property='og:image' content='([^']+)' />").replace(
                    'w780', 'original')
                no_plot.plot = scrapertools.find_single_match(data, '<div itemprop="description" '
                                                                    'class="wp-content">.*?<p>(['
                                                                    '^<]+)</p>')
                no_plot.plot = scrapertools.htmlclean(no_plot.plot)
                no_plot.infoLabels['director'] = scrapertools.find_single_match(data,
                                                                                '<div class="name"><a href="[^"]+">([^<]+)</a>')
                no_plot.infoLabels['rating'] = scrapertools.find_single_match(data, '<b id="repimdb"><strong>(['
                                                                                    '^<]+)</strong>')
                no_plot.infoLabels['votes'] = scrapertools.find_single_match(data, '<b id="repimdb"><strong>['
                                                                                   '^<]+</strong>\s(.*?) votos</b>')

    return itemlist
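
The rewritten loop is a two-step fallback: first retry the TMDb lookup using the poster filename as a 'filtro', and only if the plot is still empty scrape plot, director, rating and votes from the movie page itself. A compact standalone sketch of that shape; the lookup and scrape functions are stand-ins, not the real tmdb/scrapertools helpers.

# Stand-in sketch of the "fill missing metadata" fallback used above.
def lookup_by_poster(item):
    # pretend TMDb lookup keyed on the poster filename; returns nothing useful in this demo
    return {}

def scrape_page(item):
    # pretend page scrape; in the channel this is done with scrapertools regexes
    return {"plot": "Sinopsis extraida de la pagina", "director": "N. N.", "rating": "7.1"}

def fill_missing(item):
    if not item.get("plot"):
        item.update(lookup_by_poster(item))
        if not item.get("plot"):  # still empty: fall back to the page itself
            item.update(scrape_page(item))
    return item

print(fill_missing({"plot": "", "url": "https://example.com/pelicula"}))
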
@@ -240,7 +240,8 @@ def findvideos(item):

        ))
    for videoitem in templist:
        data = httptools.downloadpage(videoitem.url).data
        urls_list = scrapertools.find_multiple_matches(data, '({"type":.*?})')
        logger.debug(data)
        urls_list = scrapertools.find_multiple_matches(data, '{"reorder":1,"type":.*?}')
        for element in urls_list:
            json_data=jsontools.load(element)
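
The regex that pulls JSON blobs out of the page is narrowed so that only objects starting with "reorder":1 are kept before each blob is parsed with jsontools.load. A standalone sketch of that extract-then-parse step using the standard json module and an invented page snippet:

# Standalone demo: pull embedded JSON objects out of HTML with a regex, then parse them.
import json
import re

page = ('<script>var sources = [{"reorder":1,"type":"video/mp4","file":"http://example.com/a.mp4"},'
        '{"reorder":2,"type":"video/mp4","file":"http://example.com/b.mp4"}];</script>')

urls_list = re.findall(r'{"reorder":1,"type":.*?}', page)
for element in urls_list:
    json_data = json.loads(element)
    print(json_data["file"])  # http://example.com/a.mp4
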
@@ -253,6 +254,7 @@ def findvideos(item):

            new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
                      '=%s&srt=%s' % (url, sub)
            logger.debug('new_url: %s' % new_url)

            data = httptools.downloadpage(new_url).data
            data = re.sub(r'\\', "", data)
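
new_url is built with old-style % formatting across an implicit string continuation, and the response is then cleaned by deleting backslashes. A standalone demo with invented values; the escaped response shown is only an example of the kind of data this strips.

# Standalone demo of building the player URL and un-escaping the response.
import re

url = "http://example.com/video123"
sub = ""
new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
          '=%s&srt=%s' % (url, sub)
print(new_url)

data = '{"file":"http:\\/\\/cdn.example.com\\/stream.m3u8"}'  # invented escaped response
data = re.sub(r'\\', "", data)                                # drop the backslashes
print(data)  # {"file":"http://cdn.example.com/stream.m3u8"}
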
@@ -176,27 +176,45 @@ def peliculas(item):

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, calidad, year, scrapedtitle, scrapedthumbnail in matches:
        datas = httptools.downloadpage(scrapedurl).data
        datas = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", datas)
        # logger.info(datas)
    for scrapedurl, quality, year, scrapedtitle, scrapedthumbnail in matches:
        if '/ ' in scrapedtitle:
            scrapedtitle = scrapedtitle.partition('/ ')[2]
        contentTitle = scrapertools.find_single_match(datas, '<em class="pull-left">Titulo original: </em>([^<]+)</p>')
        contentTitle = scrapertools.decodeHtmlentities(contentTitle.strip())
        rating = scrapertools.find_single_match(datas, 'alt="Puntaje MPA IMDb" /></a><span>([^<]+)</span>')
        director = scrapertools.find_single_match(
            datas, '<div class="list-cast-info tableCell"><a href="[^"]+" rel="tag">([^<]+)</a></div>')
        title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle.strip(), calidad.upper())
        title = scrapedtitle
        contentTitle = title
        url = scrapedurl
        quality = quality
        thumbnail = scrapedthumbnail

        new_item = Item(channel=item.channel, action="findvideos", title=title, plot='', contentType='movie',
                        url=scrapedurl, contentQuality=calidad, thumbnail=scrapedthumbnail,
                        contentTitle=contentTitle, infoLabels={"year": year, 'rating': rating, 'director': director},
                        text_color=color3)
        itemlist.append(Item(channel=item.channel,
                             action="findvideos",
                             title=title, url=url,
                             quality=quality,
                             thumbnail=thumbnail,
                             contentTitle=contentTitle,
                             infoLabels={"year": year},
                             text_color=color3
                             ))

        if year:
            tmdb.set_infoLabels_item(new_item, __modo_grafico__)
        itemlist.append(new_item)
    # for scrapedurl, calidad, year, scrapedtitle, scrapedthumbnail in matches:
    #     datas = httptools.downloadpage(scrapedurl).data
    #     datas = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", datas)
    #     # logger.info(datas)
    #     if '/ ' in scrapedtitle:
    #         scrapedtitle = scrapedtitle.partition('/ ')[2]
    #     contentTitle = scrapertools.find_single_match(datas, '<em class="pull-left">Titulo original: </em>([^<]+)</p>')
    #     contentTitle = scrapertools.decodeHtmlentities(contentTitle.strip())
    #     rating = scrapertools.find_single_match(datas, 'alt="Puntaje MPA IMDb" /></a><span>([^<]+)</span>')
    #     director = scrapertools.find_single_match(
    #         datas, '<div class="list-cast-info tableCell"><a href="[^"]+" rel="tag">([^<]+)</a></div>')
    #     title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle.strip(), calidad.upper())
    #
    #     logger.debug('thumbnail: %s' % scrapedthumbnail)
    #     new_item = Item(channel=item.channel, action="findvideos", title=title, plot='', contentType='movie',
    #                     url=scrapedurl, contentQuality=calidad, thumbnail=scrapedthumbnail,
    #                     contentTitle=contentTitle, infoLabels={"year": year, 'rating': rating, 'director': director},
    #                     text_color=color3)
    #     itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    paginacion = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
    if paginacion:
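
Two details of the new listing loop are worth noting: bilingual titles such as "Título / English Title" keep only the part after '/ ' via str.partition, and the per-result page download disappears because metadata is now fetched once for the whole list with tmdb.set_infoLabels_itemlist. A standalone demo of the title cleanup, with an invented title:

# Standalone demo of the title cleanup used above.
scrapedtitle = "El Ejemplo / The Example"
if '/ ' in scrapedtitle:
    scrapedtitle = scrapedtitle.partition('/ ')[2]
print(scrapedtitle)  # The Example
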
@@ -267,13 +285,13 @@ def findvideos(item):

        if 'drive' not in servidores and 'streamvips' not in servidores and 'mediastream' not in servidores:
        if 'ultrastream' not in servidores:
            server = servertools.get_server_from_url('scrapedurl')
            server = servertools.get_server_from_url(scrapedurl)
            quality = scrapertools.find_single_match(
                datas, '<p class="hidden-xs hidden-sm">.*?class="magnet-download">([^<]+)p</a>')
            title = "Ver en: [COLOR yellowgreen][{}][/COLOR] [COLOR yellow][{}][/COLOR]".format(servidores.capitalize(),
                                                                                                quality.upper())

            itemlist.append(item.clone(action='play', title=title, url='url', quality=item.quality,
            itemlist.append(item.clone(action='play', title=title, url=scrapedurl, quality=item.quality,
                                       server=server, language=lang.replace('Español ', ''),
                                       text_color=color3, thumbnail=item.thumbnail))
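
This hunk fixes two quoting bugs: servertools.get_server_from_url and the cloned play item were being given the literal strings 'scrapedurl' and 'url' instead of the scraped URL variable, so server detection and playback received nonsense. A tiny standalone illustration of the difference; the detection function is a simplified stand-in, not the real servertools helper.

# Stand-in illustration of why passing the literal string 'scrapedurl' breaks server detection.
def get_server_from_url(url):
    # simplified stand-in: the real helper matches the URL against each server's patterns
    return "ultrastream" if "ultrastream" in url else ""

scrapedurl = "https://ultrastream.example/v/abc123"
print(get_server_from_url('scrapedurl'))  # '' -> the literal string never matches any server
print(get_server_from_url(scrapedurl))    # 'ultrastream'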