correcciones y novedades

- AnimeFlv: Corrección para videoteca
- BlogHorror: Corrección por cambio de estructura
- CanalPelis: Corrección por cambio de estructura
- Dilo: Corrección en listados de episodios
- FanPelis: Corrección en la detección de enlaces
- PelisPlus: Corrección en la detección de temporadas, episodios y enlaces
- PelisR: Corrección en la detección de enlaces
- RetroSeries: Corrección por cambio de estructura
- TuPelicula: Nuevo canal
This commit is contained in:
Alfa-beto
2018-10-31 12:24:45 -03:00
committed by GitHub
parent e700becfed
commit d16e3efffb
10 changed files with 402 additions and 81 deletions

View File

@@ -181,7 +181,7 @@ def episodios(item):
itemlist.append(item.clone(title=title, url=url, action='findvideos', show=info[1]))
itemlist = itemlist[::-1]
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca",
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios"))
return itemlist

View File

@@ -30,8 +30,8 @@ def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel, fanart=fanart, title="Todas", action="list_all", url=host,
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, fanart=fanart, title="Todas", action="list_all",
url=host+'/category/terror', thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, fanart=fanart, title="Asiaticas", action="list_all",
url=host+'/category/asiatico', thumbnail=get_thumb('asiaticas', auto=True)))

View File

@@ -348,31 +348,38 @@ def episodios(item):
return itemlist
def findvideos(item):
logger.info()
from lib import generictools
import urllib
import base64
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
patron = '<div id="option-(\d+)" class="play-box-iframe.*?src="([^"]+)" frameborder="0" scrolling="no" allowfullscreen></iframe>'
patron = 'data-post="(\d+)" data-nume="(\d+)".*?img src=\'([^\']+)\''
matches = re.compile(patron, re.DOTALL).findall(data)
for id, option, lang in matches:
lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
if lang == 'en':
lang = 'VOSE'
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type':'movie'}
post = urllib.urlencode(post)
test_url = '%swp-admin/admin-ajax.php' % host
new_data = httptools.downloadpage(test_url, post=post, headers={'Referer':item.url}).data
hidden_url = scrapertools.find_single_match(new_data, "src='([^']+)'")
new_data = httptools.downloadpage(hidden_url, follow_redirects=False)
try:
b64_url = scrapertools.find_single_match(new_data.headers['location'], "y=(.*)")
url = base64.b64decode(b64_url)
except:
url = hidden_url
if url != '':
itemlist.append(
Item(channel=item.channel, url=url, title='%s', action='play', language=lang,
infoLabels=item.infoLabels))
for option, url in matches:
datas = httptools.downloadpage(urlparse.urljoin(host, url),
headers={'Referer': item.url}).data
patron = '<iframe[^>]+src="([^"]+)"'
url = scrapertools.find_single_match(datas, patron)
lang = scrapertools.find_single_match(
data, '<li><a class="options" href="#option-%s"><b class="icon-play_arrow"><\/b> (.*?)<span class="dt_flag">' % option)
lang = lang.replace('Español ', '').replace('B.S.O. ', '')
server = servertools.get_server_from_url(url)
title = "%s [COLOR yellow](%s) (%s)[/COLOR]" % (item.contentTitle, server.title(), lang)
itemlist.append(item.clone(action='play', url=url, title=title, extra1=title,
server=server, language = lang, text_color=color3))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",

View File

@@ -204,14 +204,13 @@ def episodesxseason(item):
data = jsontools.load(httptools.downloadpage(seasons_url, post=post, headers=headers).data)
infoLabels = item.infoLabels
for dict in data:
episode = dict['number']
epi_name = dict['name']
title = '%sx%s - %s' % (season, episode, epi_name)
url = '%s%s/' % (host, dict['permalink'])
infoLabels['episode'] = episode
itemlist.append(Item(channel=item.channel, title=title, action='findvideos', url=url,
contentEpisodeNumber=season, id=item.id, infoLabels=infoLabels))
contentEpisodeNumber=episode, id=item.id, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

View File

@@ -247,10 +247,17 @@ def episodesxseason(item):
def findvideos(item):
logger.info()
import urllib
itemlist = []
data = get_source(item.url)
patron = '<div class="movieplay"><iframe src="([^"]+)"'
player = scrapertools.find_single_match(data, "({'action': 'movie_player','foobar_id':\d+,})")
post = eval(player)
post = urllib.urlencode(post)
data = httptools.downloadpage(host+'wp-admin/admin-ajax.php', post=post, headers={'Referer':item.url}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = '<iframe src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)

View File

@@ -19,7 +19,7 @@ from lib import generictools
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_quality = []
list_quality = ['360p', '480p', '720p', '1080']
list_servers = [
'directo',
@@ -109,7 +109,8 @@ def list_all(item):
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
title = scrapedtitle
year = scrapertools.find_single_match(scrapedtitle, '(\d{4})')
title = scrapertools.find_single_match(scrapedtitle, '([^\(]+)\(?').strip()
thumbnail = scrapedthumbnail
filter_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w300", "")
filter_list = {"poster_path": filter_thumb}
@@ -120,14 +121,14 @@ def list_all(item):
title=title,
url=url,
thumbnail=thumbnail,
infoLabels={'filtro':filter_list})
infoLabels={'filtro':filter_list, 'year':year})
if item.type == 'peliculas' or 'serie' not in url:
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
new_item.contentTitle = title
else:
new_item.action = 'seasons'
new_item.contentSerieName = scrapedtitle
new_item.contentSerieName = title
itemlist.append(new_item)
@@ -147,7 +148,7 @@ def seasons(item):
itemlist=[]
data=get_source(item.url)
patron='data-toggle="tab">TEMPORADA (\d+)</a>'
patron='data-toggle="tab">TEMPORADA.?(\d+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -182,8 +183,7 @@ def episodesxseasons(item):
season = item.infoLabels['season']
data=get_source(item.url)
season_data = scrapertools.find_single_match(data, 'id="pills-vertical-%s">(.*?)</div>' % season)
patron='href="([^"]+)".*?block">Capitulo (\d+) - ([^<]+)<'
patron='href="([^"]+)".*?block">Capitulo(\d+) -.?([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(season_data)
infoLabels = item.infoLabels
@@ -218,36 +218,53 @@ def section(item):
def findvideos(item):
logger.info()
import urllib
itemlist = []
data = get_source(item.url)
servers_page = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
data = get_source(servers_page)
patron = '<a href="([^"]+)"'
patron = 'video\[\d+\] = "([^"]+)";'
matches = re.compile(patron, re.DOTALL).findall(data)
for enc_url in matches:
url_data = get_source(enc_url, referer=item.url)
hidden_url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
if 'server' in hidden_url:
hidden_data = get_source(hidden_url)
url = scrapertools.find_single_match(hidden_data, '<iframe src="([^"]+)"')
else:
url = hidden_url
if 'pelishd.tv' in url:
vip_data = httptools.downloadpage(url, headers={'Referer':item.url}, follow_redirects=False).data
dejuiced = generictools.dejuice(vip_data)
url = scrapertools.find_single_match(dejuiced, '"file":"([^"]+)"')
for video_url in matches:
language = 'latino'
if not config.get_setting('unify'):
title = ' [%s]' % language.capitalize()
else:
title = ''
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=IDIOMAS[language],
infoLabels=item.infoLabels))
if 'pelisplus.net' in video_url:
referer = video_url
post = {'r':item.url}
post = urllib.urlencode(post)
video_url = video_url.replace('/v/', '/api/sources/')
url_data = httptools.downloadpage(video_url, post=post, headers={'Referer':referer}).data
patron = '"file":"([^"]+)","label":"([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(url_data)
for url, quality in matches:
url = url.replace('\/', '/')
itemlist.append(
Item(channel=item.channel, title='%s' + title, url=url, action='play', language=IDIOMAS[language],
quality=quality, infoLabels=item.infoLabels))
else:
url_data = get_source(video_url)
url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
if 'server' in url:
hidden_data = get_source(hidden_url)
url = scrapertools.find_single_match(hidden_data, '<iframe src="([^"]+)"')
else:
url = url
if 'pelishd.net' in url:
vip_data = httptools.downloadpage(url, headers={'Referer':item.url}, follow_redirects=False).data
dejuiced = generictools.dejuice(vip_data)
url = scrapertools.find_single_match(dejuiced, '"file":"([^"]+)"')
if url != '':
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=IDIOMAS[language],
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

View File

@@ -266,10 +266,13 @@ def findvideos(item):
pass
else:
url = scrapedurl
url = url +"|referer=%s" % item.url
itemlist.append(
Item(channel=item.channel, url=url, title=title, action='play', quality=quality, language=IDIOMAS[lang],
infoLabels=item.infoLabels))
try:
url = url +"|referer=%s" % item.url
itemlist.append(
Item(channel=item.channel, url=url, title=title, action='play', quality=quality, language=IDIOMAS[lang],
infoLabels=item.infoLabels))
except:
pass
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())

View File

@@ -35,10 +35,10 @@ def mainlist(item):
section='genres'))
itemlist.append(item.clone(title="Por Año", action="section", url=host, thumbnail=get_thumb('year', auto=True),
section='year'))
section='releases'))
itemlist.append(item.clone(title="Alfabetico", action="section", url=host, thumbnail=get_thumb('alphabet', auto=True),
section='abc'))
#itemlist.append(item.clone(title="Alfabetico", action="section", url=host, thumbnail=get_thumb('alphabet', auto=True),
# section='glossary'))
itemlist.append(item.clone(title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))
@@ -49,7 +49,7 @@ def mainlist(item):
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
@@ -57,11 +57,12 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
patron = '<article id=post-.*?<a href=(.*?)><img src=(.*?) alt=(.*?)><.*?<span>(.*?)<'
patron = '<article id="post-\d+.*?<img src="([^"]+)" alt="([^"]+)">.*?'
patron += '<a href="([^"]+)">.*?</h3> <span></span> <span>(\d{4})<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
url = scrapedurl
contentSerieName = scrapedtitle
@@ -76,8 +77,7 @@ def list_all(item):
tmdb.set_infoLabels_itemlist(itemlist, True)
# Paginación
url_next_page = scrapertools.find_single_match(data,'rel=next.*?href=(.*?) ')
url_next_page = scrapertools.find_single_match(data, "<span class=\"current\">\d+</span><a href='([^']+)'")
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
@@ -88,8 +88,8 @@ def section(item):
itemlist = []
data = get_source(item.url)
data = scrapertools.find_single_match(data, '<ul class=%s(.*?)</ul>' % item.section)
patron = '<a href=(.*?)>(.*?)</a>'
data = scrapertools.find_single_match(data, '<ul class="%s(.*?)</ul>' % item.section)
patron = '<a href="([^"]+)".?>(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
@@ -103,7 +103,7 @@ def seasons(item):
itemlist = []
data = get_source(item.url)
patron = '<span class=title>Temporada(\d+) <'
patron = '<span class="title">Temporada (\d+) <'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle in matches:
@@ -138,8 +138,8 @@ def episodesxseason(item):
data = get_source(item.url)
infoLabels = item.infoLabels
season = infoLabels['season']
patron = '<img src=([^>]+)></a></div><div class=numerando>%s - (\d+)</div>' % season
patron += '<div class=episodiotitle><a href=(.*?)>(.*?)</a><'
patron = '<img src="([^>]+)"></a></div><div class="numerando">%s+ - (\d+)</div>' % season
patron += '<div class="episodiotitle"><a href="([^"]+)">(.*?)</a><'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedepi, scrapedurl, scrapedtitle in matches:
@@ -156,24 +156,29 @@ def episodesxseason(item):
def findvideos(item):
logger.info()
from lib import generictools
import urllib
itemlist = []
data = get_source(item.url)
patron = 'id=([^ ]+) class=play-box-iframe .*?src=(.*?) frameborder=0.*?'
patron = 'data-post="(\d+)" data-nume="(\d+)".*?img src=\'([^\']+)\''
matches = re.compile(patron, re.DOTALL).findall(data)
for id, option, lang in matches:
lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
if lang == 'ar':
lang = 'lat'
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type':'tv'}
post = urllib.urlencode(post)
for option, scrapedurl in matches:
#language = scrapertools.find_single_match(data, '#%s.*?dt_flag><img src=.*?flags/(.*?).png' % option)
#title = '%s [%s]'
language = ''
title = '%s'
SerieName = item.contentSerieName
itemlist.append(Item(channel=item.channel, title=title, contentSerieName=SerieName, url=scrapedurl,
action='play', language=language, infoLabels=item.infoLabels))
test_url = '%swp-admin/admin-ajax.php' % host
new_data = httptools.downloadpage(test_url, post=post, headers={'Referer':item.url}).data
url = scrapertools.find_single_match(new_data, "src='([^']+)'")
if url != '':
itemlist.append(
Item(channel=item.channel, url=url, title='%s', action='play', language=lang,
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
#itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % (i.server.capitalize(), i.language))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return itemlist
def search_results(item):
@@ -182,7 +187,8 @@ def search_results(item):
itemlist = []
data = get_source(item.url)
patron = '<article.*?<a href=(.*?)><img src=(.*?) alt=(.*?)><.*?year>(.*?)<.*?<p>(.*?)</p>'
data = scrapertools.find_single_match(data, '<h1>Resultados encontrados:(.*?)genres')
patron = '<article.*?<a href="([^"]+)"><img src="([^"]+)".*?alt="([^"]+)".*?class="year">(\d{4}).*?<p>([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedplot in matches:

View File

@@ -0,0 +1,77 @@
{
"id": "tupelicula",
"name": "TuPelicula",
"active": true,
"adult": false,
"language": ["lat", "cast", "*"],
"thumbnail": "https://i.postimg.cc/W4TbdCDP/tupelicula.png",
"banner": "",
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
}
]
}

View File

@@ -0,0 +1,205 @@
# -*- coding: utf-8 -*-
# -*- Channel TuPelicula -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://www.tupelicula.tv/'
IDIOMAS = {'la_la': 'LAT', 'es_es':'CAST', 'en_es':'VOSE', 'en_en':'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['xdrive', 'bitertv', 'okru']
def get_source(url, referer=None):
    """Download *url* and return its HTML with layout whitespace collapsed.

    When *referer* is given it is sent as the Referer header.
    """
    logger.info()
    if referer is None:
        raw = httptools.downloadpage(url).data
    else:
        raw = httptools.downloadpage(url, headers={'Referer': referer}).data
    # Strip newlines, tabs and repeated spaces so regexes can run flat.
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def mainlist(item):
    """Build the channel's main menu."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)

    # (title, action, url, thumb) tuples; url is None for sections/search handled below.
    listing_entries = [
        ("Todas", host, 'all'),
        ("Castellano", host + 'filter?language=1', 'cast'),
        ("Latino", host + 'filter?language=2', 'lat'),
        ("VOSE", host + 'filter?language=4', 'vose'),
    ]
    itemlist = [Item(channel=item.channel, title=title, action="list_all", url=url,
                     thumbnail=get_thumb(thumb, auto=True))
                for title, url, thumb in listing_entries]

    itemlist.append(Item(channel=item.channel, title="Generos", action="section",
                         thumbnail=get_thumb('genres', auto=True)))
    itemlist.append(Item(channel=item.channel, title="Por Años", action="section",
                         thumbnail=get_thumb('year', auto=True)))
    itemlist.append(Item(channel=item.channel, title='Buscar', action="search",
                         url=host + 'search?q=', thumbnail=get_thumb('search', auto=True)))

    autoplay.show_option(item.channel, itemlist)
    return itemlist
def list_all(item):
    """Scrape a listing page into movie items, hiding adult genres unless enabled."""
    logger.info()
    itemlist = []
    full_data = get_source(item.url)
    data = scrapertools.find_single_match(full_data, '<div id="movie-list"(.*?)</ul>')
    patron = ('<a href="([^"]+)".*?data-original="([^"]+)" alt="([^"]+)".*?'
              '<div class="_audio">(.*?)"label_year">(\d{4}) &bull;([^<]+)<')

    for scrapedurl, scrapedthumbnail, scrapedtitle, lang_data, year, genre in \
            re.compile(patron, re.DOTALL).findall(data):
        # Title up to the first parenthesis (drops the year suffix).
        title = scrapertools.find_single_match(scrapedtitle, '([^\(]+)')
        is_adult = genre.lower() in ['adultos', 'erotico']
        if is_adult and not config.get_setting('adult_mode') > 0:
            continue
        itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl,
                             action='findvideos', thumbnail='https:' + scrapedthumbnail,
                             contentTitle=title, language=get_language(lang_data),
                             infoLabels={'year': year}))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination — only offered when the current page produced results.
    if itemlist:
        next_page = scrapertools.find_single_match(
            full_data, '<li><a href="([^"]+)"><i class="fa fa-angle-right">')
        if next_page:
            itemlist.append(Item(channel=item.channel, action="list_all",
                                 title='Siguiente >>>', url=next_page))
    return itemlist
def section(item):
    """List category links (genres) scraped from the home page."""
    logger.info()
    itemlist = []
    data = get_source(host)
    if item.title == 'Generos':
        # Narrow to the genres <ul> block.
        data = scrapertools.find_single_match(data, '>Películas por género</div>(.*?)</ul>')
    patron = '<a href="([^"]+)"><span class="icon"></span>.?([^<]+)</a>'
    for url, title in re.compile(patron, re.DOTALL).findall(data):
        adult_category = title.lower() in ['adultos', 'erotico']
        if not adult_category or config.get_setting('adult_mode') > 0:
            itemlist.append(Item(channel=item.channel, title=title, url=url, action='list_all'))
    return itemlist
def get_language(lang_data):
    """Return the list of language labels found in flag-icon HTML.

    *lang_data* is an HTML fragment containing ``.../flags/<code>.png`` images.
    Duplicate languages are collapsed; order of first appearance is kept.
    Fix: an unmapped flag code previously raised ``KeyError`` (IDIOMAS[lang]) and
    aborted the whole listing — unknown codes are now passed through as-is.
    """
    logger.info()
    language = []
    lang_list = scrapertools.find_multiple_matches(lang_data, '/flags/(.*?).png"?')
    for lang in lang_list:
        # Tolerate flags the site adds later that are not in the known map.
        lang = IDIOMAS.get(lang, lang)
        if lang not in language:
            language.append(lang)
    return language
def findvideos(item):
    """Resolve the playable links for a movie page."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    player = scrapertools.find_single_match(data, '<iframe id="playerframe" data-src="([^"]+)"')
    data = get_source(player)

    option_patron = 'data-id="(\d+)">.*?img src="([^"]+)".*?>([^<]+)<'
    for scraped_id, lang_data, quality in re.compile(option_patron, re.DOTALL).findall(data):
        # Each option id resolves to a hidden iframe via the /player/rep/ endpoint.
        hidden_page = get_source('%splayer/rep/%s' % (host, scraped_id), player)
        url = scrapertools.find_single_match(
            hidden_page, 'iframe src=.?"([^"]+)"').replace('\\', '')
        itemlist.append(Item(channel=item.channel, title='%s', url=url, action='play',
                             language=get_language(lang_data), infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % x.server.capitalize())

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             url=item.url, action="add_pelicula_to_library",
                             extra="findvideos", contentTitle=item.contentTitle))
    return itemlist
def search(item, texto):
    """Run a site search for *texto*.

    Returns the result item list; on scrape failure returns a single
    'No hay elementos...' placeholder.  Fixes: an empty query previously fell
    through and implicitly returned ``None`` instead of a list, and the bare
    ``except:`` swallowed every error silently (now narrowed and logged).
    """
    logger.info()
    itemlist = []
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        try:
            return list_all(item)
        except Exception:
            # Best-effort behaviour kept, but the failure is now logged.
            import sys
            for line in sys.exc_info():
                logger.error("{0}".format(line))
            itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
    # Empty query (or scrape failure) yields a list, never None.
    return itemlist
def newest(categoria):
    """Return the newest entries for a 'Novedades' category name."""
    logger.info()
    itemlist = []
    item = Item()
    # Category name -> listing URL.
    category_urls = {
        'peliculas': host,
        'latino': host + 'filter?language=2',
        'castellano': host + 'filter?language=1',
        'infantiles': host + 'genre/25/infantil',
        'terror': host + 'genre/15/terror',
    }
    try:
        if categoria in category_urls:
            item.url = category_urls[categoria]
        item.pages = 3
        itemlist = list_all(item)
        # Drop the trailing pagination pseudo-item if present.
        if itemlist[-1].title == 'Siguiente >>>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist