Merge pull request #516 from Alfa-beto/fixes

Correcciones
This commit is contained in:
Alfa
2019-01-02 14:15:29 -05:00
committed by GitHub
8 changed files with 86 additions and 94 deletions

View File

@@ -90,10 +90,13 @@ def mainlist(item):
return itemlist
def get_source(url):
def get_source(url, referer=None):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}|"|\(|\)', "", data)
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
@@ -107,10 +110,11 @@ def lista(item):
post = {'tipo': 'episodios', '_token': 'rAqVX74O9HVHFFigST3M9lMa5VL7seIO7fT8PBkl'}
post = urllib.urlencode(post)
data = get_source(item.url)
patron = 'class=anime><div class=cover style=background-image: url(.*?)>.*?<a href=(.*?)><h2>(.*?)<\/h2><\/a><\/div>'
patron = 'class="anime"><a href="([^"]+)">'
patron +='<div class="cover" style="background-image: url\((.*?)\)">.*?<h2>([^<]+)<\/h2>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
thumbnail = host + scrapedthumbnail
title = scrapedtitle
@@ -124,13 +128,13 @@ def lista(item):
# Paginacion
next_page = scrapertools.find_single_match(data,
'<a href=([^ ]+) rel=next>&raquo;</a>')
'<a href="([^"]+)" data-ci-pagination-page="\d+" rel="next"')
next_page_url = scrapertools.decodeHtmlentities(next_page)
if next_page_url != "":
itemlist.append(Item(channel=item.channel,
action="lista",
title=">> Página siguiente",
url=host+next_page_url,
url=next_page_url,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
@@ -158,7 +162,7 @@ def generos(item):
itemlist = []
data = get_source(item.url)
patron = '<li class=><a href=https:\/\/www\.animeshd\.tv\/genero\/(.*?)>(.*?)<\/a><\/li>'
patron = '<a href="https:\/\/www\.animeshd\.tv\/genero\/([^"]+)">([^<]+)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
@@ -180,8 +184,8 @@ def episodios(item):
itemlist = []
data = get_source(item.url)
patron = '<li id=epi-.*? class=list-group-item.*?><a href=(.*?) class=badge.*?width=25 title=(.*?)>.*?<\/span>(' \
'.*?) (\d+)<\/li>'
patron = '<li id="epi-.*? class="list-group-item.*?"><a href="([^"]+)".*?'
patron += 'class="badge".*?width="25" title="([^"]+)">.*?<\/span>(.*?) (\d+)<\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels

View File

@@ -9,7 +9,7 @@
"banner": "",
"categories": [
"movie",
"vose",
"vos"
],
"settings": [
{

View File

@@ -426,7 +426,10 @@ def findvideos(item):
url = url.replace('\\', '')
servername = servertools.get_server_from_url(url)
if 'pelishd24.net' in url or 'stream.pelishd24.com' in url:
url = url.strip()
vip_data = httptools.downloadpage(url).data
if 'Archivo ELiminado' in vip_data:
continue
dejuiced = generictools.dejuice(vip_data)
patron = '"file":"([^"]+)"'
match = re.compile(patron, re.DOTALL).findall(dejuiced)

View File

@@ -286,7 +286,7 @@ def temporadas(item):
if len(matches) > 1:
for scrapedthumbnail, temporada, url in matches:
new_item = item.clone(action="episodios", season=temporada, url=url,
new_item = item.clone(action="episodesxseason", season=temporada, url=url,
thumbnail=host + scrapedthumbnail, extra='serie')
new_item.infoLabels['season'] = temporada
new_item.extra = ""
@@ -308,10 +308,18 @@ def temporadas(item):
text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
return itemlist
else:
return episodios(item)
return episodesxseason(item)
def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
from core import jsontools
@@ -331,7 +339,6 @@ def episodios(item):
episode = element['metas_formateadas']['nepisodio']
season = element['metas_formateadas']['ntemporada']
scrapedurl = element['url_directa']
if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
continue
title = "%sx%s: %s" % (season, episode.zfill(
@@ -359,10 +366,10 @@ def episodios(item):
reverse=config.get_setting('orden_episodios', __channel__))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# Opción "Añadir esta serie a la videoteca"
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
# if config.get_videolibrary_support() and len(itemlist) > 0:
# itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
# action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
# text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
return itemlist

View File

@@ -138,12 +138,15 @@ def episodesxseason(item):
data = get_source(item.url)
infoLabels = item.infoLabels
season = infoLabels['season']
patron = '<img src="([^>]+)"></a></div><div class="numerando">%s+ - (\d+)</div>' % season
patron = '<img src="([^>]+)"></a></div><div class="numerando">%s+ - (\d+|\d+\/\d+)</div>' % season
patron += '<div class="episodiotitle"><a href="([^"]+)">(.*?)</a><'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedepi, scrapedurl, scrapedtitle in matches:
if '/' in scrapedepi:
scrapedepi = scrapertools.find_single_match (scrapedepi, '(\d+)\/\d+')
title = '%sx%s - %s' % (season, scrapedepi, scrapedtitle)
infoLabels['episode'] = scrapedepi
if scrapedepi > 0:

View File

@@ -45,10 +45,11 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title='Nuevos Capitulos', url=host, action='new_episodes', type='tvshows',
thumbnail=get_thumb('new_episodes', auto=True)))
itemlist.append(Item(channel=item.channel, title='Ultimas', url=host + 'series?', action='list_all', type='tvshows',
itemlist.append(Item(channel=item.channel, title='Ultimas', url=host + 'series/estrenos', action='list_all',
type='tvshows',
thumbnail=get_thumb('last', auto=True)))
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'series?', action='list_all', type='tvshows',
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'series', action='list_all', type='tvshows',
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + 'search?s=',
@@ -83,19 +84,14 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
patron = '<article class=".*?">.*?<a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)">.*?'
patron +='<span class="year">(\d{4})</span>.*?<span class="(?:animes|tvs)">([^<]+)<'
patron = '<article class=".*?">.*? href="([^"]+)".*?<img src="([^"]+)".*?<h3 class="card-tvshow__title">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedtype in matches:
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
title = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
if scrapedtype == 'Anime':
action = 'episodesxseasons'
elif scrapedtype == 'Serie':
action = 'seasons'
action = 'seasons'
new_item = Item(channel=item.channel,
action=action,
@@ -103,15 +99,14 @@ def list_all(item):
url=url,
contentSerieName=scrapedtitle,
thumbnail=thumbnail,
type=scrapedtype,
infoLabels={'year':year})
)
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Paginación
url_next_page = scrapertools.find_single_match(data,'li><a href="([^"]+)" rel="next">&raquo;</a>')
url_next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
@@ -123,14 +118,16 @@ def seasons(item):
itemlist=[]
data=get_source(item.url)
patron='<li class="gridseason"><a href="([^"]+)"><span class="title">Temporada (\d+)</span>'
patron='<div class="season__title">Temporada (\d+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
item.type = 'Anime'
return episodesxseasons(item)
infoLabels = item.infoLabels
for scrapedurl, season in matches:
for season in matches:
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action='episodesxseasons',
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -156,23 +153,27 @@ def episodesxseasons(item):
itemlist = []
data=get_source(item.url)
patron='<a href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
if item.type == 'Anime':
season = '1'
patron = '<a class="episodie-list" href="([^"]+)" .*?</i> Episodio (\d+).*?</span>'
else:
season = item.infoLabels['season']
episode = len(matches)
for scrapedurl, scrapedtitle in matches:
patron = 'class="episodie-list" href="([^"]+)" title=".*?Temporada %s .*?pisodio (\d+).*?">' % season
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches:
patron = 'class="episodie-list" href="([^"]+)" title=".*?pisodio (\d+).*?">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, episode in matches:
infoLabels['episode'] = episode
url = scrapedurl
title = scrapedtitle.replace(' online', '')
title = '%sx%s - %s' % (season, episode, title)
title = '%sx%s - Episodio %s' % (season, episode, episode)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))
episode -= 1
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist[::-1]
@@ -183,7 +184,7 @@ def new_episodes(item):
itemlist = []
data = get_source(item.url)
patron = '<article class="contenedor">.*?<a href="([^"]+)" title=".*?">.*?data-src="([^"]+)" alt="([^"]+)">'
patron = '<div class="card-episodie shadow-sm"><a href="([^"]+)".*?data-src="([^"]+)" alt="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -248,34 +249,10 @@ def search(item, texto):
item.url = item.url + texto
if texto != '':
return search_results(item)
return list_all(item)
else:
return []
def search_results(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron = '<div class="search-results__img"><a href="([^"]+)" title=".*?"><img src="([^"]+)".*?'
patron += '<h2>([^<]+)</h2></a><div class="description">([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumb, scrapedtitle, scrapedplot in matches:
title = scrapedtitle
url = scrapedurl
thumbnail = scrapedthumb
plot = scrapedplot
new_item=Item(channel=item.channel, title=title, url=url, contentSerieName=title, thumbnail=thumbnail,
plot=plot, action='seasons')
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def newest(categoria):
logger.info()

View File

@@ -77,7 +77,7 @@ def list_all(item):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" >Página siguiente')
next_page = scrapertools.find_single_match(data, 'class=\'current\'>\d</span>.*?href="([^"]+)">')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
url=next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
@@ -187,12 +187,12 @@ def findvideos(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for link in matches:
if 'id=' in link:
id_type = 'id'
ir_type = 'ir'
elif 'ud=' in link:
id_type = 'ud'
ir_type = 'ur'
id_letter = scrapertools.find_single_match(link, '\?(\w)d')
id_type = '%sd' % id_letter
ir_type = '%sr' % id_letter
id = scrapertools.find_single_match(link, '%s=(.*)' % id_type)
base_link = scrapertools.find_single_match(link, '(.*?)%s=' % id_type)

View File

@@ -2185,21 +2185,20 @@ def acciones_trakt(item):
'runtime': config.get_localized_string(70471), 'popularity': config.get_localized_string(70472), 'percentage': config.get_localized_string(70473),
'votes': config.get_localized_string(70474), 'asc': config.get_localized_string(70475), 'desc': config.get_localized_string(70476)}
orden = valores[item.order] + " " + valores[item.how]
itemlist.append(item.clone(title=config.get_localized_string(70349) % orden, action="order_list",
text_color=color4))
# itemlist.append(item.clone(title=config.get_localized_string(70349) % orden, action="order_list",
# text_color=color4))
ratings = []
try:
if item.order:
if item.how == "asc":
reverse = False
else:
reverse = True
if item.order == "rank" or item.order == "added":
data = sorted(data, key=lambda x: x[item.order.replace("added", "listed_at")], reverse=reverse)
else:
order = item.order.replace("popularity", "votes").replace("percentage", "rating")
data = sorted(data, key=lambda x: x[x['type']].get(order, 0), reverse=reverse)
# if item.order:
# if item.how == "asc":
# reverse = False
# else:
# reverse = True
# if item.order == "rank" or item.order == "added":
# data = sorted(data, key=lambda x: x[item.order.replace("added", "last_collected_at")], reverse=reverse)
# else:
# order = item.order.replace("popularity", "votes").replace("percentage", "rating")
# data = sorted(data, key=lambda x: x[x['type']].get(order, 0), reverse=reverse)
for entry in data:
try:
@@ -2259,7 +2258,7 @@ def order_list(item):
logger.info()
list_controls = []
valores1 = ['rank', 'added', 'title', 'released', 'runtime', 'popularity', 'percentage', 'votes']
valores1 = ['rating', 'added', 'title', 'released', 'runtime', 'popularity', 'percentage', 'votes']
valores2 = ['asc', 'desc']
dict_values = {'orderby': valores1.index(item.order), 'orderhow': valores2.index(item.how)}
@@ -2268,9 +2267,8 @@ def order_list(item):
'type': 'list', 'default': 0, 'visible': True})
list_controls.append({'id': 'orderhow', 'label': 'De forma:', 'enabled': True,
'type': 'list', 'default': 0, 'visible': True})
list_controls[0]['lvalues'] = [config.get_localized_string(70003), config.get_localized_string(70469), config.get_localized_string(60230), config.get_localized_string(70470), config.get_localized_string(70471), config.get_localized_string(70472),
config.get_localized_string(70473), config.get_localized_string(70474)]
list_controls[1]['lvalues'] = [config.get_localized_string(70477), config.get_localized_string(70478)]
list_controls[0]['lvalues'] = ['rank', 'added', 'title', 'released', 'runtime', 'popularity', 'percentage', 'votes']
list_controls[1]['lvalues'] = ['asc', 'desc']
return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values,
caption=config.get_localized_string(70320), item=item, callback='order_trakt')