Merge pull request #291 from Intel11/master

Updated
This commit is contained in:
Alfa
2018-06-06 16:02:37 -05:00
committed by GitHub
6 changed files with 84 additions and 77 deletions

View File

@@ -33,13 +33,12 @@ def mainlist(item):
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host+"/lista-de-anime.php",
-thumbnail=thumb_series))
-#itemlist.append(Item(channel=item.channel, action="lista", title="Series Animadas", url=host,
-# thumbnail=thumb_series))
-#itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host,
-# thumbnail=thumb_series))
-#itemlist.append(Item(channel=item.channel, action="lista", title="Pokemon", url=host,
-# thumbnail=thumb_series))
+thumbnail=thumb_series, range=[0,19]))
+itemlist.append(Item(channel=item.channel, action="lista", title="Películas", url=host+"/catalogo.php?g=&t=peliculas&o=0",
+thumbnail=thumb_series, range=[0,19] ))
+itemlist.append(Item(channel=item.channel, action="lista", title="Especiales", url=host+"/catalogo.php?g=&t=especiales&o=0",
+thumbnail=thumb_series, range=[0,19]))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -60,8 +59,9 @@ def lista(item):
patron +=".+?<span .+?>(.+?)<\/span>" #scrapedplot
matches = scrapertools.find_multiple_matches(data, patron)
-for scrapedurl, scrapedthumbnail,scrapedtitle,scrapedplot in matches:
-if ":" in scrapedtitle:
+next_page = [item.range[0]+19, item.range[1]+20]
+for scrapedurl, scrapedthumbnail,scrapedtitle,scrapedplot in matches[item.range[0] : item.range[1]]:
+if ":" in scrapedtitle:
cad = scrapedtitle.split(":")
show = cad[0]
else:
@@ -81,9 +81,15 @@ def lista(item):
context2 = autoplay.context
context.extend(context2)
scrapedurl=host+scrapedurl
-itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, plot=scrapedplot,
-thumbnail=scrapedthumbnail, action="episodios", show=show, context=context))
-#tmdb.set_infoLabels(itemlist)
+if item.title!="Series":
+itemlist.append(item.clone(title=scrapedtitle, contentTitle=show,url=scrapedurl,
+thumbnail=scrapedthumbnail, action="findvideos", context=context))
+else:
+itemlist.append(item.clone(title=scrapedtitle, contentSerieName=show,url=scrapedurl, plot=scrapedplot,
+thumbnail=scrapedthumbnail, action="episodios", context=context))
+tmdb.set_infoLabels(itemlist, seekTmdb=True)
+itemlist.append(Item(channel=item.channel, url=item.url, range=next_page, title='Página Siguiente >>>', action='lista'))
return itemlist
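Review note: the new `range` parameter pages the scraped list in 19-item slices, and each page queues the next slice through `next_page`. A minimal standalone sketch of the technique (the helper and sample data below are ours, not the channel's API):

    # Slice-based pagination, assuming ~19 results per page as above.
    def paginate(matches, page_range=(0, 19), page_size=19):
        """Return this page's items plus the range for the next page."""
        start, end = page_range
        return matches[start:end], (start + page_size, end + page_size)

    items = ["item%d" % i for i in range(50)]
    page, nxt = paginate(items)          # items 0..18, next range (19, 38)
    page, nxt = paginate(items, nxt)     # items 19..37, next range (38, 57)

Note that the committed `next_page = [item.range[0]+19, item.range[1]+20]` advances the two bounds unevenly, so each page grows by one item and overlaps the previous one; the sketch keeps a fixed page size.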
@@ -92,16 +98,16 @@ def episodios(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<div class="pagina">(.+?)<\/div><div id="fade".+?>'
data = scrapertools.find_single_match(data, patron)
patron_caps = "<li><a href='(.+?)'>Cap(?:i|í)tulo: (.+?) - (.+?)<\/a>"
matches = scrapertools.find_multiple_matches(data, patron_caps)
-show = scrapertools.find_single_match(data, '<span>Titulo.+?<\/span>(.+?)<br><span>')
+#show = scrapertools.find_single_match(data, '<span>Titulo.+?<\/span>(.+?)<br><span>')
scrapedthumbnail = scrapertools.find_single_match(data, "<img src='(.+?)'.+?>")
scrapedplot = scrapertools.find_single_match(data, '<span>Descripcion.+?<\/span>(.+?)<br>')
i = 0
temp = 0
infoLabels = item.infoLabels
for link, cap, name in matches:
if int(cap) == 1:
temp = temp + 1
@@ -109,19 +115,25 @@ def episodios(item):
cap = "0" + cap
season = temp
episode = int(cap)
season, episode = renumbertools.numbered_for_tratk(
item.channel, item.show, season, episode)
+infoLabels['season'] = season
+infoLabels['episode'] = episode
date = name
title = "%sx%s %s (%s)" % (season, str(episode).zfill(2), "Episodio %s" % episode, date)
# title = str(temp)+"x"+cap+" "+name
url = host + "/" + link
if "NO DISPONIBLE" not in name:
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, thumbnail=scrapedthumbnail,
-plot=scrapedplot, url=url, show=show))
+plot=scrapedplot, url=url, contentSeasonNumber=season, contentEpisodeNumber=episode,
+contentSerieName=item.contentSerieName, infoLabels=infoLabels))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.title))
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
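Review note: the episode loop infers the season by counting resets of the chapter number to 1, lets `renumbertools.numbered_for_tratk` remap the pair, and now records both values in `infoLabels`. A rough sketch of the reset-counting idea on made-up data:

    # Derive (season, episode) pairs from a flat chapter list, assuming
    # numbering restarts at 1 on each season boundary, as the loop above does.
    def infer_seasons(chapters):
        season = 0
        pairs = []
        for cap in chapters:
            if cap == 1:       # a "Capitulo: 1" marks a new season
                season += 1
            pairs.append((season, cap))
        return pairs

    print infer_seasons([1, 2, 3, 1, 2])   # [(1, 1), (1, 2), (1, 3), (2, 1), (2, 2)]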
@@ -149,15 +161,15 @@ def findvideos(item):
scrapedthumbnail = scrapertools.find_single_match(data, '<div class="caracteristicas"><img src="([^<]+)">')
itemla = scrapertools.find_multiple_matches(data_vid, '"(.+?)"')
for url in itemla:
url=url.replace('\/', '/')
server1=url.split('/')
server=server1[2]
if "." in server:
server1=server.split('.')
if len(server1)==3:
server=server1[1]
else:
server=server1[0]
if "goo" in url:
url = googl(url)
server='netutv'
@@ -168,6 +180,9 @@ def findvideos(item):
itemlist.append(item.clone(url=url, action="play",
thumbnail=scrapedthumbnail, server=server, plot=scrapedplot,
title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))
+if item.contentTitle!="" and config.get_videolibrary_support() and len(itemlist) > 0:
+itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta película a la videoteca[/COLOR]", url=item.url,
+action="add_pelicula_to_library", extra="episodios", show=item.contentTitle))
autoplay.start(itemlist, item)
return itemlist
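Review note: the block above derives the server label by hand from the URL's host segment (`split('/')[2]`, then dropping the subdomain or the TLD). For comparison only, a hedged equivalent built on the Python 2 standard library this codebase targets:

    # Extract a bare server name from an embed URL via urlparse instead of
    # manual splitting; same outcome as the loop above for typical hosts.
    from urlparse import urlparse

    def server_from_url(url):
        host = urlparse(url.replace('\\/', '/')).netloc   # e.g. "www.openload.co"
        parts = host.split('.')
        # "www.openload.co" -> "openload"; "streamango.com" -> "streamango"
        return parts[1] if len(parts) == 3 else parts[0]

    print server_from_url('https:\\/\\/www.openload.co\\/embed\\/abc')   # openload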

View File

@@ -349,11 +349,16 @@ def fichas(item):
bus = host[-4:]
tag_type = scrapertools.find_single_match(url, '%s/([^/]+)/' %bus)
title += " - [COLOR blue]" + tag_type.capitalize() + "[/COLOR]"
-itemlist.append(
-Item(channel=item.channel, action=action, title=title, url=url, fulltitle=title, thumbnail=thumbnail,
-show=show, folder=True, contentType=contentType, contentTitle=contentTitle,
-language =language, infoLabels=infoLabels))
+if "/serie" in url or "/tags-tv" in url:
+itemlist.append(
+Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
+contentSerieName=show, folder=True, contentType=contentType,
+language =language, infoLabels=infoLabels))
+else:
+itemlist.append(
+Item(channel=item.channel, action=action, title=title, url=url, fulltitle=title, thumbnail=thumbnail,
+folder=True, contentType=contentType, contentTitle=contentTitle,
+language =language, infoLabels=infoLabels))
## Paginación
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)">.raquo;</a>')
if next_page_url != "":
@@ -389,50 +394,41 @@ def episodios(item):
str = get_status(status, "shows", id)
if str != "" and account and item.category != "Series" and "XBMC" not in item.title:
if config.get_videolibrary_support():
title = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )"
title = " ( [COLOR gray][B]" + item.contentSerieName + "[/B][/COLOR] )"
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=False))
Item(channel=item.channel, action="episodios", title=title, url=url_targets,
thumbnail=item.thumbnail, contentSerieName=item.contentSerieName, folder=False))
title = str.replace('green', 'red').replace('Siguiendo', 'Abandonar')
itemlist.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=True))
itemlist.append(Item(channel=item.channel, action="set_status", title=title, url=url_targets,
thumbnail=item.thumbnail, contentSerieName=item.contentSerieName, folder=True))
elif account and item.category != "Series" and "XBMC" not in item.title:
if config.get_videolibrary_support():
title = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )"
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=False))
Item(channel=item.channel, action="episodios", title=title, url=url_targets,
thumbnail=item.thumbnail, contentSerieName=item.contentSerieName, folder=False))
title = " ( [COLOR orange][B]Seguir[/B][/COLOR] )"
itemlist.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=True))
itemlist.append(Item(channel=item.channel, action="set_status", title=title, url=url_targets,
thumbnail=item.thumbnail, contentSerieName=item.contentSerieName, folder=True))
patron = "<li><a href='([^']+)'>[^<]+</a></li>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl in matches:
if "temporada-0" in scrapedurl:
continue
## Episodios
data = agrupa_datos(httptools.downloadpage(scrapedurl).data)
sid = scrapertools.get_match(data, "<script>var sid = '(\d+)'")
ssid = scrapertools.get_match(scrapedurl, "temporada-(\d+)")
post = "action=season&start=0&limit=0&show=%s&season=%s" % (sid, ssid)
url = host + "/a/episodes"
data = httptools.downloadpage(url, post=post).data
episodes = jsontools.load(data)
for episode in episodes:
thumbnail = host + "/thumbs/" + episode['thumbnail']
language = episode['languages']
temporada = episode['season']
episodio = episode['episode']
if len(episodio) == 1: episodio = '0' + episodio
if episode['languages'] != "[]":
idiomas = "( [COLOR teal][B]"
for idioma in episode['languages']: idiomas += idioma + " "
@@ -440,15 +436,12 @@ def episodios(item):
idiomas = idiomas + "[/B][/COLOR] )"
else:
idiomas = ""
if episode['title']:
try:
title = episode['title']['es'].strip()
except:
title = episode['title']['en'].strip()
if len(title) == 0: title = "Temporada " + temporada + " Episodio " + episodio
try:
title = temporada + "x" + episodio + " - " + title.decode('utf-8') + ' ' + idiomas
except:
@@ -456,38 +449,30 @@ def episodios(item):
# try: title = temporada + "x" + episodio + " - " + title + ' ' + idiomas
# except: pass
# except: title = temporada + "x" + episodio + " - " + title.decode('iso-8859-1') + ' ' + idiomas
str = get_status(status, 'episodes', episode['id'])
if str != "": title += str
try:
title = title.encode('utf-8')
except:
title = title.encode('iso-8859-1')
url = urlparse.urljoin(scrapedurl, 'temporada-' + temporada + '/episodio-' + episodio) + "###" + episode[
'id'] + ";3"
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, show=item.show, folder=True, contentType="episode",
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
thumbnail=thumbnail, contentSerieName=item.contentSerieName, folder=True, contentType="episode",
language=language))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=url_targets,
action="add_serie_to_library", extra="episodios", show=item.show))
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=url_targets,
action="download_all_episodes", extra="episodios", show=item.show))
action="download_all_episodes", extra="episodios"))
return itemlist
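Review note: each season is still fetched from the site's `/a/episodes` endpoint with a form-encoded POST (`action=season&start=0&limit=0&show=<sid>&season=<ssid>`) answered by a JSON list. A self-contained sketch of that request shape; the host and IDs are placeholders, and `json`/`urllib2` stand in for `jsontools`/`httptools`:

    import json
    import urllib
    import urllib2

    host = "https://example-host.tld"    # placeholder, not the channel's host
    post = urllib.urlencode({"action": "season", "start": 0, "limit": 0,
                             "show": "123", "season": "1"})
    # httptools.downloadpage(url, post=post) does roughly this, plus headers,
    # retries and anti-bot handling.
    episodes = json.load(urllib2.urlopen(host + "/a/episodes", post))
    for episode in episodes:
        print episode.get('season'), episode.get('episode'), episode.get('languages')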
def novedades_episodios(item):
logger.info()
itemlist = []
## Carga estados
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
## Episodios
url = item.url.split("?")[0]
post = item.url.split("?")[1]

View File

@@ -169,7 +169,7 @@ def seasons(item):
return itemlist
-def all_episodes(item):
+def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
@@ -183,8 +183,9 @@ def episodesxseason(item):
itemlist = []
data = get_source(item.url)
season = item.contentSeasonNumber
+season_data = scrapertools.find_single_match(data, '<div id=collapse%s.*?panel-success' % season)
patron = "<td><a href='([^ ]+)'.*?itemprop='episodeNumber'>%s+x(\d+)</span> - (.*?) </a>.*?(/banderas.*?)</td>" % season
-matches = re.compile(patron, re.DOTALL).findall(data)
+matches = re.compile(patron, re.DOTALL).findall(season_data)
infoLabels = item.infoLabels
for scrapedurl, scraped_episode, scrapedtitle, lang_data in matches:
url = host + scrapedurl
@@ -207,15 +208,22 @@ def episodesxseason(item):
def add_language(title, string):
logger.info()
-language = []
languages = scrapertools.find_multiple_matches(string, '/banderas/(.*?).png')
+language = []
for lang in languages:
if 'jap' in lang or lang not in IDIOMAS:
lang = 'vos'
-language.append(IDIOMAS[lang])
-title = '%s [%s]' % (title, IDIOMAS[lang])
+if len(languages) == 1:
+language = IDIOMAS[lang]
+title = '%s [%s]' % (title, language)
+else:
+language.append(IDIOMAS[lang])
+title = '%s [%s]' % (title, IDIOMAS[lang])
return title, language
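Review note: the rewritten helper now returns a plain string when a card carries a single flag and a list when it carries several (note it must index `IDIOMAS` with the normalized `lang` key, so an unrecognized flag falls back to 'vos' instead of raising a KeyError). A runnable mini-version with an assumed `IDIOMAS` table, illustrative values only:

    import re

    IDIOMAS = {'es': 'Esp', 'la': 'Lat', 'vos': 'VOS'}   # assumed mapping

    def add_language(title, string):
        languages = re.findall('/banderas/(.*?).png', string)
        language = []
        for lang in languages:
            if 'jap' in lang or lang not in IDIOMAS:
                lang = 'vos'
            if len(languages) == 1:
                language = IDIOMAS[lang]
            else:
                language.append(IDIOMAS[lang])
            title = '%s [%s]' % (title, IDIOMAS[lang])
        return title, language

    print add_language('Ep 1', "<img src='/banderas/es.png'>")
    # ('Ep 1 [Esp]', 'Esp')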
@@ -226,9 +234,8 @@ def findvideos(item):
itemlist = []
data = get_source(item.url)
patron = "<a href=([^ ]+) target=_blank><img src='/servidores/(.*?).(?:png|jpg)'.*?sno.*?"
patron += "sno><span>(.*?)<.*?(/banderas.*?)td"
patron += "<span>(.*?)<.*?(/banderas.*?)td"
matches = re.compile(patron, re.DOTALL).findall(data)

View File

@@ -8,7 +8,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "File Not Found" in data:
if "File Not Found" in data or "File was deleted" in data:
return False, "[clipwatching] El video ha sido borrado"
return True, ""

View File

@@ -19,7 +19,7 @@ def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = get_source(page_url)
if "File was deleted" in data:
if "File was deleted" in data or "File Not Found" in data:
return False, "[Filebebo] El video ha sido borrado"
return True, ""

View File

@@ -31,8 +31,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
js_wise = scrapertools.find_single_match(data_page_url_hqq,
"<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>")
data_unwise = jswise(js_wise).replace("\\", "")
-at = scrapertools.find_single_match(data_unwise, 'var at\s*=\s*"([^"]+)"')
-http_referer = scrapertools.find_single_match(data_unwise, 'var http_referer\s*=\s*"([^"]+)"')
+at = scrapertools.find_single_match(data_unwise, 'at=(\w+)')
+http_referer = scrapertools.find_single_match(data_unwise, 'http_referer=(.*?)&')
url = "http://hqq.watch/sec/player/embed_player.php?iss=&vid=%s&at=%s&autoplayed=yes&referer=on" \
"&http_referer=%s&pass=&embed_from=&need_captcha=0&hash_from=" % (id_video, at, http_referer)
data_player = httptools.downloadpage(url, add_referer=True).data
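Review note: the tightened patterns read `at` and `http_referer` out of the unwrapped wise-script output as query-string fragments instead of `var` declarations. A quick check of the new expressions against a fabricated `data_unwise` sample (the real payload comes from `jswise`):

    import re

    data_unwise = 'embed.php?iss=&at=a1b2c3&http_referer=http://example.tld/&pass='
    at = re.search(r'at=(\w+)', data_unwise).group(1)                  # 'a1b2c3'
    referer = re.search(r'http_referer=(.*?)&', data_unwise).group(1)  # 'http://example.tld/'
    print at, referer

`scrapertools.find_single_match` behaves like `re.search(...).group(1)` with an empty-string fallback, so the live code tolerates a missing match.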