animeflv: fix

This commit is contained in:
Intel1
2018-07-04 09:08:39 -05:00
committed by GitHub
parent 05f18490e5
commit 8f406aee76


@@ -1,9 +1,7 @@
# -*- coding: utf-8 -*-
import re
import time
import urlparse
import urllib
from channels import renumbertools
from core import httptools
@@ -18,13 +16,10 @@ HOST = "https://animeflv.net/"
def mainlist(item):
    logger.info()
    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios", url=HOST))
    itemlist.append(Item(channel=item.channel, action="novedades_anime", title="Últimos animes", url=HOST))
    itemlist.append(Item(channel=item.channel, action="listado", title="Animes", url=HOST + "browse?order=title"))
    itemlist.append(Item(channel=item.channel, title="Buscar por:"))
    itemlist.append(Item(channel=item.channel, action="search", title=" Título"))
    itemlist.append(Item(channel=item.channel, action="search_section", title=" Género", url=HOST + "browse",
@@ -35,9 +30,7 @@ def mainlist(item):
extra="year"))
itemlist.append(Item(channel=item.channel, action="search_section", title=" Estado", url=HOST + "browse",
extra="status"))
itemlist = renumbertools.show_option(item.channel, itemlist)
return itemlist
@@ -48,36 +41,29 @@ def search(item, texto):
    texto = texto.replace(" ", "+")
    post = "value=%s" % texto
    data = httptools.downloadpage(item.url, post=post).data
    try:
        dict_data = jsontools.load(data)
        for e in dict_data:
            if e["id"] != e["last_id"]:
                _id = e["last_id"]
            else:
                _id = e["id"]
            url = "%sanime/%s/%s" % (HOST, _id, e["slug"])
            title = e["title"]
            thumbnail = "%suploads/animes/covers/%s.jpg" % (HOST, e["id"])
            new_item = item.clone(action="episodios", title=title, url=url, thumbnail=thumbnail)
            if e["type"] != "movie":
                new_item.show = title
                new_item.context = renumbertools.context(item)
            else:
                new_item.contentType = "movie"
                new_item.contentTitle = title
            itemlist.append(new_item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return itemlist
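
# --- Illustrative sketch (not part of the commit): how the search loop above
# builds an anime URL from one API entry. The sample entry is an assumption,
# modeled only on the fields the code reads (id, last_id, slug, title, type).
sample_host = "https://animeflv.net/"
sample_entry = {"id": "123", "last_id": "456", "slug": "one-piece-tv",
                "title": "One Piece", "type": "tv"}
if sample_entry["id"] != sample_entry["last_id"]:
    _id = sample_entry["last_id"]
else:
    _id = sample_entry["id"]
print("%sanime/%s/%s" % (sample_host, _id, sample_entry["slug"]))
# -> https://animeflv.net/anime/456/one-piece-tv
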
@@ -88,39 +74,30 @@ def search_section(item):
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    patron = 'id="%s_select"[^>]+>(.*?)</select>' % item.extra
    data = scrapertools.find_single_match(data, patron)
    matches = re.compile('<option value="([^"]+)">(.*?)</option>', re.DOTALL).findall(data)
    for _id, title in matches:
        url = "%s?%s=%s&order=title" % (item.url, item.extra, _id)
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url,
                             context=renumbertools.context(item)))
    return itemlist
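
# --- Illustrative sketch (not part of the commit): the <option> regex from
# search_section() run against a hand-written <select> fragment; the genre
# values are made up for the example.
import re
sample_select = '<option value="accion">Accion</option><option value="comedia">Comedia</option>'
for _id, title in re.findall('<option value="([^"]+)">(.*?)</option>', sample_select, re.DOTALL):
    print("%s -> %s?genre=%s&order=title" % (title, "https://animeflv.net/browse", _id))
# -> Accion -> https://animeflv.net/browse?genre=accion&order=title
# -> Comedia -> https://animeflv.net/browse?genre=comedia&order=title
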
def newest(categoria):
    itemlist = []
    if categoria == 'anime':
        itemlist = novedades_episodios(Item(url=HOST))
    return itemlist

def novedades_episodios(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = scrapertools.find_single_match(data, '<h2>Últimos episodios</h2>.+?<ul class="ListEpisodios[^>]+>(.*?)</ul>')
    matches = re.compile('<a href="([^"]+)"[^>]+>.+?<img src="([^"]+)".+?"Capi">(.*?)</span>'
                         '<strong class="Title">(.*?)</strong>', re.DOTALL).findall(data)
    itemlist = []
    for url, thumbnail, str_episode, show in matches:
        try:
            episode = int(str_episode.replace("Episodio ", ""))
        except ValueError:
@@ -135,28 +112,21 @@ def novedades_episodios(item):
        new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, thumbnail=thumbnail,
                        fulltitle=title)
        itemlist.append(new_item)
    return itemlist
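
# --- Illustrative sketch (not part of the commit): the int()/ValueError pattern
# above, which turns the scraped "Episodio N" label into a number; the fallback
# of 1 is an assumption (the real handler sits in an elided context line).
for str_episode in ["Episodio 12", "Especial"]:
    try:
        episode = int(str_episode.replace("Episodio ", ""))
    except ValueError:
        episode = 1  # non-numeric label; assumed default for this sketch
    print(episode)
# -> 12, then 1
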
def novedades_anime(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = scrapertools.find_single_match(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    matches = re.compile('href="([^"]+)".+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.+?>(.*?)</h3>.+?'
                         '(?:</p><p>(.*?)</p>.+?)?</article></li>', re.DOTALL).findall(data)
    itemlist = []
    for url, thumbnail, _type, title, plot in matches:
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)
        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)
        if _type != "Película":
@@ -165,173 +135,75 @@ def novedades_anime(item):
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title
        itemlist.append(new_item)
    return itemlist
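
# --- Illustrative sketch (not part of the commit): urlparse.urljoin, used above
# on both url and thumbnail, resolves relative paths against HOST and leaves
# absolute URLs untouched. The cover URL below is a made-up example.
import urlparse  # Python 2 stdlib, same module imported at the top of the file
print(urlparse.urljoin("https://animeflv.net/", "/anime/123/some-show"))
# -> https://animeflv.net/anime/123/some-show
print(urlparse.urljoin("https://animeflv.net/", "https://cdn.example.com/cover.jpg"))
# -> https://cdn.example.com/cover.jpg
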
def listado(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    url_pagination = scrapertools.find_single_match(data, '<li class="active">.*?</li><li><a href="([^"]+)">')
    data = scrapertools.find_multiple_matches(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    data = "".join(data)
    matches = re.compile('<a href="([^"]+)">.+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.*?>(.*?)</h3>'
                         '.*?</p><p>(.*?)</p>', re.DOTALL).findall(data)
    itemlist = []
    for url, thumbnail, _type, title, plot in matches:
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)
        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)
        if _type == "Anime":
            new_item.show = title
            new_item.context = renumbertools.context(item)
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title
        itemlist.append(new_item)
    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))
    return itemlist
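
# --- Illustrative sketch (not part of the commit): the pagination regex from
# listado() run against a hand-written pager fragment.
import re
sample_pager = '<li class="active"><a href="#">2</a></li><li><a href="/browse?order=title&page=3">3</a></li>'
print(re.search('<li class="active">.*?</li><li><a href="([^"]+)">', sample_pager).group(1))
# -> /browse?order=title&page=3
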
def episodios(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    # fix for renumbertools
    item.show = scrapertools.find_single_match(data, '<h1 class="Title">(.*?)</h1>')
    if item.plot == "":
        item.plot = scrapertools.find_single_match(data, 'Description[^>]+><p>(.*?)</p>')
    matches = re.compile('href="([^"]+)"><figure><img class="[^"]+" data-original="([^"]+)".+?</h3>'
                         '\s*<p>(.*?)</p>', re.DOTALL).findall(data)
    if matches:
        for url, thumb, title in matches:
            title = title.strip()
            url = urlparse.urljoin(item.url, url)
            # thumbnail = item.thumbnail
            try:
                episode = int(scrapertools.find_single_match(title, "^.+?\s(\d+)$"))
            except ValueError:
                season = 1
                episode = 1
            else:
                season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
            title = "%sx%s : %s" % (season, str(episode).zfill(2), item.title)
            itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb, fulltitle=title,
                                       fanart=item.thumbnail, contentType="episode"))
    else:
        # no thumbnail available
        matches = re.compile('<a href="(/ver/[^"]+)"[^>]+>(.*?)<', re.DOTALL).findall(data)
        for url, title in matches:
            title = title.strip()
            url = urlparse.urljoin(item.url, url)
            thumb = item.thumbnail
            try:
                episode = int(scrapertools.find_single_match(title, "^.+?\s(\d+)$"))
            except ValueError:
                season = 1
                episode = 1
            else:
                season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
            title = "%sx%s : %s" % (season, str(episode).zfill(2), item.title)
            itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb, fulltitle=title,
                                       fanart=item.thumbnail, contentType="episode"))
    info = eval(scrapertools.find_single_match(data, 'anime_info = (.*?);'))
    episodes = eval(scrapertools.find_single_match(data, 'var episodes = (.*?);'))
    for episode in episodes:
        url = '%s/ver/%s/%s-%s' % (HOST, episode[1], info[2], episode[0])
        title = '1x%s Episodio %s' % (episode[0], episode[0])
        itemlist.append(item.clone(title=title, url=url, action='findvideos', show=info[1]))
    itemlist = itemlist[::-1]
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca",
                             action="add_serie_to_library", extra="episodios"))
    return itemlist
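
# --- Illustrative sketch (not part of the commit): how the two eval() calls in
# episodios() turn the page's inline JS into Python lists. The sample values
# are assumptions inferred only from the indices the loop reads
# (info[1] = title, info[2] = slug, episode[0] = number, episode[1] = id).
sample_info = eval("['123', 'One Piece', 'one-piece-tv']")    # anime_info = [...];
sample_episodes = eval("[[2, 40056], [1, 39987]]")            # var episodes = [...];
for ep in sample_episodes:
    print('ver/%s/%s-%s' % (ep[1], sample_info[2], ep[0]))
# -> ver/40056/one-piece-tv-2
# -> ver/39987/one-piece-tv-1
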
def findvideos(item):
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)
    list_videos = scrapertools.find_multiple_matches(data, 'video\[\d\]\s=\s\'<iframe.+?src="([^"]+)"')
    download_list = scrapertools.find_multiple_matches(data, 'href="http://ouo.io/s/y0d65LCP\?s=([^"]+)"')
    for i in download_list:
        list_videos.append(urllib.unquote_plus(i))
    aux_url = []
    cldup = False
    for e in list_videos:
        url_api = "https://s3.animeflv.com/check.php?server=%s&v=%s"
        # izanagi, yourupload, hyperion
        if e.startswith("https://s3.animeflv.com/embed"):
            server, v = scrapertools.find_single_match(e, 'server=([^&]+)&v=(.*?)$')
            data = httptools.downloadpage(url_api % (server, v)).data.replace("\\", "")
            if '{"error": "Por favor intenta de nuevo en unos segundos", "sleep": 3}' in data:
                time.sleep(3)
                data = httptools.downloadpage(url_api % (server, v)).data.replace("\\", "")
            if server != "hyperion":
                url = scrapertools.find_single_match(data, '"file":"([^"]+)"')
                if url:
                    itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play"))
            else:
                # pattern = '"direct":"([^"]+)"'
                # url = scrapertools.find_single_match(data, pattern)
                # itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play"))
                pattern = '"label":([^,]+),"type":"video/mp4","file":"([^"]+)"'
                matches = scrapertools.find_multiple_matches(data, pattern)
                video_urls = []
                for label, url in matches:
                    video_urls.append([label, "mp4", url])
                if video_urls:
                    video_urls.sort(key=lambda u: int(u[0]))
                    itemlist.append(item.clone(title="Enlace encontrado en %s" % server, action="play",
                                               video_urls=video_urls))
    download_list = scrapertools.find_multiple_matches(data, 'video\[\d+\] = \'<iframe .*?src="(.*?)"')
    for url in download_list:
        data = httptools.downloadpage(url).data
        if 'izanagi' in url:
            new_url = url.replace('embed', 'check')
            new_data = httptools.downloadpage(new_url).data
            url = scrapertools.find_single_match(new_data, '"file":"(.*?)"')
        else:
            if e.startswith("https://cldup.com") and not cldup:
                itemlist.append(item.clone(title="Enlace encontrado en Cldup", action="play", url=e))
                cldup = True
            aux_url.append(e)
    itemlist.extend(servertools.find_video_items(data=",".join(aux_url)))
    for videoitem in itemlist:
        videoitem.fulltitle = item.fulltitle
        videoitem.channel = item.channel
        videoitem.thumbnail = item.thumbnail
    url = scrapertools.find_single_match(data, 'var redir = "(.*?)"')
    if url != '':
        url = url.replace("\\", "")
        itemlist.append(item.clone(title='%s', url=url, action='play'))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
    return itemlist
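
# --- Illustrative sketch (not part of the commit): why findvideos() stores
# titles as templates like 'Enlace encontrado en %s' or just '%s'.
# get_servers_itemlist() identifies the server per item and applies the lambda,
# which fills the template in. FakeItem and the server names are stand-ins.
class FakeItem(object):
    def __init__(self, title, server):
        self.title, self.server = title, server

resolve = lambda i: i.title % i.server
print(resolve(FakeItem("Enlace encontrado en %s", "openload")))  # -> Enlace encontrado en openload
print(resolve(FakeItem("%s", "fembed")))                         # -> fembed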