@@ -1,11 +1,14 @@
# -*- coding: utf-8 -*-

import re
import time
import urlparse
import urllib

from channels import renumbertools
from core import httptools
from core import jsontools
from core import servertools
from core import scrapertools
from core.item import Item
from platformcode import logger
@@ -273,42 +276,54 @@ def findvideos(item):

    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)

    list_videos = scrapertools.find_multiple_matches(data, 'video\[\d\]\s=\s\'<iframe.+?src="([^"]+)"')
    list_videos.extend(scrapertools.find_multiple_matches(data, 'href="http://ouo.io/s/y0d65LCP\?s=([^"]+)"'))
    # logger.info("data=%s " % list_videos)

    download_list = scrapertools.find_multiple_matches(data, 'href="http://ouo.io/s/y0d65LCP\?s=([^"]+)"')
    for i in download_list:
        list_videos.append(urllib.unquote_plus(i))
    aux_url = []
    cldup = False
    for e in list_videos:
        if e.startswith("https://s3.animeflv.com/embed.php?"):
            server = scrapertools.find_single_match(e, 'server=(.*?)&')
            e = e.replace("embed", "check").replace("https", "http")
            data = httptools.downloadpage(e).data.replace("\\", "")
        url_api = "https://s3.animeflv.com/check.php?server=%s&v=%s"
        # izanagi, yourupload, hyperion
        if e.startswith("https://s3.animeflv.com/embed"):
            server, v = scrapertools.find_single_match(e, 'server=([^&]+)&v=(.*?)$')
            data = httptools.downloadpage(url_api % (server, v)).data.replace("\\", "")

            if '{"error": "Por favor intenta de nuevo en unos segundos", "sleep": 3}' in data:
                import time
                time.sleep(3)
                data = httptools.downloadpage(e).data.replace("\\", "")
                data = httptools.downloadpage(url_api % (server, v)).data.replace("\\", "")

            video_urls = []
            if server == "gdrive":
                data = jsontools.load(data)
                for s in data.get("sources", []):
                    video_urls.append([s["label"], s["type"], s["file"]])

                if video_urls:
                    video_urls.sort(key=lambda v: int(v[0]))
                    itemlist.append(item.clone(title="Enlace encontrado en %s" % server, action="play",
                                               video_urls=video_urls))
            else:
                if server != "hyperion":
                    url = scrapertools.find_single_match(data, '"file":"([^"]+)"')
                    if url:
                        itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play"))

                else:
                    # pattern = '"direct":"([^"]+)"'
                    # url = scrapertools.find_single_match(data, pattern)
                    # itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play"))

                    pattern = '"label":([^,]+),"type":"video/mp4","file":"([^"]+)"'
                    matches = scrapertools.find_multiple_matches(data, pattern)

                    video_urls = []
                    for label, url in matches:
                        video_urls.append([label, "mp4", url])
                    if video_urls:
                        video_urls.sort(key=lambda u: int(u[0]))
                        itemlist.append(item.clone(title="Enlace encontrado en %s" % server, action="play",
                                                   video_urls=video_urls))

        else:
            if e.startswith("https://cldup.com") and cldup == False:
                itemlist.append(item.clone(title="Enlace encontrado en Cldup",
                                           action="play",
                                           url=e))
                cldup = True
            aux_url.append(e)

    from core import servertools
    itemlist.extend(servertools.find_video_items(data=",".join(aux_url)))
    for videoitem in itemlist:
        videoitem.fulltitle = item.fulltitle
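For reference, here is a minimal, self-contained sketch of the lookup-and-retry flow the rewritten findvideos() follows against the check.php endpoint. The URL template, the "sleep" error payload and the [label, type, file] triples come from the hunk above; the fetch_json/resolve_sources helpers and the use of plain urllib2/json instead of core.httptools/jsontools are assumptions made only to keep the example standalone.

import json
import time
import urllib2

# Endpoint template taken from the diff above.
CHECK_API = "https://s3.animeflv.com/check.php?server=%s&v=%s"

def fetch_json(url):
    # Plain urllib2 download; the real channel goes through core.httptools.
    return json.loads(urllib2.urlopen(url).read())

def resolve_sources(server, video_id, retries=1):
    data = fetch_json(CHECK_API % (server, video_id))
    # The API may answer {"error": "...", "sleep": 3}: wait the requested
    # number of seconds and ask again, at most `retries` times.
    while "error" in data and retries > 0:
        time.sleep(data.get("sleep", 3))
        data = fetch_json(CHECK_API % (server, video_id))
        retries -= 1
    # Build [label, type, file] triples and sort by the numeric quality label,
    # mirroring the gdrive branch of the new findvideos().
    sources = [[s["label"], s["type"], s["file"]] for s in data.get("sources", [])]
    sources.sort(key=lambda src: int(src[0]))
    return sources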
@@ -381,11 +381,10 @@ def play(item):
    itemlist.append(
        Item(channel = item.channel,
             action = "play",
             title = "%s",
             fulltitle = item.fulltitle,
             thumbnail = item.thumbnail,
             server = "",
             url = item.url
             ))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    itemlist = servertools.get_servers_itemlist(itemlist)
    return itemlist
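The new call drops the title-formatting lambda, so the "%s" placeholder in each title is no longer filled with the detected server name by get_servers_itemlist(). A hedged sketch of how that could still be done afterwards, assuming each returned item carries the server name in item.server as the removed lambda relied on; fill_server_titles is a hypothetical helper, not part of the diff.

def fill_server_titles(itemlist):
    # Hypothetical helper: replace the "%s" left in each title with the
    # capitalized server name, as the removed lambda used to do.
    for video_item in itemlist:
        if "%s" in video_item.title and video_item.server:
            video_item.title = video_item.title % video_item.server.capitalize()
    return itemlist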
@@ -374,7 +374,7 @@ def get_episodios(item):
    paginacion = scrapertools.get_match(data, patron)
    # logger.info("[newpct1.py] get_episodios: paginacion= " + paginacion)
    if "Next" in paginacion:
        url_next_page = scrapertools.get_match(paginacion, '<a href="(http[^>]+)>Next</a>')[:-1]
        url_next_page = "http" + scrapertools.get_match(paginacion, '<a href="http([^>]+)>Next</a>')[:-1]
        url_next_page = url_next_page.replace(" ", "%20")
        # logger.info("[newpct1.py] get_episodios: url_next_page= " + url_next_page)
        itemlist.append(
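The updated pagination line is easy to misread because of the trailing [:-1]. A short sketch of what it does, using plain re instead of scrapertools.get_match purely as an assumption for self-containment: the capture now starts right after "http" and still ends with the closing quote, so the scheme is prepended again and the last character dropped.

import re

def next_page_url(paginacion):
    # '([^>]+)' stops just before '>', so the capture still ends with '"'.
    match = re.search(r'<a href="http([^>]+)>Next</a>', paginacion)
    if not match:
        return None
    url = ("http" + match.group(1))[:-1]  # re-add the scheme, drop the quote
    return url.replace(" ", "%20")        # the listing leaves literal spaces in URLs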
@@ -3,10 +3,11 @@
import re
import urlparse

from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from platformcode import logger, config


def mainlist(item):
@@ -28,7 +29,7 @@ def PorFecha(item):
    logger.info()

    # Descarga la pagina
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<section class="lanzamiento">(.*?)</section>')
    logger.info("data=" + data)
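The same one-line migration from scrapertools.cache_page() to httptools.downloadpage().data repeats in the Idiomas, calidades, generos, peliculas and findvideos hunks below. A minimal sketch of the new call, assuming only what the diff itself shows, namely that downloadpage() returns an object whose .data attribute holds the page body; download_section is a hypothetical helper, not part of the channel.

from core import httptools, scrapertools

def download_section(url, css_class):
    # New style: httptools.downloadpage(url).data replaces scrapertools.cache_page(url).
    data = httptools.downloadpage(url).data
    return scrapertools.find_single_match(
        data, '<section class="%s">(.*?)</section>' % css_class)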
@@ -54,7 +55,7 @@ def Idiomas(item):
    logger.info()

    # Descarga la pagina
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<section class="idioma">(.*?)</section>')
    logger.info("data=" + data)
@@ -80,7 +81,7 @@ def calidades(item):
    logger.info()

    # Descarga la pagina
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<section class="calidades">(.*?)</section>')
    logger.info("data=" + data)
@@ -106,7 +107,7 @@ def generos(item):
    logger.info()

    # Descarga la pagina
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<section class="generos">(.*?)</section>')
    logger.info("data=" + data)
@@ -121,6 +122,8 @@ def generos(item):
        plot = ""
        url = urlparse.urljoin(item.url, scrapedurl)
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        if "Adulto" in title and config.get_setting("adult_mode") == 0:
            continue
        itemlist.append(
            Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
                 fulltitle=title, viewmode="movie"))
@@ -148,7 +151,7 @@ def peliculas(item):
    logger.info()

    # Descarga la pagina
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    logger.info("data=" + data)
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
@@ -178,7 +181,7 @@ def findvideos(item):
    logger.info()

    # Descarga la pagina
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    # logger.info("data="+data)

    # Extrae las entradas (carpetas)
@@ -19,27 +19,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
        request = urllib2.Request(jk_url, headers=request_headers)
        response = urllib2.urlopen(request)
        video_urls.append([".mp4 [redirects]", response.geturl()])
    else:
        headers = []
        headers.append(["User-Agent",
                        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.94 Safari/537.36"])
        headers.append(["Accept-Encoding", "gzip,deflate,sdch"])
        page_url = page_url.replace("https://animeflv.net/embed_izanagi.php?key=",
                                    "https://s2.animeflv.net/izanagi.php?id=")
        page_url = page_url.replace("http://animeflv.net/embed_yotta.php?key=",
                                    "https://s1.animeflv.com/gdrive.php?id=")
        data = scrapertools.cache_page(page_url, headers=headers)
        data = data.replace("\\\\", "")
        data = data.replace("\\/", "/")
        patronvideos = '"file":"(.+?)"'
        matches = re.compile(patronvideos, re.DOTALL).findall(data)
        for match in matches:
            video_urls.append([".mp4 [redirects]", match])

        patronvideos = '(http://www.animeid.+?)"'
        matches = re.compile(patronvideos, re.DOTALL).findall(data)
        for match in matches:
            response = urllib2.urlopen(match)
            video_urls.append([".mp4 [redirects]", response.geturl()])

    return video_urls
@@ -8,7 +8,7 @@ from platformcode import logger

def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "Page not found" in data:
    if "Page not found" in data or "File was deleted" in data:
        return False, "[vidoza] El archivo no existe o ha sido borrado"
    elif "processing" in data:
        return False, "[vidoza] El vídeo se está procesando"
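The availability check amounts to scanning the downloaded page for known marker strings. A data-driven variant of the same logic, using only the markers visible in the hunk; check_availability and ERROR_MARKERS are hypothetical names, not part of the connector.

# (marker string, reason shown to the user) pairs taken from the hunk above.
ERROR_MARKERS = [
    ("Page not found", "[vidoza] El archivo no existe o ha sido borrado"),
    ("File was deleted", "[vidoza] El archivo no existe o ha sido borrado"),
    ("processing", "[vidoza] El vídeo se está procesando"),
]

def check_availability(data):
    # Return (exists, reason), matching the shape of test_video_exists().
    for marker, reason in ERROR_MARKERS:
        if marker in data:
            return False, reason
    return True, ""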