Merge pull request #545 from Intel11/master

Updated
Alfa
2019-01-30 14:22:47 -05:00
committed by GitHub
3 changed files with 20 additions and 23 deletions

View File

@@ -18,7 +18,7 @@ from platformcode import config, logger
HOST = "http://www.seriespapaya.com"
IDIOMAS = {'es': 'Español', 'lat': 'Latino', 'in': 'Inglés', 'ca': 'Catalán', 'sub': 'VOSE', 'Español Latino':'lat',
IDIOMAS = {'es': 'Español', 'lat': 'Latino', 'in': 'Inglés', 'ca': 'Catalán', 'sub': 'VOSE', 'Español Latino':'Latino',
'Español Castellano':'es', 'Sub Español':'VOSE'}
list_idiomas = IDIOMAS.values()
list_quality = ['360p', '480p', '720p HD', '1080p HD', 'default']
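The only change in this hunk is the value stored for 'Español Latino': it now maps to the display name 'Latino' instead of the short code 'lat', so the IDIOMAS.get(lang, lang) lookup used later in findvideos yields the same value whether the page reports the short code or the full label. A minimal standalone sketch of that effect (not addon code, just the dictionary and lookup copied from the diff):

IDIOMAS = {'es': 'Español', 'lat': 'Latino', 'in': 'Inglés', 'ca': 'Catalán',
           'sub': 'VOSE', 'Español Latino': 'Latino',
           'Español Castellano': 'es', 'Sub Español': 'VOSE'}
list_idiomas = list(IDIOMAS.values())

assert IDIOMAS.get('lat', 'lat') == 'Latino'
assert IDIOMAS.get('Español Latino', 'Español Latino') == 'Latino'
assert IDIOMAS.get('Alemán', 'Alemán') == 'Alemán'  # made-up label: unknown values fall through unchanged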
@@ -199,12 +199,13 @@ def findvideos(item):
server=server.rstrip().capitalize(),
quality=quality,
uploader=uploader),
server=server.lower().rstrip(),
server=server.rstrip().lower(),
url=urlparse.urljoin(HOST, url),
language=IDIOMAS.get(lang,lang),
quality=quality
)
)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_idiomas, list_quality)
# Requerido para AutoPlay
@@ -216,11 +217,11 @@ def play(item):
logger.info("play: %s" % item.url)
itemlist = []
data = httptools.downloadpage(item.url).data
new_url = scrapertools.find_single_match(data, "location.href='([^']+)")
if new_url != '':
item.url = new_url
if item.server not in ['openload', 'streamcherry', 'streamango']:
item.server = ''
item.url = scrapertools.find_single_match(data, "location.href='([^']+)'")
itemlist.append(item.clone())
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist[0].thumbnail=item.contentThumbnail
return itemlist
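The rewritten play() keeps item.server only for openload, streamcherry and streamango; for anything else it blanks the server, pulls the real target out of the location.href redirect in the downloaded page, and lets servertools.get_servers_itemlist identify the hoster again, copying the content thumbnail onto the resulting item. A standalone sketch of just the redirect extraction, using plain re in place of scrapertools.find_single_match (which, as far as this diff shows, returns the first capture group or an empty string); the example URL is made up:

import re

def extract_redirect(html):
    # First URL assigned via location.href='...', or '' when there is none.
    match = re.search(r"location\.href='([^']+)'", html)
    return match.group(1) if match else ''

html = "<script>location.href='https://hoster.example/embed/abc123';</script>"
print(extract_redirect(html))  # https://hoster.example/embed/abc123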

View File

@@ -1,26 +1,23 @@
# -*- coding: utf-8 -*-
from core import jsontools
from core import httptools
from core import scrapertools
from platformcode import logger
# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s' , user='%s' , password='%s', video_password=%s)" % (
page_url, user, "**************************"[0:len(password)], video_password))
logger.info()
page_url = correct_url(page_url)
url = 'http://www.alldebrid.com/service.php?pseudo=%s&password=%s&link=%s&nb=0&json=true&pw=' % (
user, password, page_url)
data = jsontools.load(scrapertools.downloadpage(url))
dd1 = httptools.downloadpage("https://api.alldebrid.com/user/login?agent=mySoft&username=%s&password=%s" %(user, password)).data
token = scrapertools.find_single_match(dd1, 'token":"([^"]+)')
dd2 = httptools.downloadpage("https://api.alldebrid.com/link/unlock?agent=mySoft&token=%s&link=%s" %(token, page_url)).data
link = scrapertools.find_single_match(dd2, 'link":"([^"]+)')
link = link.replace("\\","")
video_urls = []
if data and data["link"] and not data["error"]:
extension = ".%s [alldebrid]" % data["filename"].rsplit(".", 1)[1]
video_urls.append([extension, data["link"]])
if link:
extension = "mp4 [alldebrid]"
video_urls.append([extension, link])
else:
try:
server_error = "Alldebrid: " + data["error"].decode("utf-8", "ignore")
@@ -30,16 +27,13 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
"Servidor no soportado o en mantenimiento")
except:
server_error = "Alldebrid: Error en el usuario/password o en la web"
video_urls.append([server_error, ''])
return video_urls
def correct_url(url):
if "userporn.com" in url:
url = url.replace("/e/", "/video/")
if "putlocker" in url:
url = url.replace("/embed/", "/file/")
return url
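The connector now talks to api.alldebrid.com in two steps: /user/login returns a token, and /link/unlock exchanges that token plus the original hoster link for a direct link. A standalone sketch of that flow using requests and real JSON parsing instead of httptools plus regex scraping; the endpoints and parameter names are taken from the diff, while the assumption that the responses expose top-level "token" and "link" fields is inferred from the regexes above:

import requests

API = "https://api.alldebrid.com"

def alldebrid_unlock(user, password, page_url, agent="mySoft"):
    # Step 1: log in and grab the session token.
    login = requests.get(API + "/user/login",
                         params={"agent": agent, "username": user, "password": password}).json()
    token = login.get("token", "")
    if not token:
        return ""
    # Step 2: ask AllDebrid to unlock the hoster link.
    unlock = requests.get(API + "/link/unlock",
                          params={"agent": agent, "token": token, "link": page_url}).json()
    return unlock.get("link", "")

Parsing the JSON directly also makes the link.replace("\\", "") step above unnecessary; the patched code only needs it because it pulls the link out of the raw response text, where the slashes arrive escaped.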

View File

@@ -20,5 +20,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
data = httptools.downloadpage(page_url, post={}).data
data = jsontools.load(data)
for videos in data["data"]:
video_urls.append([videos["label"] + " [fembed]", "https://www.fembed.com" + videos["file"]])
v = videos["file"]
if not v.startswith("http"): v = "https://www.fembed.com" + videos["file"]
video_urls.append([videos["label"] + " [fembed]", v])
return video_urls
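Previously every videos["file"] entry was unconditionally prefixed with https://www.fembed.com, presumably breaking entries that already come back as absolute URLs; the new code adds the host only when the value is a relative path. The same check in isolation (example values are made up):

def normalize(file_url, host="https://www.fembed.com"):
    # Only prepend the host for relative paths; absolute URLs pass through.
    return file_url if file_url.startswith("http") else host + file_url

print(normalize("/v/abc123.mp4"))                    # https://www.fembed.com/v/abc123.mp4
print(normalize("https://cdn.example.net/abc.mp4"))  # unchanged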