Add YesPornPlease

Author: l1231669
Date:   2017-08-27 22:42:06 +02:00
parent c750d78db9
commit d2bfe08cff
6 changed files with 210 additions and 0 deletions


@@ -0,0 +1,103 @@
# -*- coding: utf-8 -*-
import re

from core import httptools
from core import servertools
from core.item import Item
from platformcode import logger
from urlparse import urljoin

HOST = "http://yespornplease.com"


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(item.clone(action="links", title="Novedades", url=HOST))
    itemlist.append(item.clone(action="categories", title="Categorías", url=urljoin(HOST, "categories")))
    itemlist.append(item.clone(action="search", title="Buscar", url=urljoin(HOST, "search")))
    return itemlist


def search(item, texto):
    logger.info("texto = %s" % texto)
    # Standard query string, so get_page_url() can append "&p=N" for pagination
    item.url = urljoin(HOST, "search?q=" + texto)
    try:
        return links(item)
    # Catch the exception so a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categories(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    result = []
    logger.info(data)
    categories = re.findall(r"href=[\"'](?P<url>/search[^\"']+).*?>(?P<name>[^<>]+)</div>.*?badge[^>]+>(?P<counter>\d+)",
                            data, re.DOTALL | re.MULTILINE)
    for url, name, counter in categories:
        result.append(item.clone(action="links", title="%s (%s videos)" % (name, counter),
                                 url=urljoin(item.url, url)))
    return result


def get_page(url):
    page = re.search(r"p=(\d+)", url)
    if page:
        return int(page.group(1))
    return 1


def get_page_url(url, page):
    logger.debug("URL: %s to page %d" % (url, page))
    resultURL = re.sub(r"([&\?]p=)(?:\d+)", r"\g<1>%d" % page, url)
    if resultURL == url:
        resultURL += ("&" if "?" in url else "?") + "p=%d" % page
    logger.debug("Result: %s" % resultURL)
    return resultURL


def links(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    logger.info(data)
    reExpr = r"<img\s+src=['\"](?P<img>[^'\"]+)[^>]+title[^'\"]*['\"](?P<title>[^\"]+)[^>]+id[^'\"]*['\"](?P<id>[^'\"]+)[^>]*>(?:[^<]*<[^>]+>(?P<quality>[^<]+)<)?[^<]*<[^>]*duration[^>]*>(?P<duration>[^<]+)"
    reResults = re.findall(reExpr, data, re.MULTILINE | re.DOTALL)
    result = []
    for img, title, vID, quality, duration in reResults:
        logger.info("[link] %(title)s [%(quality)s] [%(duration)s]: %(vid)s (%(img)s)"
                    % {"title": title, "duration": duration, "vid": vID, "img": img,
                       "quality": quality if quality else "--"})
        formattedQuality = ""
        if quality:
            formattedQuality = " [%s]" % quality
        titleFormatted = "%(title)s%(quality)s [%(duration)s]" % {"title": title, "quality": formattedQuality,
                                                                  "duration": duration}
        result.append(item.clone(action="play", title=titleFormatted, url=urljoin(item.url, "/view/%s" % vID),
                                 thumbnail=urljoin(item.url, img), vID=vID))
    # Each 'class="prevnext"' block on the page marks an available previous/next link
    paginationOccurrences = data.count('class="prevnext"')
    if paginationOccurrences:
        page = get_page(item.url)
        logger.info("Page " + str(page) + " Occurrences: " + str(paginationOccurrences))
        if page > 1:
            result.append(item.clone(action="links", title="<< Anterior", url=get_page_url(item.url, page - 1)))
        if paginationOccurrences > 1 or page == 1:
            result.append(item.clone(action="links", title="Siguiente >>", url=get_page_url(item.url, page + 1)))
    return result


def play(item):
    logger.info(item)
    embededURL = urljoin(item.url, "/e/%s/width-650/height-400/autoplay-0/" % item.vID)
    itemlist = servertools.find_video_items(item.clone(url=embededURL))
    return itemlist
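
A minimal sketch of how the two pagination helpers above behave, run inside the channel module so get_page() and get_page_url() are in scope; the URLs are invented for illustration:

# Illustrative only: invented URLs exercising the helpers above.
print(get_page("http://yespornplease.com/categories"))                 # 1 (no p= parameter)
print(get_page("http://yespornplease.com/search?q=test&p=3"))          # 3
print(get_page_url("http://yespornplease.com/categories", 2))          # ...categories?p=2
print(get_page_url("http://yespornplease.com/search?q=test&p=2", 3))   # ...search?q=test&p=3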

Binary files not shown: three thumbnail images added (6.3 KiB, 5.4 KiB, 19 KiB).


@@ -0,0 +1,54 @@
{
"active": true,
"changes": [
{
"date": "27/08/2017",
"description": "Versión incial"
}
],
"find_videos": {
"ignore_urls": [
"http://streamin.to/embed-theme.html",
"http://streamin.to/embed-jquery.html"
],
"patterns": [
{
"pattern": "(http://vshare.io/v/[\\w]+[^\"']*)[\"']",
"url": "\\1"
}
]
},
"free": true,
"id": [
"vshare"
],
"name": "vshare",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "server_vshare.png",
"version": 1
}
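
For context, the "find_videos" block is what lets servertools.find_video_items() (called by play() in the channel above) spot vshare embeds inside a page: each "patterns" entry appears to be applied as a regular expression over the HTML, with "url" rebuilding the link from its groups. A minimal sketch of that matching step, with an invented embed snippet:

# Illustrative only: the HTML snippet is invented.
import re

pattern = "(http://vshare.io/v/[\\w]+[^\"']*)[\"']"  # as declared in the JSON above
html = '<iframe src="http://vshare.io/v/abc123/width-650/height-400/">'
match = re.search(pattern, html)
if match:
    print(match.group(1))  # -> http://vshare.io/v/abc123/width-650/height-400/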


@@ -0,0 +1,53 @@
# -*- coding: utf-8 -*-
import re

from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "File was deleted" in data:
        return False, "El archivo no existe<br/>en vshare o ha sido borrado."
    elif "Video is processing now" in data:
        return False, "El archivo está siendo procesado<br/>Prueba dentro de un rato."
    else:
        return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url = " + page_url)
    data = httptools.downloadpage(page_url).data
    flowplayer = re.search(r"url: [\"']([^\"']+)", data)
    if flowplayer:
        return [["FLV", flowplayer.group(1)]]
    jsUnpack = jsunpack.unpack(data)
    logger.debug(jsUnpack)
    video_urls = []
    fields = re.search(r"\[([^\]]+).*?parseInt\(value\)-(\d+)", jsUnpack)
    if fields:
        logger.debug("Values: " + fields.group(1))
        logger.debug("Subtract: " + fields.group(2))
        subtract = int(fields.group(2))
        # Each obfuscated value minus the offset is a character code of the hidden HTML
        arrayResult = [chr(int(value) - subtract) for value in fields.group(1).split(",")]
        strResult = "".join(arrayResult)
        logger.debug(strResult)
        videoSources = re.findall(r"<source[\s]+src=[\"'](?P<url>[^\"']+)[^>]+label=[\"'](?P<label>[^\"']+)", strResult)
        for url, label in videoSources:
            logger.debug("[" + label + "] " + url)
            video_urls.append([label, url])
    return video_urls
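
The decode step in get_video_url() relies on the page hiding its <source> tags as an array of shifted character codes; subtracting the offset recovers the HTML. A toy illustration with invented values:

# Illustrative only: values and offset are invented.
offset = 3
values = [63, 118, 114, 120, 117, 102, 104]  # each is ord(char) + offset
decoded = "".join(chr(v - offset) for v in values)
print(decoded)  # -> <source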