Merge pull request #469 from Intel11/master

Updated
Alfa, 2018-10-25 08:26:28 -05:00, committed by GitHub
8 changed files with 88 additions and 13 deletions

View File

@@ -8,7 +8,7 @@ from platformcode import config, logger
from channelselector import get_thumb
host = "http://gnula.nu/"
-host_search = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz=small&num=20&hl=es&prettyPrint=false&source=gcsc&gss=.es&sig=45e50696e04f15ce6310843f10a3a8fb&cx=014793692610101313036:vwtjajbclpq&q=%s&cse_tok=%s&googlehost=www.google.com&callback=google.search.Search.apiary10745&nocache=1519145965573&start=0"
+host_search = "https://cse.google.com/cse/element/v1?rsz=filtered_cse&num=20&hl=es&source=gcsc&gss=.es&sig=c891f6315aacc94dc79953d1f142739e&cx=014793692610101313036:vwtjajbclpq&q=%s&safe=off&cse_tok=%s&googlehost=www.google.com&callback=google.search.Search.csqr6098&nocache=1540313852177&start=0"
item_per_page = 20
@@ -58,9 +58,9 @@ def sub_search(item):
break
page = int(scrapertools.find_single_match(item.url, ".*?start=(\d+)")) + item_per_page
item.url = scrapertools.find_single_match(item.url, "(.*?start=)") + str(page)
-patron = '(?s)clicktrackUrl":".*?q=(.*?)".*?'
-patron += 'title":"([^"]+)".*?'
-patron += 'cseImage":{"src":"([^"]+)"'
+patron = '(?s)clicktrackUrl":\s*".*?q=(.*?)".*?'
+patron += 'title":\s*"([^"]+)".*?'
+patron += '"src":\s*"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedurl = scrapertools.find_single_match(scrapedurl, ".*?online/")
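
Not part of the diff: the rewritten patron differs from the old one only in tolerating optional whitespace after the JSON keys (\s*) and anchoring on a bare "src" key instead of cseImage, presumably because the new CSE endpoint pretty-prints its response. A minimal standalone check against a hypothetical response snippet:

# Standalone check of the relaxed pattern, run against a made-up
# CSE response snippet (not a real API response).
import re

sample = ('{"clicktrackUrl": "https://cse.google.com/url?q=http://gnula.nu/pelicula/demo/ver-online/",'
          ' "title": "Pelicula demo", "cseImage": {"src": "http://gnula.nu/demo.jpg"}}')

patron = r'(?s)clicktrackUrl":\s*".*?q=(.*?)".*?'
patron += r'title":\s*"([^"]+)".*?'
patron += r'"src":\s*"([^"]+)"'

print(re.findall(patron, sample))
# [('http://gnula.nu/pelicula/demo/ver-online/', 'Pelicula demo', 'http://gnula.nu/demo.jpg')]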

View File

@@ -26,7 +26,7 @@ def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
class UnshortenIt(object):
-_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid'
+_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net'
_linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
_adfocus_regex = r'adfoc\.us'
_lnxlu_regex = r'lnx\.lu'
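
Not part of the diff: the change appends activetect\.net to the adf.ly family of shortener domains. Judging from the find_in_text helper in the hunk header, these regexes are matched against the incoming URL to pick an unshortener; a quick sketch of that dispatch:

# Sketch: matching the extended domain alternation against candidate URLs,
# using re.search with the flags find_in_text defaults to.
import re

_adfly_regex = (r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|'
                r'microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net')

for url in ('http://activetect.net/abc123', 'http://example.com/abc123'):
    print(url, '->', bool(re.search(_adfly_regex, url, re.IGNORECASE | re.DOTALL)))
# http://activetect.net/abc123 -> True
# http://example.com/abc123 -> False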

View File

@@ -5,7 +5,7 @@ from core import httptools
from core import scrapertools
from platformcode import logger
-headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
+headers = {'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36'}
def test_video_exists(page_url):
@@ -28,7 +28,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
time.sleep(1)
data1 = httptools.downloadpage(page_url, post = post, headers = headers).data
patron = "window.open\('([^']+)"
-file = scrapertools.find_single_match(data1, patron)
+file = scrapertools.find_single_match(data1, patron).replace(" ","%20")
file += "|User-Agent=" + headers['User-Agent']
video_urls = []
videourl = file
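
Not part of the diff: Kodi accepts HTTP headers appended to a play URL after a | separator, so the connector tacks the same User-Agent it scraped with onto the URL; the new .replace(" ","%20") guards against file names containing spaces, which would otherwise break the request. A sketch with a hypothetical extracted URL:

# Sketch: building a Kodi-style play URL with a header suffix from a
# hypothetical window.open() target that contains a space.
headers = {'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) '
                         'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36'}

file = 'https://cdn.example.com/my movie.mp4'.replace(' ', '%20')
file += '|User-Agent=' + headers['User-Agent']
print(file)
# https://cdn.example.com/my%20movie.mp4|User-Agent=Mozilla/5.0 (Linux; Android 6.0; ...

Everything after the | is parsed by Kodi as header=value pairs when the stream is opened.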

View File

@@ -22,10 +22,12 @@ def get_video_url(page_url, user="", password="", video_password=""):
packed = scrapertools.find_multiple_matches(data, "(?s)<script>\s*eval(.*?)\s*</script>")
for pack in packed:
unpacked = jsunpack.unpack(pack)
-if "file" in unpacked:
-    videos = scrapertools.find_multiple_matches(unpacked, 'file.="(//[^"]+)')
+if "tida" in unpacked:
+    videos = scrapertools.find_multiple_matches(unpacked, 'tid.="([^"]+)')
video_urls = []
for video in videos:
+if not video.startswith("//"):
+    continue
video = "https:" + video
video_urls.append(["mp4 [Thevid]", video])
logger.info("Url: %s" % videos)
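
Not part of the diff: after jsunpack undoes the eval() packing, the connector now keys on a tid* variable instead of file, and the added guard keeps only protocol-relative URLs. A sketch against a made-up unpacked script:

# Sketch of the new extraction, mirroring the 'tid.="([^"]+)' pattern and
# the startswith("//") guard, against a hypothetical unpacked script.
import re

unpacked = 'var tida="//v1.example.com/stream.mp4";var tidb="nope";'

videos = re.findall(r'tid.="([^"]+)', unpacked)
video_urls = []
for video in videos:
    if not video.startswith("//"):
        continue  # drop captures that are not protocol-relative URLs
    video_urls.append(["mp4 [Thevid]", "https:" + video])
print(video_urls)
# [['mp4 [Thevid]', 'https://v1.example.com/stream.mp4']]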

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
-"pattern": "https://vidcloud.co/embed/([a-z0-9]+)",
+"pattern": "https://(?:vidcloud.co|vcstream.to)/embed/([a-z0-9]+)",
"url": "https://vidcloud.co/player?fid=\\1&page=embed"
}
]
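
Not part of the diff: find_videos patterns pair a detection regex with a url template, and the captured embed id is substituted back via \1 (written \\1 in JSON). The new alternation also catches the vcstream.to mirror while still routing playback through vidcloud.co:

# Sketch of how a pattern/url pair resolves an embed URL to the player URL.
import re

pattern = r"https://(?:vidcloud.co|vcstream.to)/embed/([a-z0-9]+)"
url_tpl = r"https://vidcloud.co/player?fid=\1&page=embed"

m = re.search(pattern, '<iframe src="https://vcstream.to/embed/abc123">')
if m:
    print(m.expand(url_tpl))
# https://vidcloud.co/player?fid=abc123&page=embed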

View File

@@ -4,7 +4,6 @@
import re
from core import httptools
from core import scrapertools
-from lib import jsunpack
from platformcode import logger
@@ -23,7 +22,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = data.replace('\\\\', '\\').replace('\\','')
patron = '"file":"([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:
-video_urls.append(['vidcloud', url])
+if not ".vtt" in url:
+    video_urls.append(['vidcloud', url])
return video_urls
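
Not part of the diff: the player response appears to list subtitle tracks alongside video streams under the same "file" key, so the new check keeps WebVTT entries out of the playable list. A sketch against a hypothetical, already-unescaped response:

# Sketch of the .vtt filter against a made-up, already-unescaped response.
import re

data = '"file":"https://s1.example.com/video.mp4","file":"https://s1.example.com/subs.vtt"'

video_urls = []
for url in re.findall(r'"file":"([^"]+)"', data):
    if ".vtt" not in url:  # skip WebVTT subtitle tracks
        video_urls.append(['vidcloud', url])
print(video_urls)
# [['vidcloud', 'https://s1.example.com/video.mp4']]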

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://xdrive.cc/embed/([A-z0-9]+)",
"url": "https://xdrive.cc/embed/\\1"
}
]
},
"free": true,
"id": "xdrive",
"name": "xdrive",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://i.postimg.cc/MHyNdRPZ/xdrive.png"
}
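
A note on the new pattern, outside the diff: [A-z0-9] is wider than it looks. In ASCII the A-z range also covers the six characters between Z and a, so underscores (among others) slip through; [A-Za-z0-9] is the strict class. Harmless if xdrive ids are purely alphanumeric, but worth knowing:

# [A-z] spans ASCII 0x41-0x7A, which includes '[', '\\', ']', '^', '_', '`'.
import re
print(re.findall(r'[A-z0-9]+', 'abc_DEF'))    # ['abc_DEF'] - underscore matches
print(re.findall(r'[A-Za-z0-9]+', 'abc_DEF')) # ['abc', 'DEF']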

View File

@@ -0,0 +1,32 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa addon - KODI Plugin
# Connector for xdrive
# https://github.com/alfa-addon
# ------------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "Object not found" in data or "no longer exists" in data or '"sources": [false]' in data:
        return False, "[xdrive] El archivo no existe o ha sido borrado"
    return True, ""

def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    videourl = scrapertools.find_multiple_matches(data, "src: '([^']+).*?label: '([^']+)")
    scrapertools.printMatches(videourl)
    for scrapedurl, scrapedquality in videourl:
        scrapedquality = scrapedquality.replace("&uarr;","")
        video_urls.append([scrapedquality + " [xdrive]", scrapedurl])
    video_urls.sort(key=lambda it: int(it[0].split("P ", 1)[0]))
    return video_urls
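
Not part of the diff: the sort key assumes every quality label has the form <height>P [xdrive]; split("P ", 1)[0] isolates the height so entries order numerically rather than lexically. A label without "P " or with a non-numeric prefix would raise ValueError. A sketch:

# Sketch of the numeric quality sort, assuming labels like "720P [xdrive]".
video_urls = [["720P [xdrive]", "url1"], ["1080P [xdrive]", "url2"], ["360P [xdrive]", "url3"]]
video_urls.sort(key=lambda it: int(it[0].split("P ", 1)[0]))
print([v[0] for v in video_urls])
# ['360P [xdrive]', '720P [xdrive]', '1080P [xdrive]']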