Merge pull request #491 from Intel11/master

Actualizados
This commit is contained in:
Alfa
2018-11-22 08:14:34 -05:00
committed by GitHub
12 changed files with 92 additions and 265 deletions

View File

@@ -433,7 +433,14 @@ def newest(categoria):
def search(item, texto):
logger.info()
itemlist = []
texto = texto.replace(" ", "-")
item.url = item.host + '?s=' + texto
if texto != '':
return peliculas(item)
if item.host != '':
host_list = [item.host]
else:
host_list = ['http://www.cinecalidad.to', 'http://cinecalidad.to/espana/']
for host_name in host_list:
item.url = host_name + '?s=' + texto
if texto != '':
itemlist.extend(peliculas(item))
return itemlist

View File

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
@@ -176,11 +175,11 @@ def destacadas(item):
item.text_color = color2
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'peliculas_destacadas.*?class="letter_home"')
patron = '(?s)title="([^"]+)".*?'
patron += 'href="([^"]+)".*?'
patron = '(?s)href="([^"]+)".*?'
patron += 'alt="([^"]+)".*?'
patron += 'src="([^"]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedurl = CHANNEL_HOST + scrapedurl
itemlist.append(item.clone(action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle,
url=scrapedurl, thumbnail=scrapedthumbnail,
@@ -224,11 +223,12 @@ def findvideos(item):
logger.info()
itemlist=[]
data = httptools.downloadpage(item.url).data
patron = 'class="title">.*?src.*?/>([^>]+)</span>.*?data-type="([^"]+).*?data-post="(\d+)".*?data-nume="(\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
#logger.info("Intel66")
#scrapertools.printMatches(matches)
for language, tp, pt, nm in matches:
patron = 'tooltipctx.*?data-type="([^"]+).*?'
patron += 'data-post="(\d+)".*?'
patron += 'data-nume="(\d+).*?'
patron += 'class="title">.*?src.*?/>([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for tp, pt, nm, language in matches:
language = language.strip()
post = {'action':'doo_player_ajax', 'post':pt, 'nume':nm, 'type':tp}
post = urllib.urlencode(post)
@@ -242,17 +242,12 @@ def findvideos(item):
else:
title = ''
url = scrapertools.find_single_match(new_data, "src='([^']+)'")
#logger.info("Intel33 %s" %url)
url = get_url(url)
if "mega" not in url and "mediafire" not in url:
url = get_url(url.replace('\\/', '/'))
if url:
itemlist.append(Item(channel=item.channel, title ='%s'+title, url=url, action='play', quality=item.quality,
language=IDIOMAS[language], infoLabels=item.infoLabels))
#logger.info("Intel44")
#scrapertools.printMatches(itemlist)
patron = "<a class='optn' href='([^']+)'.*?<img src='.*?>([^<]+)<.*?<img src='.*?>([^<]+)<"
matches = re.compile(patron, re.DOTALL).findall(data)
#logger.info("Intel66a")
#scrapertools.printMatches(matches)
matches = scrapertools.find_multiple_matches(data, patron)
for hidden_url, quality, language in matches:
if not config.get_setting('unify'):
title = ' [%s][%s]' % (quality, IDIOMAS[language])
@@ -260,27 +255,32 @@ def findvideos(item):
title = ''
new_data = httptools.downloadpage(hidden_url).data
url = scrapertools.find_single_match(new_data, 'id="link" href="([^"]+)"')
url = url.replace('\\/', '/')
url = get_url(url)
if "mega" not in url and "mediafire" not in url:
url = get_url(url.replace('\\/', '/'))
if url:
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', quality=quality,
language=IDIOMAS[language], infoLabels=item.infoLabels))
#logger.info("Intel55")
#scrapertools.printMatches(itemlist)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
itemlist.sort(key=lambda it: (it.language, it.server, it.quality))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
return itemlist
def get_url(url):
logger.info()
if "cinetux.me" in url:
d1 = httptools.downloadpage(url).data
if "mail" in url:
id = scrapertools.find_single_match(d1, '<img src="[^#]+#(\w+)')
#logger.info("Intel77b %s" %id)
url = "https://my.mail.ru/video/embed/" + id
if "mail" in url or "drive" in url or "ok.cinetux" in url or "mp4/" in url:
id = scrapertools.find_single_match(d1, '<img src="[^#]+#([^"]+)"')
d1 = d1.replace("'",'"')
url = scrapertools.find_single_match(d1, '<iframe src="([^"]+)') + id
if "drive" in url:
url += "/preview"
else:
url = scrapertools.find_single_match(d1, 'document.location.replace\("([^"]+)')
#logger.info("Intel22a %s" %d1)
#logger.info("Intel77a %s" %url)
url = url.replace("povwideo","powvideo")
return url
def play(item):
    """Final resolve step before playback.

    Restores the thumbnail from ``contentThumbnail`` (findvideos may have
    overwritten it) and returns the item wrapped in a list, as the
    platform's play handlers expect.
    """
    item.thumbnail = item.contentThumbnail
    return [item]

View File

@@ -160,26 +160,19 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if "onclick=\"changeLink('" in data:
patron = "onclick=.changeLink\('([^']+)'"
matches = scrapertools.find_multiple_matches(data, patron)
for id in matches:
url = devuelve_enlace(base64.b64decode(id))
itemlist.append(item.clone(title="Ver en %s",url=url, action="play"))
else:
patron = 'data-type="([^"]+).*?'
patron += 'data-post="([^"]+).*?'
patron += 'data-nume="([^"]+).*?'
patron += 'server">([^<]+).*?'
matches = scrapertools.find_multiple_matches(data, patron)
headers = {"X-Requested-With":"XMLHttpRequest"}
for scrapedtype, scrapedpost, scrapednume, scrapedserver in matches:
post = "action=doo_player_ajax&type=%s&post=%s&nume=%s" %(scrapedtype, scrapedpost, scrapednume)
data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", headers=headers, post=post).data
url1 = scrapertools.find_single_match(data1, "src='([^']+)")
url1 = devuelve_enlace(url1)
if url1:
itemlist.append(item.clone(title="Ver en %s",url=url1, action="play"))
patron = 'data-type="(tv).*?'
patron += 'data-post="([^"]+).*?'
patron += 'data-nume="([^"]+).*?'
patron += 'server">([^<]+).*?'
matches = scrapertools.find_multiple_matches(data, patron)
headers = {"X-Requested-With":"XMLHttpRequest"}
for scrapedtype, scrapedpost, scrapednume, scrapedserver in matches:
post = "action=doo_player_ajax&type=%s&post=%s&nume=%s" %(scrapedtype, scrapedpost, scrapednume)
data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", headers=headers, post=post).data
url1 = scrapertools.find_single_match(data1, "src='([^']+)")
url1 = devuelve_enlace(url1)
if url1:
itemlist.append(item.clone(title="Ver en %s",url=url1, action="play"))
tmdb.set_infoLabels(itemlist)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':

View File

@@ -143,10 +143,12 @@ def series_menu(item):
return itemlist
def get_source(url):
def get_source(url, referer=None):
logger.info()
data = httptools.downloadpage(url).data
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer, 'x-requested-with': 'XMLHttpRequest'}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
@@ -173,7 +175,7 @@ def list_all (item):
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedyear, scrapedtitle in matches:
url = host+scrapedurl+'p001/'
url = host+scrapedurl
thumbnail = scrapedthumbnail
contentTitle=scrapedtitle
title = contentTitle
@@ -349,16 +351,15 @@ def season_episodes(item):
def get_links_by_language(item, data):
logger.info()
video_list = []
language = scrapertools.find_single_match(data, 'ul id=level\d_(.*?)\s*class=')
patron = 'data-source=(.*?)data.*?srt=(.*?)data-iframe.*?Opci.*?<.*?hidden>[^\(]\((.*?)\)'
language = scrapertools.find_single_match(data, 'ul id="level\d_([^"]+)"\s*class=')
patron = 'data-source="([^"]+)"data-quality="([^"]+)"data-srt="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if language in IDIOMAS:
language = IDIOMAS[language]
for url, sub, quality in matches:
for url, quality, sub in matches:
if 'http' not in url:
new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
@@ -391,15 +392,19 @@ def findvideos(item):
logger.info()
itemlist = []
video_list = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
if item.contentType == 'movie':
new_url = new_url = item.url.replace('/pelicula/', '/player/%s/' % item.contentType)
else:
base_url = scrapertools.find_single_match(item.url, '(.*?)/temporada')
new_url = base_url.replace('/serie/', '/player/serie/')
new_url += '|%s|%s/' % (item.contentSeason, item.contentEpisodeNumber)
data = get_source(new_url, referer=item.url)
patron_language ='(<ul id=level\d_.*?\s*class=.*?ul>)'
patron_language ='(<ul id="level\d_.*?"*class=.*?ul>)'
matches = re.compile(patron_language, re.DOTALL).findall(data)
for language in matches:
video_list.extend(get_links_by_language(item, language))
video_list = servertools.get_servers_itemlist(video_list, lambda i: i.title % (i.server.capitalize(), i.language,
i.quality) )
# Requerido para FilterTools

View File

@@ -1,22 +0,0 @@
{
"id": "planetadocumental",
"name": "Planeta documental",
"language": ["*"],
"active": true,
"adult": false,
"thumbnail": "https://s8.postimg.cc/r6njedwdt/planeta_documental1.png",
"banner": "https://s8.postimg.cc/6za3m36m9/planeta_documental2.png",
"categories": [
"documentary"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,142 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel Planeta Documental -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from channelselector import get_thumb
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
IDIOMAS = {"Latino": "LAT"}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['gvideo']
host = "https://www.planetadocumental.com"
def mainlist(item):
    """Build the channel's root menu: latest documentaries, genres, search."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    menu = [
        item.clone(title="Últimos documentales", action="lista",
                   url=host, thumbnail=get_thumb('lastest', auto=True)),
        item.clone(title="Por genero", action="generos",
                   url=host, thumbnail=get_thumb('genres', auto=True)),
        item.clone(title="", action=""),  # visual separator entry
        item.clone(title="Buscar...", action="search",
                   thumbnail=get_thumb('search', auto=True)),
    ]
    return menu
def generos(item):
    """List genre entries scraped from the site's dropdown navigation menu."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    menu_html = scrapertools.find_single_match(
        page, 'sub-menu elementor-nav-menu--dropdown(.*?)</ul')
    pairs = scrapertools.find_multiple_matches(
        menu_html, 'href="([^"]+).*?>([^<]+)')
    return [item.clone(action="sub_list", title=name, url=link)
            for link, name in pairs]
def findvideos(item):
    """Extract the playable video link from a documentary's detail page."""
    logger.info()
    results = []
    page = httptools.downloadpage(item.url).data
    url = scrapertools.find_single_match(
        page, 'Ver Online.*?iframe src="([^"]+)')
    if "/gd/" in url:
        # Google-Drive style embed: fetch the embed page, normalise the JSON-ish
        # "file:" key and pull the direct file URL out of the <source> entry.
        embed = httptools.downloadpage(url).data.replace("file:", '"file":')
        url = scrapertools.find_single_match(
            embed, 'source.*?file":\s*"([^"]+)')
        results.append(item.clone(action="play",
                                  server="directo",
                                  title="Ver video " + item.title,
                                  url=url))
    elif url:
        results.append(item.clone(action="play",
                                  title="Ver video " + item.title,
                                  url=url))
    results = servertools.get_servers_itemlist(results)
    # Required for FilterTools
    results = filtertools.get_links(results, item, list_language)
    # Required for AutoPlay
    autoplay.start(results, item)
    return results
def lista(item):
    """List the latest documentaries from the site's home page."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    patron = ('post__thumbnail__link.*?src="([^"]+).*?'
              'href="([^"]+).*?'
              '>([^<]+).*?')
    entries = scrapertools.find_multiple_matches(page, patron)
    itemlist = []
    for thumb, link, name in entries:
        name = name.strip()
        itemlist.append(item.clone(action="findvideos",
                                   contentTitle=name,
                                   title=name,
                                   url=link,
                                   thumbnail=thumb))
    return itemlist
def search(item, texto):
    """Global-search entry point for this channel.

    Args:
        item: channel Item; its ``url``/``extra`` are set for the query.
        texto: search phrase typed by the user.

    Returns:
        A list of result Items. Always a list — empty on a blank query or
        on any scraping error — so a failing channel never interrupts the
        global search.
    """
    logger.info()
    if texto == "":
        # Original fell through and implicitly returned None here; callers
        # expect a list, so return an explicit empty one.
        return []
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=" + texto
    item.extra = "busqueda"
    try:
        return sub_list(item)
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
    # propagate; scraping errors are logged and swallowed on purpose.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def sub_list(item):
    """Parse a search-results / genre page into playable entries."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    patron = ('post-thumb-img-content post-thumb.*?src="([^"]+).*?'
              'href="([^"]+).*?'
              '>([^<]+).*?')
    found = scrapertools.find_multiple_matches(page, patron)
    # contentTitle keeps the raw scraped text; title is stripped (as before).
    return [item.clone(action="findvideos",
                       contentTitle=name,
                       title=name.strip(),
                       url=link,
                       thumbnail=thumb)
            for thumb, link, name in found]

View File

@@ -81,6 +81,7 @@ def peliculas(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, scrapedtitle, plot in matches:
plot = scrapertools.decodeHtmlentities(plot)
itemlist.append(item.clone(channel=__channel__, action="findvideos", title=scrapedtitle.capitalize(),
url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels={"plot": plot}, fanart=scrapedthumbnail,
@@ -167,13 +168,14 @@ def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|\s{2}|&nbsp;", "", data)
# logger.info(data)
patron = '<iframe src="([^"]+)".*?webkitallowfullscreen="true" mozallowfullscreen="true"></iframe>'
patron = '<iframe src="[^"]+".*?<iframe src="([^"]+)" scrolling="no" frameborder="0"'
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:
server = servertools.get_server_from_url(url)
title = "Ver en: [COLOR yellow](%s)[/COLOR]" % server
title = "Ver en: [COLOR yellow](%s)[/COLOR]" % server.title()
itemlist.append(item.clone(action='play', title=title, server=server, url=url))

View File

@@ -22,8 +22,7 @@ def mainlist(item):
def search(item, texto):
logger.info("texto = %s" %(texto))
item.url = urljoin(HOST, "search&q=" + texto)
item.url = urljoin(HOST, "search?q=" + texto)
try:
return links(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
@@ -33,30 +32,29 @@ def search(item, texto):
logger.error("%s" % line)
return []
def categories(item):
    """Scrape the category list (name + video count) into menu entries."""
    logger.info()
    html = httptools.downloadpage(item.url).data
    found = re.findall("href=[\"'](?P<url>/search[^\"']+).*?>(?P<name>[^<>]+)</div>.*?badge[^>]+>(?P<counter>\d+)", html, re.DOTALL | re.MULTILINE)
    return [item.clone(action="links",
                       title="%s (%s videos)" % (name, counter),
                       url=urljoin(item.url, path))
            for path, name, counter in found]
def get_page(url):
    """Return the page number from a url's ``p=`` query parameter.

    Defaults to 1 when the parameter is absent.
    """
    # Raw string: the original "p=(\d+)" relied on an invalid escape
    # sequence, which is a SyntaxWarning on modern Python.
    match = re.search(r"p=(\d+)", url)
    if match:
        return int(match.group(1))
    return 1
def get_page_url(url, page):
    """Return *url* with its ``p=`` query parameter set to *page*.

    Replaces an existing ``p=`` value in place; if the url has none, a
    new one is appended (using ``?`` or ``&`` as appropriate).
    """
    logger.debug("URL: %s to page %d" % (url, page))
    # Raw strings: the originals used invalid escape sequences (\?, \d, \g)
    # in plain strings, a SyntaxWarning on modern Python.
    resultURL = re.sub(r"([&\?]p=)(?:\d+)", r"\g<1>%d" % page, url)
    if resultURL == url:
        # No existing p= parameter was found: append one.
        resultURL += ("&" if "?" in url else "?") + "p=%d" % (page)
    logger.debug("Result: %s" % (resultURL))
    return resultURL
@@ -64,21 +62,15 @@ def get_page_url(url, page):
def links(item):
logger.info()
data = httptools.downloadpage(item.url).data
reExpr = "<img\s+src=['\"](?P<img>[^'\"]+)[^>]+(?:title|alt)[^'\"]*['\"](?P<title>[^\"]+)[^>]+id[^'\"]*['\"](?P<id>[^'\"]+)[^>]*>(?:[^<]*<[^>]+>(?P<quality>[^<]+)<)?[^<]*<[^>]*duration[^>]*>(?P<duration>[^<]+)"
reResults = re.findall(reExpr, data, re.MULTILINE | re.DOTALL)
result = []
for img, title, vID, quality, duration in reResults:
logger.info("[link] %(title)s [%(quality)s] [%(duration)s]: %(vid)s (%(img)s" % ({"title": title, "duration": duration, "vid": vID, "img": img, "quality": quality if quality else "--"}))
formattedQuality = ""
if quality:
formattedQuality += " [%s]" % (quality)
titleFormatted = "%(title)s%(quality)s [%(duration)s]" % ({"title": title, "quality": formattedQuality, "duration": duration})
result.append(item.clone(action = "play", title = titleFormatted, url = urljoin(item.url, "/view/%s" % (vID)), thumbnail = urljoin(item.url, img), vID = vID))
# Has pagination
paginationOccurences = data.count('class="prevnext"')
if paginationOccurences:
@@ -86,13 +78,11 @@ def links(item):
logger.info("Page " + str(page) + " Ocurrences: " + str(paginationOccurences))
if page > 1:
result.append(item.clone(action = "links", title = "<< Anterior", url = get_page_url(item.url, page - 1)))
if paginationOccurences > 1 or page == 1:
result.append(item.clone(action = "links", title = "Siguiente >>", url = get_page_url(item.url, page + 1)))
return result
def play(item):
logger.info(item)
embededURL = urljoin(item.url, "/view/%s" % (item.vID))

View File

@@ -14,7 +14,7 @@ import urllib
from base64 import b64decode
from core import httptools
from platformcode import config
from platformcode import config, logger
def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
@@ -26,7 +26,7 @@ def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
class UnshortenIt(object):
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net'
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net'
_linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
_adfocus_regex = r'adfoc\.us'
_lnxlu_regex = r'lnx\.lu'
@@ -355,7 +355,6 @@ class UnshortenIt(object):
try:
r = httptools.downloadpage(uri, timeout=self._timeout)
html = r.data
session_id = re.findall(r'sessionId\:(.*?)\"\,', html)
if len(session_id) > 0:
session_id = re.sub(r'\s\"', '', session_id[0])
@@ -366,8 +365,9 @@ class UnshortenIt(object):
http_header["Referer"] = uri
http_header["Origin"] = "http://sh.st"
http_header["X-Requested-With"] = "XMLHttpRequest"
if config.is_xbmc():
import xbmc
xbmc.sleep(5 * 1000)
else:
time.sleep(5 * 1000)

View File

@@ -23,7 +23,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
videourl = scrapertools.find_multiple_matches(data, '<source src="(http[^"]+).*?data-res="([^"]+)')
videourl = scrapertools.find_multiple_matches(data, '<source src="(http[^"]+).*?title="([^"]+)')
scrapertools.printMatches(videourl)
for scrapedurl, scrapedquality in videourl:
if "loadthumb" in scrapedurl:

View File

@@ -3,33 +3,27 @@
# Conector DoStream By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[Dostream] El archivo no existe o ha sido borrado"
return False, "[Dostream] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
logger.debug(data)
patron = "(?:'src'|'url'):'(http.*?)'"
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:
video_urls.append(['dostream',url])
data = httptools.downloadpage(page_url, headers={"Referer":page_url}).data
patron = '"label":"([^"]+)".*?'
patron += '"src":"(http.*?)".*?'
matches = scrapertools.find_multiple_matches(data, patron)
for label, url in matches:
video_urls.append(['%s [dostream]' %label, url])
video_urls.sort(key=lambda it: int(it[0].split("p ")[0]))
return video_urls

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "streamplay.to/(?:embed-|player-|)([a-z0-9]+)(?:.html|)",
"pattern": "streamplay.(?:to|me)/(?:embed-|player-|)([a-z0-9]+)(?:.html|)",
"url": "http://streamplay.to/player-\\1.html"
}
]