Updated

cinetux: fix links
danimados: fix movie links
planetadocumental: removed, the site only has .rar files
xms: fix links
descargacineclasico: fix links
bitp: fix links
dostream: fix links
@@ -1,6 +1,5 @@
 # -*- coding: utf-8 -*-
-
 import re
 from channels import autoplay
 from channels import filtertools
 from core import httptools
@@ -176,11 +175,11 @@ def destacadas(item):
     item.text_color = color2
     data = httptools.downloadpage(item.url).data
     bloque = scrapertools.find_single_match(data, 'peliculas_destacadas.*?class="letter_home"')
-    patron = '(?s)title="([^"]+)".*?'
-    patron += 'href="([^"]+)".*?'
+    patron = '(?s)href="([^"]+)".*?'
+    patron += 'alt="([^"]+)".*?'
     patron += 'src="([^"]+)'
     matches = scrapertools.find_multiple_matches(bloque, patron)
-    for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
+    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
         scrapedurl = CHANNEL_HOST + scrapedurl
         itemlist.append(item.clone(action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle,
                                    url=scrapedurl, thumbnail=scrapedthumbnail,
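
For reference, a minimal standalone sketch of what the new href/alt/src pattern pulls out of a listing block (the HTML snippet and the CHANNEL_HOST value are invented for illustration, not taken from the site):

import re

CHANNEL_HOST = "https://example.invalid"  # placeholder; the channel defines the real host
html = '<a href="/pelicula-1"><img alt="Pelicula 1" src="/thumbs/1.jpg"></a>'
patron = '(?s)href="([^"]+)".*?alt="([^"]+)".*?src="([^"]+)'
for url, title, thumb in re.findall(patron, html):
    # Relative links are made absolute exactly as the loop above does
    print(CHANNEL_HOST + url, title, thumb)
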
@@ -224,11 +223,12 @@ def findvideos(item):
     logger.info()
     itemlist=[]
     data = httptools.downloadpage(item.url).data
-    patron = 'class="title">.*?src.*?/>([^>]+)</span>.*?data-type="([^"]+).*?data-post="(\d+)".*?data-nume="(\d+)'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    #logger.info("Intel66")
-    #scrapertools.printMatches(matches)
-    for language, tp, pt, nm in matches:
+    patron = 'tooltipctx.*?data-type="([^"]+).*?'
+    patron += 'data-post="(\d+)".*?'
+    patron += 'data-nume="(\d+).*?'
+    patron += 'class="title">.*?src.*?/>([^<]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for tp, pt, nm, language in matches:
         language = language.strip()
         post = {'action':'doo_player_ajax', 'post':pt, 'nume':nm, 'type':tp}
         post = urllib.urlencode(post)
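
Both cinetux hunks revolve around the same Dooplay idiom: scrape data-type/data-post/data-nume from the player tabs, then POST them back to WordPress admin-ajax.php to get the real iframe. A rough standard-library sketch of that request (Python 3 here; the host and the posted values are placeholders, not the site's):

import re
from urllib.parse import urlencode
from urllib.request import Request, urlopen

host = "https://example.invalid/"  # placeholder host
post = urlencode({'action': 'doo_player_ajax', 'post': '1234', 'nume': '1', 'type': 'movie'})
req = Request(host + "wp-admin/admin-ajax.php", data=post.encode(),
              headers={"X-Requested-With": "XMLHttpRequest"})
data = urlopen(req).read().decode()            # response embeds the player iframe
url = re.search(r"src='([^']+)'", data).group(1)
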
@@ -242,17 +242,12 @@ def findvideos(item):
         else:
             title = ''
         url = scrapertools.find_single_match(new_data, "src='([^']+)'")
-        #logger.info("Intel33 %s" %url)
-        url = get_url(url)
+        if "mega" not in url and "mediafire" not in url:
+            url = get_url(url.replace('\\/', '/'))
         if url:
             itemlist.append(Item(channel=item.channel, title ='%s'+title, url=url, action='play', quality=item.quality,
                                  language=IDIOMAS[language], infoLabels=item.infoLabels))
-    #logger.info("Intel44")
-    #scrapertools.printMatches(itemlist)
     patron = "<a class='optn' href='([^']+)'.*?<img src='.*?>([^<]+)<.*?<img src='.*?>([^<]+)<"
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    #logger.info("Intel66a")
-    #scrapertools.printMatches(matches)
+    matches = scrapertools.find_multiple_matches(data, patron)
     for hidden_url, quality, language in matches:
         if not config.get_setting('unify'):
             title = ' [%s][%s]' % (quality, IDIOMAS[language])
@@ -260,27 +255,32 @@ def findvideos(item):
             title = ''
         new_data = httptools.downloadpage(hidden_url).data
         url = scrapertools.find_single_match(new_data, 'id="link" href="([^"]+)"')
-        url = url.replace('\\/', '/')
-        url = get_url(url)
+        if "mega" not in url and "mediafire" not in url:
+            url = get_url(url.replace('\\/', '/'))
         if url:
             itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', quality=quality,
                                  language=IDIOMAS[language], infoLabels=item.infoLabels))
-    #logger.info("Intel55")
-    #scrapertools.printMatches(itemlist)
     itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
+    itemlist.sort(key=lambda it: (it.language, it.server, it.quality))
     tmdb.set_infoLabels(itemlist, __modo_grafico__)
     return itemlist


 def get_url(url):
     logger.info()
     if "cinetux.me" in url:
         d1 = httptools.downloadpage(url).data
-        if "mail" in url:
-            id = scrapertools.find_single_match(d1, '<img src="[^#]+#(\w+)')
-            #logger.info("Intel77b %s" %id)
-            url = "https://my.mail.ru/video/embed/" + id
+        if "mail" in url or "drive" in url or "ok.cinetux" in url or "mp4/" in url:
+            id = scrapertools.find_single_match(d1, '<img src="[^#]+#([^"]+)"')
+            d1 = d1.replace("'",'"')
+            url = scrapertools.find_single_match(d1, '<iframe src="([^"]+)') + id
+            if "drive" in url:
+                url += "/preview"
         else:
             url = scrapertools.find_single_match(d1, 'document.location.replace\("([^"]+)')
-        #logger.info("Intel22a %s" %d1)
-        #logger.info("Intel77a %s" %url)
     url = url.replace("povwideo","powvideo")
     return url


 def play(item):
     item.thumbnail = item.contentThumbnail
     return [item]
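
get_url's new branch resolves cinetux intermediate pages by gluing a fragment id (hidden after '#' in an <img> src) onto the page's <iframe> src. A condensed illustration with invented HTML:

import re

d1 = '<img src="/pixel.png#abc123"> <iframe src="https://my.mail.ru/video/embed/">'
vid = re.search(r'<img src="[^#]+#([^"]+)"', d1).group(1)    # 'abc123'
url = re.search(r'<iframe src="([^"]+)', d1).group(1) + vid  # '.../video/embed/abc123'
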
@@ -160,26 +160,19 @@ def findvideos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    if "onclick=\"changeLink('" in data:
-        patron = "onclick=.changeLink\('([^']+)'"
-        matches = scrapertools.find_multiple_matches(data, patron)
-        for id in matches:
-            url = devuelve_enlace(base64.b64decode(id))
-            itemlist.append(item.clone(title="Ver en %s",url=url, action="play"))
-    else:
-        patron = 'data-type="([^"]+).*?'
-        patron += 'data-post="([^"]+).*?'
-        patron += 'data-nume="([^"]+).*?'
-        patron += 'server">([^<]+).*?'
-        matches = scrapertools.find_multiple_matches(data, patron)
-        headers = {"X-Requested-With":"XMLHttpRequest"}
-        for scrapedtype, scrapedpost, scrapednume, scrapedserver in matches:
-            post = "action=doo_player_ajax&type=%s&post=%s&nume=%s" %(scrapedtype, scrapedpost, scrapednume)
-            data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", headers=headers, post=post).data
-            url1 = scrapertools.find_single_match(data1, "src='([^']+)")
-            url1 = devuelve_enlace(url1)
-            if url1:
-                itemlist.append(item.clone(title="Ver en %s",url=url1, action="play"))
+    patron = 'data-type="(tv).*?'
+    patron += 'data-post="([^"]+).*?'
+    patron += 'data-nume="([^"]+).*?'
+    patron += 'server">([^<]+).*?'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    headers = {"X-Requested-With":"XMLHttpRequest"}
+    for scrapedtype, scrapedpost, scrapednume, scrapedserver in matches:
+        post = "action=doo_player_ajax&type=%s&post=%s&nume=%s" %(scrapedtype, scrapedpost, scrapednume)
+        data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", headers=headers, post=post).data
+        url1 = scrapertools.find_single_match(data1, "src='([^']+)")
+        url1 = devuelve_enlace(url1)
+        if url1:
+            itemlist.append(item.clone(title="Ver en %s",url=url1, action="play"))
     tmdb.set_infoLabels(itemlist)
     itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
     if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
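
The branch deleted above handled pages that hid links as base64 inside onclick="changeLink('...')" attributes; decoding was a plain b64decode, roughly (sample markup invented):

import base64
import re

data = "<a onclick=\"changeLink('aHR0cHM6Ly9leGFtcGxlLmludmFsaWQvdmlkZW8=')\">"
for enc in re.findall(r"onclick=.changeLink\('([^']+)'", data):
    # Each id decodes straight to the hidden link
    print(base64.b64decode(enc).decode())  # https://example.invalid/video
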
@@ -1,22 +0,0 @@
-{
-    "id": "planetadocumental",
-    "name": "Planeta documental",
-    "language": ["*"],
-    "active": true,
-    "adult": false,
-    "thumbnail": "https://s8.postimg.cc/r6njedwdt/planeta_documental1.png",
-    "banner": "https://s8.postimg.cc/6za3m36m9/planeta_documental2.png",
-    "categories": [
-        "documentary"
-    ],
-    "settings": [
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Incluir en busqueda global",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        }
-    ]
-}
@@ -1,142 +0,0 @@
-# -*- coding: utf-8 -*-
-# -*- Channel Planeta Documental -*-
-# -*- Created for Alfa-addon -*-
-# -*- By the Alfa Develop Group -*-
-
-from core import httptools
-from core import jsontools
-from core import scrapertools
-from core import servertools
-from core import tmdb
-from core.item import Item
-from channelselector import get_thumb
-from platformcode import config, logger
-from channels import autoplay
-from channels import filtertools
-
-
-IDIOMAS = {"Latino": "LAT"}
-list_language = IDIOMAS.values()
-
-list_quality = []
-
-list_servers = ['gvideo']
-
-host = "https://www.planetadocumental.com"
-
-
-def mainlist(item):
-    logger.info()
-    itemlist = []
-    autoplay.init(item.channel, list_servers, list_quality)
-    itemlist.append(item.clone(title="Últimos documentales", action="lista",
-                               url= host,
-                               thumbnail=get_thumb('lastest', auto=True)))
-    itemlist.append(item.clone(title="Por genero", action="generos",
-                               url= host, thumbnail=get_thumb('genres', auto=True)))
-    itemlist.append(item.clone(title="", action=""))
-    itemlist.append(item.clone(title="Buscar...", action="search", thumbnail=get_thumb('search', auto=True)))
-
-    return itemlist
-
-
-def generos(item):
-    logger.info()
-    itemlist = []
-    data = httptools.downloadpage(item.url).data
-    bloque = scrapertools.find_single_match(data, 'sub-menu elementor-nav-menu--dropdown(.*?)</ul')
-    patron = 'href="([^"]+).*?'
-    patron += '>([^<]+)'
-    matches = scrapertools.find_multiple_matches(bloque, patron)
-    for scrapedurl, scrapedtitle in matches:
-        itemlist.append(item.clone(
-            action = "sub_list",
-            title = scrapedtitle,
-            url = scrapedurl,
-        ))
-    return itemlist
-
-
-def findvideos(item):
-    logger.info()
-    itemlist = []
-    data = httptools.downloadpage(item.url).data
-    url = scrapertools.find_single_match(data, 'Ver Online.*?iframe src="([^"]+)')
-    if "/gd/" in url:
-        data = httptools.downloadpage(url).data
-        data = data.replace("file:",'"file":')
-        url = scrapertools.find_single_match(data, 'source.*?file":\s*"([^"]+)')
-        itemlist.append(item.clone(
-            action = "play",
-            server = "directo",
-            title = "Ver video " + item.title,
-            url = url
-        ))
-    else:
-        if url:
-            itemlist.append(item.clone(
-                action = "play",
-                title = "Ver video " + item.title,
-                url = url
-            ))
-    itemlist = servertools.get_servers_itemlist(itemlist)
-
-    # Requerido para FilterTools
-
-    itemlist = filtertools.get_links(itemlist, item, list_language)
-
-    # Requerido para AutoPlay
-
-    autoplay.start(itemlist, item)
-    return itemlist
-
-
-def lista(item):
-    logger.info()
-    itemlist = []
-    data = httptools.downloadpage(item.url).data
-    patron = 'post__thumbnail__link.*?src="([^"]+).*?'
-    patron += 'href="([^"]+).*?'
-    patron += '>([^<]+).*?'
-    matches = scrapertools.find_multiple_matches(data, patron)
-    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
-        itemlist.append(item.clone(action = "findvideos",
-                                   contentTitle = scrapedtitle.strip(),
-                                   title = scrapedtitle.strip(),
-                                   url = scrapedurl,
-                                   thumbnail = scrapedthumbnail
-                                   ))
-    return itemlist
-
-
-def search(item, texto):
-    logger.info()
-    if texto != "":
-        texto = texto.replace(" ", "+")
-        item.url = host + "/?s=" + texto
-        item.extra = "busqueda"
-    try:
-        return sub_list(item)
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
-
-
-def sub_list(item):
-    logger.info()
-    itemlist = []
-    data = httptools.downloadpage(item.url).data
-    patron = 'post-thumb-img-content post-thumb.*?src="([^"]+).*?'
-    patron += 'href="([^"]+).*?'
-    patron += '>([^<]+).*?'
-    matches = scrapertools.find_multiple_matches(data, patron)
-    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
-        itemlist.append(item.clone(action = "findvideos",
-                                   contentTitle = scrapedtitle,
-                                   title = scrapedtitle.strip(),
-                                   url = scrapedurl,
-                                   thumbnail = scrapedthumbnail
-                                   ))
-    return itemlist
@@ -81,6 +81,7 @@ def peliculas(item):
     matches = re.compile(patron, re.DOTALL).findall(data)

     for scrapedthumbnail, scrapedurl, scrapedtitle, plot in matches:
+        plot = scrapertools.decodeHtmlentities(plot)

         itemlist.append(item.clone(channel=__channel__, action="findvideos", title=scrapedtitle.capitalize(),
                                    url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels={"plot": plot}, fanart=scrapedthumbnail,
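
The added decodeHtmlentities call turns entities like &iacute; in the scraped plot back into text; in the standard library the equivalent is html.unescape:

import html

plot = "Una pel&iacute;cula de acci&oacute;n"
print(html.unescape(plot))  # Una película de acción
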
@@ -167,13 +168,14 @@ def findvideos(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data)
     # logger.info(data)

-    patron = '<iframe src="([^"]+)".*?webkitallowfullscreen="true" mozallowfullscreen="true"></iframe>'
+    patron = '<iframe src="[^"]+".*?<iframe src="([^"]+)" scrolling="no" frameborder="0"'
     matches = scrapertools.find_multiple_matches(data, patron)

     for url in matches:
         server = servertools.get_server_from_url(url)
-        title = "Ver en: [COLOR yellow](%s)[/COLOR]" % server
+        title = "Ver en: [COLOR yellow](%s)[/COLOR]" % server.title()

         itemlist.append(item.clone(action='play', title=title, server=server, url=url))
@@ -22,8 +22,7 @@ def mainlist(item):

 def search(item, texto):
     logger.info("texto = %s" %(texto))

-    item.url = urljoin(HOST, "search&q=" + texto)
+    item.url = urljoin(HOST, "search?q=" + texto)
     try:
         return links(item)
     # Se captura la excepción, para no interrumpir al buscador global si un canal falla
@@ -33,30 +32,29 @@ def search(item, texto):
         logger.error("%s" % line)
     return []


 def categories(item):
     logger.info()
     data = httptools.downloadpage(item.url).data

     result = []

     categories = re.findall("href=[\"'](?P<url>/search[^\"']+).*?>(?P<name>[^<>]+)</div>.*?badge[^>]+>(?P<counter>\d+)", data, re.DOTALL | re.MULTILINE)
     for url, name, counter in categories:
         result.append(item.clone(action = "links", title = "%s (%s videos)" % (name, counter), url = urljoin(item.url, url)))

     return result


 def get_page(url):
     page = re.search("p=(\d+)", url)
     if page:
         return int(page.group(1))
     return 1


 def get_page_url(url, page):
     logger.debug("URL: %s to page %d" % (url, page))
     resultURL = re.sub("([&\?]p=)(?:\d+)", "\g<1>%d" % page, url)
     if resultURL == url:
         resultURL += ("&" if "?" in url else "?") + "p=%d" % (page)

     logger.debug("Result: %s" % (resultURL))
     return resultURL
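
get_page_url has two cases: rewrite an existing p= parameter or append one with the right separator. A self-contained check of both (logic copied from the hunk above, logger calls dropped; the URLs are placeholders):

import re

def get_page_url(url, page):
    # Rewrite an existing p= parameter, or append one if the URL has none.
    result = re.sub(r"([&?]p=)(?:\d+)", r"\g<1>%d" % page, url)
    if result == url:
        result += ("&" if "?" in url else "?") + "p=%d" % page
    return result

print(get_page_url("https://example.invalid/search?q=doc", 2))      # ...?q=doc&p=2
print(get_page_url("https://example.invalid/search?q=doc&p=2", 3))  # ...?q=doc&p=3
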
@@ -64,21 +62,15 @@ def get_page_url(url, page):
 def links(item):
     logger.info()
     data = httptools.downloadpage(item.url).data

     reExpr = "<img\s+src=['\"](?P<img>[^'\"]+)[^>]+(?:title|alt)[^'\"]*['\"](?P<title>[^\"]+)[^>]+id[^'\"]*['\"](?P<id>[^'\"]+)[^>]*>(?:[^<]*<[^>]+>(?P<quality>[^<]+)<)?[^<]*<[^>]*duration[^>]*>(?P<duration>[^<]+)"
     reResults = re.findall(reExpr, data, re.MULTILINE | re.DOTALL)
     result = []

     for img, title, vID, quality, duration in reResults:
         logger.info("[link] %(title)s [%(quality)s] [%(duration)s]: %(vid)s (%(img)s" % ({"title": title, "duration": duration, "vid": vID, "img": img, "quality": quality if quality else "--"}))

         formattedQuality = ""
         if quality:
             formattedQuality += " [%s]" % (quality)

         titleFormatted = "%(title)s%(quality)s [%(duration)s]" % ({"title": title, "quality": formattedQuality, "duration": duration})
         result.append(item.clone(action = "play", title = titleFormatted, url = urljoin(item.url, "/view/%s" % (vID)), thumbnail = urljoin(item.url, img), vID = vID))

     # Has pagination
     paginationOccurences = data.count('class="prevnext"')
     if paginationOccurences:
@@ -86,13 +78,11 @@ def links(item):
         logger.info("Page " + str(page) + " Ocurrences: " + str(paginationOccurences))
         if page > 1:
             result.append(item.clone(action = "links", title = "<< Anterior", url = get_page_url(item.url, page - 1)))

         if paginationOccurences > 1 or page == 1:
             result.append(item.clone(action = "links", title = "Siguiente >>", url = get_page_url(item.url, page + 1)))


     return result


 def play(item):
     logger.info(item)
     embededURL = urljoin(item.url, "/view/%s" % (item.vID))
@@ -26,7 +26,7 @@ def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):


 class UnshortenIt(object):
-    _adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net'
+    _adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net'
     _linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
     _adfocus_regex = r'adfoc\.us'
     _lnxlu_regex = r'lnx\.lu'
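
The only change here is appending swiftviz\.net to the shortener alternation; per the find_in_text signature in the hunk header, detection is a case-insensitive re.search, e.g.:

import re

_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net'
print(bool(re.search(_adfly_regex, "http://swiftviz.net/abc", re.IGNORECASE)))  # True
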
@@ -23,7 +23,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
     logger.info("(page_url='%s')" % page_url)
     video_urls = []
     data = httptools.downloadpage(page_url).data
-    videourl = scrapertools.find_multiple_matches(data, '<source src="(http[^"]+).*?data-res="([^"]+)')
+    videourl = scrapertools.find_multiple_matches(data, '<source src="(http[^"]+).*?title="([^"]+)')
     scrapertools.printMatches(videourl)
     for scrapedurl, scrapedquality in videourl:
         if "loadthumb" in scrapedurl:
@@ -3,33 +3,27 @@
 # Conector DoStream By Alfa development Group
 # --------------------------------------------------------

 import re
 from core import httptools
 from core import scrapertools
 from platformcode import logger


 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)

     data = httptools.downloadpage(page_url)

     if data.code == 404:
         return False, "[Dostream] El archivo no existe o ha sido borrado"
     return True, ""


 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("url=" + page_url)

     video_urls = []
-    data = httptools.downloadpage(page_url).data
-    data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
-    logger.debug(data)
-    patron = "(?:'src'|'url'):'(http.*?)'"
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    for url in matches:
-        video_urls.append(['dostream',url])
+    data = httptools.downloadpage(page_url, headers={"Referer":page_url}).data
+    patron = '"label":"([^"]+)".*?'
+    patron += '"src":"(http.*?)".*?'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for label, url in matches:
+        video_urls.append(['%s [dostream]' %label, url])
+    video_urls.sort(key=lambda it: int(it[0].split("p ")[0]))
     return video_urls
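
The new sort key assumes labels shaped like '1080p [dostream]': splitting on 'p ' and casting the prefix to int orders entries by resolution, lowest first:

video_urls = [['720p [dostream]', 'u1'], ['1080p [dostream]', 'u2'], ['360p [dostream]', 'u3']]
video_urls.sort(key=lambda it: int(it[0].split("p ")[0]))
print([label for label, _ in video_urls])  # ['360p [dostream]', '720p [dostream]', '1080p [dostream]']
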
@@ -4,7 +4,7 @@
     "ignore_urls": [],
     "patterns": [
         {
-            "pattern": "streamplay.to/(?:embed-|player-|)([a-z0-9]+)(?:.html|)",
+            "pattern": "streamplay.(?:to|me)/(?:embed-|player-|)([a-z0-9]+)(?:.html|)",
            "url": "http://streamplay.to/player-\\1.html"
         }
     ]
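
A quick check that the widened pattern accepts the .me mirror and still feeds capture group 1 into the player URL template:

import re

pattern = r"streamplay.(?:to|me)/(?:embed-|player-|)([a-z0-9]+)(?:.html|)"
m = re.search(pattern, "https://streamplay.me/embed-abc123.html")
print("http://streamplay.to/player-%s.html" % m.group(1))  # http://streamplay.to/player-abc123.html
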