Updated
- asialiveaction: fix for a change in the site structure.
- danimados: fix for a change in the site structure.
- hdfull: search fix.
- Code update.
- uploadmp4: new server.
@@ -10,6 +10,7 @@ from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from lib import jsunpack
from platformcode import config, logger

@@ -26,13 +27,13 @@ def mainlist(item):
    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas",
                         url=urlparse.urljoin(host, "/category/pelicula"), type='pl', pag=1))
-    #itemlist.append(Item(channel=item.channel, action="lista", title="Series",
-    #                     url=urlparse.urljoin(host, "/category/serie"), type='sr', pag=1))
+    itemlist.append(Item(channel=item.channel, action="lista", title="Series",
+                         url=urlparse.urljoin(host, "/category/serie"), type='sr', pag=1))
    itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre'))
    itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality'))
    itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host, cat='abc'))
    itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host, cat='year'))
-    #itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/search?q="))
+    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/?s="))
    autoplay.show_option(item.channel, itemlist)
    return itemlist

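The search entry drops the old /search?q= endpoint for the WordPress-style /?s= query. A minimal sketch of the URL that search() (further down) builds from this base; the host value is a placeholder, not the channel's real one:

host = "https://example-site.com"      # placeholder host, for illustration only
base = host + "/?s="                   # new WordPress-style search endpoint
texto = "one piece".replace(" ", "+")  # same normalization search() applies
assert base + texto == "https://example-site.com/?s=one+piece"
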
@@ -42,8 +43,10 @@ def category(item):
    itemlist = list()
    data = httptools.downloadpage(host).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    action = "lista"
    if item.cat == 'abc':
        data = scrapertools.find_single_match(data, '<div class="Body Container">(.+?)<main>')
        action = "lista_a"
    elif item.cat == 'genre':
        data = scrapertools.find_single_match(data, '<a>Géneros<\/a><ul class="sub.menu">(.+?)<a>Año<\/a>')
    elif item.cat == 'year':

@@ -54,7 +57,8 @@ def category(item):
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        if scrapedtitle != 'Próximas Películas':
-            itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl, type='cat', pag=0))
+            if not scrapedurl.startswith("http"): scrapedurl = host + scrapedurl
+            itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, type='cat', pag=0))
    return itemlist

@@ -62,8 +66,6 @@ def search_results(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-    logger.info(data)
    patron = '<span class=.post-labels.>([^<]+)</span>.*?class="poster-bg" src="([^"]+)"/>.*?<h4>.*?'
    patron += ">(\d{4})</a>.*?<h6>([^<]+)<a href='([^']+)"
    matches = scrapertools.find_multiple_matches(data, patron)

@@ -84,16 +86,55 @@ def search_results(item):
def search(item, texto):
    logger.info()

    texto = texto.replace(" ", "+")
    item.url = item.url + texto

    item.pag = 0
    if texto != '':
        return search_results(item)
    return lista(item)


def episodios(item):
    logger.info()
    itemlist = list()
    data = httptools.downloadpage(item.url).data
    data = data.replace('"ep0','"epp"')
    patron = '(?is)MvTbImg B.*?href="([^"]+)".*?'
    patron += 'src="([^"]+)".*?'
    patron += 'span>Episodio ([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedepi in matches:
        title="1x%s - %s" % (scrapedepi, item.contentSerieName)
        #urls = scrapertools.find_multiple_matches(scrapedurls, 'href="([^"]+)')
        itemlist.append(item.clone(action='findvideos', title=title, url=scrapedurl, thumbnail=scrapedthumbnail, type=item.type,
                                   infoLabels=item.infoLabels))
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
                             url=item.url, action="add_serie_to_library", extra="episodios",
                             contentSerieName=item.contentSerieName))
    return itemlist

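As a quick check of what the new episodios pattern captures (this hunk belongs to asialiveaction, per the commit message), here is a self-contained run against a made-up fragment shaped like the site's markup:

import re

patron = ('(?is)MvTbImg B.*?href="([^"]+)".*?'
          'src="([^"]+)".*?'
          'span>Episodio ([^<]+)')
html = ('<div class="MvTbImg B"><a href="https://example.com/ver/ep-1">'
        '<img src="https://example.com/thumbs/1.jpg"></a></div>'
        '<span>Episodio 1</span>')
print(re.findall(patron, html))
# [('https://example.com/ver/ep-1', 'https://example.com/thumbs/1.jpg', '1')]
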
def lista_a(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '(?is)Num">.*?href="([^"]+)".*?'
    patron += 'src="([^"]+)".*?>.*?'
    patron += '<strong>([^<]+)<.*?'
    patron += '<td>([^<]+)<.*?'
    patron += 'href.*?>([^"]+)<\/a>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedtype in matches:
        action = "findvideos"
        if "Serie" in scrapedtype: action = "episodios"
        itemlist.append(item.clone(action=action, title=scrapedtitle, contentTitle=scrapedtitle, contentSerieName=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                                   infoLabels={'year':scrapedyear}))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist


def lista(item):
    logger.info()
    next = True
    itemlist = []

    data = httptools.downloadpage(item.url).data

@@ -106,14 +147,12 @@ def lista(item):
    patron += '<span.*?>([^"]+)<\/span>.+?' #scrapedyear
    patron += '<a.+?>([^"]+)<\/a>' #scrapedtype
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedtype in matches:
        title="%s - %s" % (scrapedtitle,scrapedyear)

        new_item = Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
                        type=scrapedtype, infoLabels={'year':scrapedyear})

-        if scrapedtype == 'sr':
+        if scrapedtype == 'Serie':
            new_item.contentSerieName = scrapedtitle
            new_item.action = 'episodios'
        else:

@@ -135,39 +174,45 @@ def lista(item):
def findvideos(item):
    logger.info()
    itemlist = []
    if not item.urls:
        data = httptools.downloadpage(item.url).data
        matches = scrapertools.find_multiple_matches(data, 'http://www.sutorimu[^"]+')
    else:
        matches = item.urls
    for url in matches:
        if "spotify" in url:
            data = httptools.downloadpage(item.url).data.replace("&quot;",'"').replace("amp;","").replace("#038;","")
            matches = scrapertools.find_multiple_matches(data, 'TPlayerTb.*?id="([^"]+)".*?src="([^"]+)"')
            matches_del = scrapertools.find_multiple_matches(data, '(?is)<!--<td>.*?-->')
            # Remove the HTML comments - they contain duplicate links
            for del_m in matches_del:
                data = data.replace(del_m, "")
            # First group of links
            for id, url1 in matches:
                language = scrapertools.find_single_match(data, '(?is)data-tplayernv="%s".*?span><span>([^<]+)' %id)
                data1 = httptools.downloadpage(url1).data
                url = scrapertools.find_single_match(data1, 'src="([^"]+)')
                if "a-x" in url:
                    data1 = httptools.downloadpage(url, headers={"Referer":url1}).data
                    url = scrapertools.find_single_match(data1, 'src: "([^"]+)"')
                if "embed.php" not in url:
                    itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url))
            continue
        data = httptools.downloadpage(url).data
        language = scrapertools.find_single_match(data, '(?:ɥɔɐәlq|lɐʇәɯllnɟ) (\w+)')
        if not language: language = "VOS"
        bloque = scrapertools.find_single_match(data, "description articleBody(.*)/div")
        urls = scrapertools.find_multiple_matches(bloque, "iframe src='([^']+)")
        if urls:
            # streaming links
            for url1 in urls:
                if "luis" in url1:
                    data = httptools.downloadpage(url1).data
                    url1 = scrapertools.find_single_match(data, 'file: "([^"]+)')
                itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url1))
        else:
            # download links
            bloque = bloque.replace('"',"'")
            urls = scrapertools.find_multiple_matches(bloque, "href='([^']+)")
            for url2 in urls:
                itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url2))
            if "data-video" in bloque:
                urls = scrapertools.find_multiple_matches(bloque, "data-video='([^']+)")
                for url2 in urls:
                    itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = "https://tinyurl.com/%s" %url2 ))
            for item1 in itemlist:
                if "tinyurl" in item1.url:
                    item1.url = httptools.downloadpage(item1.url, follow_redirects=False, only_headers=True).headers.get("location", "")
        data1 = httptools.downloadpage(url).data
        packed = scrapertools.find_single_match(data1, "(?is)eval\(function\(p,a,c,k,e.*?</script>")
        unpack = jsunpack.unpack(packed)
        urls = scrapertools.find_multiple_matches(unpack, '"file":"([^"]+).*?label":"([^"]+)')
        for url2, quality in urls:
            itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2))
    # Second group of links
    matches = scrapertools.find_multiple_matches(data, '<span><a rel="nofollow" target="_blank" href="([^"]+)"')
    for url in matches:
        data1 = httptools.downloadpage(url).data
        matches1 = scrapertools.find_multiple_matches(data1, '"ser".*?</tr>')
        for ser in matches1:
            ser = ser.replace("×","x")
            aud = scrapertools.find_single_match(ser, 'aud"><i class="([^"]+)')
            sub = scrapertools.find_single_match(ser, 'sub"><i class="([^"]+)')
            quality = scrapertools.find_single_match(ser, 'res">.*?x([^<]+)')
            language = "Versión RAW"
            if aud == "jp" and sub == "si":
                language = "Sub. Español"
            matches2 = scrapertools.find_multiple_matches(ser, 'href="([^"]+)')
            for url2 in matches2:
                itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

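The first-group handler above extracts a packed eval(function(p,a,c,k,e,...)) script and decodes it with lib.jsunpack before scraping the file/label pairs. A minimal sketch of the post-unpack step; the unpacked jwplayer-style snippet below is hypothetical:

import re

# Hypothetical result of jsunpack.unpack(packed), shaped like a jwplayer setup
unpack = '{"sources":[{"file":"https://cdn.example.com/v.mp4","label":"720p"}]}'
print(re.findall('"file":"([^"]+).*?label":"([^"]+)', unpack))
# [('https://cdn.example.com/v.mp4', '720p')]
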
@@ -28,7 +28,7 @@ def mainlist(item):
    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="mainpage", title="Categorías", url=host,
                         thumbnail=thumb_series))
-    itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas Animadas", url=host+"peliculas/",
+    itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas Animadas", url=host+"peliculas/", extra="Peliculas Animadas",
                         thumbnail=thumb_series))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + "?s=",
                         thumbnail=thumb_series))

@@ -48,10 +48,10 @@ def sub_search(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
-    patron = '(?s)class="thumbnail animation-.*?href="([^"]+).*?'
-    patron += 'img src="([^"]+).*?'
-    patron += 'alt="([^"]+).*?'
-    patron += 'class="meta"(.*?)class="contenido"'
+    patron = '(?s)class="thumbnail animation-.*?href=([^>]+).*?'
+    patron += 'src=(.*?(?:jpg|jpeg)).*?'
+    patron += 'alt=(?:"|)(.*?)(?:"|>).*?'
+    patron += 'class=year>(.*?)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
        scrapedyear = scrapertools.find_single_match(scrapedyear, 'class="year">(\d{4})')

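The rewritten pattern drops the hard-coded attribute quotes: the quote-optional idiom alt=(?:"|)(.*?)(?:"|>) matches whether or not the page quotes its attributes, which is apparently what changed on the site. A self-contained check:

import re

patron = 'alt=(?:"|)(.*?)(?:"|>)'
print(re.findall(patron, '<img alt="Naruto">'))  # ['Naruto']
print(re.findall(patron, '<img alt=Naruto>'))    # ['Naruto']
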
@@ -105,26 +105,28 @@ def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-    if item.title=="Peliculas Animadas":
-        data_lista = scrapertools.find_single_match(data,
-                     '<div id="archive-content" class="animation-2 items">(.*)<a href=\'')
+    if item.extra == "Peliculas Animadas":
+        data_lista = scrapertools.find_single_match(data, '(?is)archive-content(.*?)class=pagination')
    else:
-        data_lista = scrapertools.find_single_match(data,
-                     '<divclass=items><article(.+?)<\/div><\/article><\/div>')
-    patron = '<imgsrc=([^"]+) alt="([^"]+)">.+?<ahref=([^"]+)><divclass=see>.+?<divclass=texto>(.+?)<\/div>'
+        data_lista = scrapertools.find_single_match(data, 'class=items><article(.+?)<\/div><\/article><\/div>')
+    patron = '(?is)src=(.*?(?:jpg|jpeg)).*?'
+    patron += 'alt=(?:"|)(.*?)(?:"|>).*?'
+    patron += 'href=([^>]+)>.*?'
+    patron += 'title.*?<span>([^<]+)<'
    matches = scrapertools.find_multiple_matches(data_lista, patron)
-    for scrapedthumbnail,scrapedtitle, scrapedurl, scrapedplot in matches:
+    for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedyear in matches:
        if item.title=="Peliculas Animadas":
            itemlist.append(
-                item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, contentType="movie",
-                           plot=scrapedplot, action="findvideos", show=scrapedtitle))
+                item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, contentType="movie",
+                           action="findvideos", contentTitle=scrapedtitle, infoLabels={'year':scrapedyear}))
        else:
            itemlist.append(
                item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
-                           context=autoplay.context,plot=scrapedplot, action="episodios", show=scrapedtitle))
-    if item.title!="Peliculas Animadas":
-        tmdb.set_infoLabels(itemlist)
+                           context=autoplay.context, action="episodios", contentSerieName=scrapedtitle))
+    tmdb.set_infoLabels(itemlist)
    next_page = scrapertools.find_single_match(data, 'rel=next href=([^>]+)>')
    if next_page:
        itemlist.append(item.clone(action="lista", title="Página siguiente>>", url=next_page, extra=item.extra))
    return itemlist

@@ -163,7 +165,7 @@ def findvideos(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'player-option-\d+.*?'
-    patron += 'data-sv="([^"]+).*?'
+    patron += 'data-sv=(\w+).*?'
    patron += 'data-user="([^"]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    headers = {"X-Requested-With":"XMLHttpRequest"}

@@ -172,8 +174,6 @@ def findvideos(item):
    data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
    url = base64.b64decode(scrapertools.find_single_match(data1, '<iframe data-source="([^"]+)"'))
    url1 = devuelve_enlace(url)
-    if "drive.google" in url1:
-        url1 = url1.replace("view","preview")
    if url1:
        itemlist.append(item.clone(title="Ver en %s",url=url1, action="play"))
    tmdb.set_infoLabels(itemlist)

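The player wraps its real embed in a base64-encoded data-source attribute, hence the base64.b64decode above. A standalone sketch with a hypothetical attribute value:

import base64
import re

data1 = '<iframe data-source="aHR0cHM6Ly9leGFtcGxlLmNvbS9lbWJlZA=="></iframe>'
encoded = re.search('<iframe data-source="([^"]+)"', data1).group(1)
print(base64.b64decode(encoded).decode("utf-8"))  # https://example.com/embed
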
@@ -196,11 +196,5 @@ def devuelve_enlace(url1):
    url = 'https:' + url1
    new_data = httptools.downloadpage(url).data
    new_data = new_data.replace('"',"'")
-    url1 = scrapertools.find_single_match(new_data, "iframe src='([^']+)")
-    new_data = httptools.downloadpage(url1).data
-    url = scrapertools.find_single_match(new_data, "sources:\s*\[\{file:\s*'([^']+)")
-    if "zkstream" in url or "cloudup" in url:
-        url1 = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
-    else:
-        url1 = url
+    url1 = scrapertools.find_single_match(new_data, "sources.*?file: '([^']+)")
    return url1

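The removed branch resolved zkstream/cloudup links by reading the redirect's Location header without following it (the asialiveaction hunk still uses the same trick for tinyurl links). A stdlib-only sketch of the idea; the short-link path is hypothetical:

try:
    from http.client import HTTPSConnection   # Python 3
except ImportError:
    from httplib import HTTPSConnection       # Python 2

conn = HTTPSConnection("tinyurl.com")
conn.request("HEAD", "/example-id")             # hypothetical short-link path
resp = conn.getresponse()                       # httplib never auto-follows
print(resp.status, resp.getheader("location"))  # e.g. 301 plus the target URL
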
@@ -16,6 +16,7 @@ from channelselector import get_thumb
host = "https://hdfull.me"


if config.get_setting('hdfulluser', 'hdfull'):
    account = True
else:

@@ -128,10 +129,11 @@ def menuseries(item):
def search(item, texto):
    logger.info()
    data = agrupa_datos(httptools.downloadpage(host).data)
-    sid = scrapertools.get_match(data, '.__csrf_magic. value="(sid:[^"]+)"')
+    sid = scrapertools.find_single_match(data, '.__csrf_magic. value="(sid:[^"]+)"')
    item.extra = urllib.urlencode({'__csrf_magic': sid}) + '&menu=search&query=' + texto
    item.title = "Buscar..."
    item.url = host + "/buscar"
+    item.texto = texto
    try:
        return fichas(item)
    # The exception is caught so that a failing channel does not break the global search

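hdfull's search posts a __csrf_magic token scraped from the home page; the fix swaps the exception-raising get_match for find_single_match (which just returns an empty string on no match) and stores the query on the item for fichas. A minimal sketch of the token extraction and POST body over a hypothetical page snippet:

import re
try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode        # Python 2

data = '<input type="hidden" name="__csrf_magic" value="sid:abc123,1234567890"/>'
sid = re.search('.__csrf_magic. value="(sid:[^"]+)"', data).group(1)
post = urlencode({'__csrf_magic': sid}) + '&menu=search&query=dune'
print(post)  # __csrf_magic=sid%3Aabc123%2C1234567890&menu=search&query=dune
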
@@ -246,7 +248,7 @@ def fichas(item):
    if len(s_p) == 1:
        data = s_p[0]
        if 'Lo sentimos</h3>' in s_p[0]:
-            return [Item(channel=item.channel, title="[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]" + texto.replace('%20',
+            return [Item(channel=item.channel, title="[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]" + item.texto.replace('%20',
                          ' ') + "[/COLOR] sin resultados")]
    else:
        data = s_p[0] + s_p[1]

@@ -10,15 +10,6 @@ from core import httptools
from platformcode import logger


-def cache_page(url, post=None, headers=None, modo_cache=None, timeout=None):
-    return cachePage(url, post, headers, modo_cache, timeout=timeout)
-
-
-def cachePage(url, post=None, headers=None, modoCache=None, timeout=None):
-    data = downloadpage(url, post=post, headers=headers, timeout=timeout)
-    return data


def downloadpage(url, post=None, headers=None, follow_redirects=True, timeout=None, header_to_get=None):
    response = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=follow_redirects,
                                      timeout=timeout)

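With the legacy cache_page/cachePage shims removed, callers go through httptools.downloadpage (wrapped just above) directly. A hedged migration sketch, runnable only inside the plugin; the URL is a placeholder:

from core import httptools

# Before: data = cachePage("https://example.com/page")
response = httptools.downloadpage("https://example.com/page")  # placeholder URL
data = response.data                        # the body cachePage used to return
loc = response.headers.get("location", "")  # headers dict, as used elsewhere above
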
plugin.video.alfa/servers/uploadmp4.json (new file, 42 lines)
@@ -0,0 +1,42 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(https://(?:www\\.|)uploadmp4.com/embed/[A-z0-9]+)",
                "url": "\\1"
            }
        ]
    },
    "free": true,
    "id": "uploadmp4",
    "name": "uploadmp4",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://www.uploadmp4.com/themes/flow/images/main_logo.png"
}

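The find_videos block is declarative: each pattern is a regex run over page data, and url is the substitution template used to build the final server URL. A quick check of this entry's pattern; the embed id is made up:

import re

pattern = "(https://(?:www\\.|)uploadmp4.com/embed/[A-z0-9]+)"
data = '<iframe src="https://www.uploadmp4.com/embed/Ab3xYz9"></iframe>'
match = re.search(pattern, data)
print(match.expand("\\1"))  # https://www.uploadmp4.com/embed/Ab3xYz9
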
plugin.video.alfa/servers/uploadmp4.py (new file, 27 lines)
@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector uploadmp4 By Alfa development Group
# --------------------------------------------------------

from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "no longer exists" in data:
        return False, "[uploadmp4] El video ha sido borrado"
    return True, ""


def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    video_urls = []
    videos_url = scrapertools.find_multiple_matches(data, 'label":"([^"]+).*?file":"([^"]+)')
    for quality, video in videos_url:
        video_urls.append(["%s [uploadmp4]" %quality, video])

    return video_urls

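A usage sketch for the new connector, the way Alfa's servertools would drive it; the embed URL is hypothetical and it only runs inside the plugin:

from servers import uploadmp4

page_url = "https://www.uploadmp4.com/embed/Ab3xYz9"  # hypothetical embed id
ok, msg = uploadmp4.test_video_exists(page_url)
if ok:
    for label, media_url in uploadmp4.get_video_url(page_url):
        print(label, media_url)  # e.g. "720p [uploadmp4]" and the mp4 URL
else:
    print(msg)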