Updated
- asialiveaction: fixed for site structure change.
- cinetux: fixed link extraction.
- gnula: fixed structure and thumbnails.
- hdfilmologia: fixed link extraction.
- fembed: fixed video extraction.
- gvideo: pattern fix.
- videobb: new server.
Code updates in some modules.
plugin.video.alfa/channels/asialiveaction.py

@@ -14,7 +14,7 @@ from lib import jsunpack
 from platformcode import config, logger


-host = "http://www.asialiveaction.com"
+host = "https://asialiveaction.com"

 IDIOMAS = {'Japones': 'Japones'}
 list_language = IDIOMAS.values()
@@ -26,9 +26,9 @@ def mainlist(item):
     autoplay.init(item.channel, list_servers, list_quality)
     itemlist = list()
     itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas",
-                         url=urlparse.urljoin(host, "/category/pelicula"), type='pl', pag=1))
+                         url=urlparse.urljoin(host, "/pelicula"), type='pl'))
     itemlist.append(Item(channel=item.channel, action="lista", title="Series",
-                         url=urlparse.urljoin(host, "/category/serie"), type='sr', pag=1))
+                         url=urlparse.urljoin(host, "/serie"), type='sr'))
     itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre'))
     itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality'))
     itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host, cat='abc'))
@@ -58,7 +58,7 @@ def category(item):
     for scrapedurl,scrapedtitle in matches:
         if scrapedtitle != 'Próximas Películas':
             if not scrapedurl.startswith("http"): scrapedurl = host + scrapedurl
-            itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, type='cat', pag=0))
+            itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, type='cat'))
     return itemlist

@@ -88,7 +88,6 @@ def search(item, texto):
     logger.info()
     texto = texto.replace(" ", "+")
     item.url = item.url + texto
-    item.pag = 0
     if texto != '':
        return lista(item)

@@ -119,12 +118,13 @@ def lista_a(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     patron = '(?is)Num">.*?href="([^"]+)".*?'
-    patron += 'src="([^"]+)".*?>.*?'
+    patron += 'data-src="([^"]+)".*?>.*?'
     patron += '<strong>([^<]+)<.*?'
     patron += '<td>([^<]+)<.*?'
     patron += 'href.*?>([^"]+)<\/a>'
     matches = scrapertools.find_multiple_matches(data, patron)
     for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedtype in matches:
+        if not scrapedthumbnail.startswith("http"): scrapedthumbnail = "https:" + scrapedthumbnail
         action = "findvideos"
         if "Serie" in scrapedtype: action = "episodios"
         itemlist.append(item.clone(action=action, title=scrapedtitle, contentTitle=scrapedtitle, contentSerieName=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
@@ -140,14 +140,14 @@ def lista(item):
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

-    patron = '<article .*?">'
-    patron += '<a href="([^"]+)"><.*?><figure.*?>' #scrapedurl
-    patron += '<img.*?src="([^"]+)".*?>.*?' #scrapedthumbnail
-    patron += '<h3 class=".*?">([^"]+)<\/h3>' #scrapedtitle
-    patron += '<span.*?>([^"]+)<\/span>.+?' #scrapedyear
-    patron += '<a.+?>([^"]+)<\/a>' #scrapedtype
+    patron = '(?is)class="TPost C">.*?href="([^"]+)".*?' #scrapedurl
+    patron += 'lazy-src="([^"]+)".*?>.*?' #scrapedthumbnail
+    patron += 'title">([^<]+)<.*?' #scrapedtitle
+    patron += 'year">([^<]+)<.*?' #scrapedyear
+    patron += 'href.*?>([^"]+)<\/a>' #scrapedtype
     matches = scrapertools.find_multiple_matches(data, patron)
     for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedtype in matches:
+        if not scrapedthumbnail.startswith("http"): scrapedthumbnail = "https:" + scrapedthumbnail
         title="%s - %s" % (scrapedtitle,scrapedyear)

         new_item = Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
@@ -158,16 +158,12 @@ def lista(item):
         else:
             new_item.contentTitle = scrapedtitle
             new_item.action = 'findvideos'
-
-        itemlist.append(new_item)
-
+        itemlist.append(new_item)
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

     #pagination
-    pag = item.pag + 1
-    url_next_page = item.url+"/page/"+str(pag)+"/"
-    if len(itemlist)>19:
-        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', pag=pag))
+    url_next_page = scrapertools.find_single_match(data, 'rel="next" href="([^"]+)"')
+    if len(itemlist)>0 and url_next_page:
+        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista'))
     return itemlist

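The pagination rewrite stops counting pages client-side and instead follows the `rel="next"` link the site itself emits. A minimal sketch of the idea (the HTML fragment below is invented for illustration):

```python
import re

# Hypothetical page fragment; real pages embed this link tag in <head>.
data = '<link rel="next" href="https://asialiveaction.com/pelicula/page/2/">'

# Same pattern the channel now uses: grab the next-page URL if present.
m = re.search(r'rel="next" href="([^"]+)"', data)
if m:
    print(m.group(1))  # https://asialiveaction.com/pelicula/page/2/
```

This also explains why the `pag` counters were dropped across the channel: the site, not the scraper, now decides whether another page exists.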
@@ -189,14 +185,16 @@ def findvideos(item):
         data1 = httptools.downloadpage(url, headers={"Referer":url1}).data
         url = scrapertools.find_single_match(data1, 'src: "([^"]+)"')
         if "embed.php" not in url:
-            itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url))
+            if url:
+                itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url))
             continue
         data1 = httptools.downloadpage(url).data
         packed = scrapertools.find_single_match(data1, "(?is)eval\(function\(p,a,c,k,e.*?</script>")
         unpack = jsunpack.unpack(packed)
         urls = scrapertools.find_multiple_matches(unpack, '"file":"([^"]+).*?label":"([^"]+)')
         for url2, quality in urls:
-            itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2))
+            if url2:
+                itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2))
         # Segundo grupo de enlaces
         matches = scrapertools.find_multiple_matches(data, '<span><a rel="nofollow" target="_blank" href="([^"]+)"')
         for url in matches:
@@ -212,7 +210,8 @@ def findvideos(item):
             language = "Sub. Español"
             matches2 = scrapertools.find_multiple_matches(ser, 'href="([^"]+)')
             for url2 in matches2:
-                itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2))
+                if url2:
+                    itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2))
     itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
     # Requerido para FilterTools
     itemlist = filtertools.get_links(itemlist, item, list_language)

plugin.video.alfa/channels/cinetux.py

@@ -242,7 +242,7 @@ def findvideos(item):
         else:
             title = ''
         url = scrapertools.find_single_match(new_data, "src='([^']+)'")
-        url = get_url(url.replace('\\/', '/'))
+        url = get_url(url)
         if url:
             itemlist.append(item.clone(title ='%s'+title, url=url, action='play',
                                        language=IDIOMAS[language], text_color = ""))
@@ -255,7 +255,7 @@ def findvideos(item):
             title = ''
         new_data = httptools.downloadpage(hidden_url).data
         url = scrapertools.find_single_match(new_data, 'id="link" href="([^"]+)"')
-        url = get_url(url.replace('\\/', '/'))
+        url = get_url(url)
         if url:
             itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', quality=quality,
                                  language=IDIOMAS[language], infoLabels=item.infoLabels, text_color = ""))
@@ -280,6 +280,7 @@ def findvideos(item):

 def get_url(url):
     logger.info()
+    url = url.replace('\\/', '/')
     if "cinetux.me" in url:
         d1 = httptools.downloadpage(url).data
         if "mail" in url or "drive" in url or "ok.cinetux" in url or "mp4/" in url:
@@ -288,6 +289,8 @@ def get_url(url):
             url = scrapertools.find_single_match(d1, '<iframe src="([^"]+)') + id
             if "drive" in url:
                 url += "/preview"
+            if "FFFFFF" in url:
+                url = scrapertools.find_single_match(d1, 'class="cta" href="([^"]+)"')
     else:
         url = scrapertools.find_single_match(d1, 'document.location.replace\("([^"]+)')
     url = url.replace("povwideo","powvideo")

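With this change every caller hands get_url the raw matched string, and the JSON-escaped slashes are normalized in one place instead of at each call site. A toy illustration of that cleanup step (the input string is invented):

```python
# URLs scraped out of inline JSON often arrive with escaped slashes.
raw = "https:\\/\\/ok.ru\\/videoembed\\/12345"

# get_url() now starts by undoing that escaping for every caller.
url = raw.replace('\\/', '/')
print(url)  # https://ok.ru/videoembed/12345
```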
plugin.video.alfa/channels/gnula.py

@@ -3,6 +3,7 @@
 from core import httptools
 from core import scrapertools
 from core import servertools
+from core import tmdb
 from core.item import Item
 from platformcode import config, logger
 from channelselector import get_thumb
@@ -68,6 +69,7 @@ def sub_search(item):
         if "ver-" not in scrapedurl:
             continue
-        contentTitle = scrapedtitle.replace(scrapertools.find_single_match('\[.+', scrapedtitle),"")
+        year = scrapertools.find_single_match(scrapedtitle, "\d{4}")
+        contentTitle = scrapedtitle.replace("(%s)" %year,"").replace("Ver","").strip()
         itemlist.append(Item(action = "findvideos",
                              channel = item.channel,
@@ -77,6 +79,7 @@ def sub_search(item):
                              thumbnail = scrapedthumbnail,
                              url = scrapedurl,
                              ))
+    tmdb.set_infoLabels_itemlist(itemlist, True)
     return itemlist

@@ -89,11 +92,11 @@ def generos(item):
     matches = scrapertools.find_multiple_matches(data, patron)
     for genero, scrapedurl in matches:
         title = scrapertools.htmlclean(genero)
-        url = item.url + scrapedurl
+        if not scrapedurl.startswith("http"): scrapedurl = item.url + scrapedurl
         itemlist.append(Item(channel = item.channel,
                              action = 'peliculas',
                              title = title,
-                             url = url,
+                             url = scrapedurl,
                              viewmode = "movie",
                              first=0))
     itemlist = sorted(itemlist, key=lambda item: item.title)
@@ -124,19 +127,21 @@ def peliculas(item):
         title = scrapedtitle + " " + plot
         if not scrapedurl.startswith("http"):
             scrapedurl = item.url + scrapedurl
-        itemlist.append(Item(channel = item.channel,
-                             action = 'findvideos',
-                             title = title,
-                             url = scrapedurl,
-                             thumbnail = scrapedthumbnail,
-                             plot = plot,
-                             quality=quality
+        year = scrapertools.find_single_match(scrapedurl, "\-(\d{4})\-")
+        contentTitle = scrapedtitle.replace(scrapertools.find_single_match('\[.+', scrapedtitle),"")
+        itemlist.append(Item(action = 'findvideos',
+                             channel = item.channel,
+                             contentTitle = scrapedtitle,
+                             contentType = "movie",
+                             infoLabels = {"year":year},
+                             language=language,
+                             plot = plot,
+                             quality=quality,
+                             title = title,
+                             thumbnail = scrapedthumbnail,
+                             url = scrapedurl
                              ))
+    tmdb.set_infoLabels_itemlist(itemlist, True)
     #paginacion

     url_next_page = item.url
     first = last
     if next:
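The release year is now pulled straight from the movie URL, where this site embeds it between hyphens, and then fed to TMDB via infoLabels. A quick illustration (the URL is made up):

```python
import re

# Hypothetical gnula-style URL with the release year between hyphens.
url = "https://gnula.nu/pelicula/ver-ejemplo-2019-online/"

m = re.search(r"\-(\d{4})\-", url)
print(m.group(1) if m else "")  # 2019
```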
@@ -149,9 +154,9 @@ def findvideos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    item.plot = scrapertools.find_single_match(data, '<div class="entry">(.*?)<div class="iframes">')
-    item.plot = scrapertools.htmlclean(item.plot).strip()
-    item.contentPlot = item.plot
+    #item.plot = scrapertools.find_single_match(data, '<div class="entry">(.*?)<div class="iframes">')
+    #item.plot = scrapertools.htmlclean(item.plot).strip()
+    #item.contentPlot = item.plot
     patron = '<strong>Ver película online.*?>.*?>([^<]+)'
     scrapedopcion = scrapertools.find_single_match(data, patron)
     titulo_opcional = scrapertools.find_single_match(scrapedopcion, ".*?, (.*)").upper()
@@ -167,14 +172,12 @@ def findvideos(item):
     urls = scrapertools.find_multiple_matches(datos, '(?:src|href)="([^"]+)')
     titulo = "Ver en %s " + titulo_opcion
     for url in urls:
-        itemlist.append(Item(channel = item.channel,
-                             action = "play",
-                             contentThumbnail = item.thumbnail,
-                             fulltitle = item.contentTitle,
+        itemlist.append(item.clone(action = "play",
                              title = titulo,
                              url = url
                              ))
     itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
+    #tmdb.set_infoLabels_itemlist(itemlist, True)
     if itemlist:
         if config.get_videolibrary_support():
             itemlist.append(Item(channel = item.channel, action = ""))

plugin.video.alfa/channels/hdfilmologia.json

@@ -5,7 +5,7 @@
     "adult": false,
     "language": ["cast", "lat", "vose"],
     "fanart": "https://i.postimg.cc/qvFCZNKT/Alpha-652355392-large.jpg",
-    "thumbnail": "https://hdfilmologia.com/templates/gorstyle/images/logo.png",
+    "thumbnail": "https://hdfilmologia.com/templates/hdfilmologia/images/logo.png",
     "banner": "",
     "categories": [
         "movie",

plugin.video.alfa/channels/hdfilmologia.py

@@ -179,7 +179,7 @@ def genres(item):
     logger.info()
     itemlist = []

-    data = scrapertools.downloadpage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

     patron = '<li class="myli"><a href="/([^"]+)">([^<]+)</a>'
@@ -221,12 +221,11 @@ def findvideos(item):

     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)

-    patron = '(\w+)src\d+="([^"]+)"'
+    patron = '>([^<]+)</a></li><li><a class="src_tab" id="[^"]+" data-src="([^"]+)"'
     matches = re.compile(patron, re.DOTALL).findall(data)

     for lang, url in matches:

-        lang = re.sub(r"1|2|3|4", "", lang)
         server = servertools.get_server_from_url(url)
         if 'dropbox' in url:
             server = 'dropbox'
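The new pattern reads the language label and the embed URL straight out of the player's tab list. A sketch against an invented fragment mimicking the site's markup:

```python
import re

# Hypothetical markup in the style of the site's language/server tabs.
data = ('<li><a>Latino</a></li><li><a class="src_tab" id="tab1" '
        'data-src="https://fembed.com/v/abc123"></a></li>')

patron = '>([^<]+)</a></li><li><a class="src_tab" id="[^"]+" data-src="([^"]+)"'
for lang, url in re.compile(patron, re.DOTALL).findall(data):
    print(lang, url)  # Latino https://fembed.com/v/abc123
```

Since the label is now the full word ("Latino") rather than a single letter with a counter suffix, the digit-stripping re.sub and the one-letter language map both go away.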
@@ -243,9 +242,9 @@ def findvideos(item):
     for key in matches:
         url = 'https://www.dropbox.com/s/%s?dl=1' % (key)
         server = 'dropbox'
-    languages = {'l': '[COLOR cornflowerblue](LAT)[/COLOR]',
-                 'e': '[COLOR green](CAST)[/COLOR]',
-                 's': '[COLOR red](VOS)[/COLOR]'}
+    languages = {'Latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
+                 'Castellano': '[COLOR green](CAST)[/COLOR]',
+                 'Subtitulado': '[COLOR red](VOS)[/COLOR]'}
     if lang in languages:
         lang = languages[lang]

@@ -10,25 +10,6 @@ from core import httptools
 from platformcode import logger


-def downloadpage(url, post=None, headers=None, follow_redirects=True, timeout=None, header_to_get=None):
-    response = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=follow_redirects,
-                                      timeout=timeout)
-    if header_to_get:
-        return response.headers.get(header_to_get)
-    else:
-        return response.data
-
-
-def downloadpageGzip(url):
-    response = httptools.downloadpage(url, add_referer=True)
-    return response.data
-
-
-def getLocationHeaderFromResponse(url):
-    response = httptools.downloadpage(url, only_headers=True)
-    return response.headers.get("location")
-
-
 def get_header_from_response(url, header_to_get="", post=None, headers=None):
     header_to_get = header_to_get.lower()
     response = httptools.downloadpage(url, post=post, headers=headers, only_headers=True)
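The deleted helpers were thin pass-throughs, so callers can go straight to httptools. A sketch of the equivalent direct calls, assuming the addon's core package is importable (the URL is illustrative); the response attributes used here match the ones visible in the removed wrappers:

```python
from core import httptools

# What scrapertools.downloadpage(url) used to return:
data = httptools.downloadpage("https://example.com/page").data

# What getLocationHeaderFromResponse(url) used to return:
location = httptools.downloadpage("https://example.com/page",
                                  only_headers=True).headers.get("location")
```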
@@ -48,11 +29,6 @@ def printMatches(matches):
     i = i + 1


-def get_match(data, patron, index=0):
-    matches = re.findall(patron, data, flags=re.DOTALL)
-    return matches[index]
-
-
 def find_single_match(data, patron, index=0):
     try:
         matches = re.findall(patron, data, flags=re.DOTALL)

@@ -18,10 +18,6 @@ def printMatches(matches):
     i = i + 1


-def get_match(data, patron, index=0):
-    return find_single_match(data, patron, index=0)
-
-
 def find_single_match(data, patron, index=0):
     try:
         matches = re.findall(patron, data, flags=re.DOTALL)

plugin.video.alfa/servers/fembed.json

@@ -4,7 +4,7 @@
     "ignore_urls": [],
     "patterns": [
       {
-        "pattern": "((?:fembed|divload).com/v/[A-z0-9_-]+)",
+        "pattern": "((?:fembed|divload).com/(?:f|v)/[A-z0-9_-]+)",
         "url": "https://www.\\1"
       }
     ]

plugin.video.alfa/servers/fembed.py

@@ -16,6 +16,7 @@ def test_video_exists(page_url):
 def get_video_url(page_url, user="", password="", video_password=""):
     logger.info("(page_url='%s')" % page_url)
     video_urls = []
+    page_url = page_url.replace("/f/","/v/")
     page_url = page_url.replace("/v/","/api/source/")
     data = httptools.downloadpage(page_url, post={}).data
     data = jsontools.load(data)

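Fembed now serves embeds under both /f/ and /v/; the connector folds the newer form into the old one before building the API call. Illustrated with a made-up video id:

```python
# Hypothetical embed URL using Fembed's newer /f/ path.
page_url = "https://www.fembed.com/f/abc123"

# Normalize /f/ to /v/, then point at the JSON source API the server queries.
page_url = page_url.replace("/f/", "/v/")
page_url = page_url.replace("/v/", "/api/source/")
print(page_url)  # https://www.fembed.com/api/source/abc123
```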
plugin.video.alfa/servers/gvideo.json

@@ -16,7 +16,7 @@
         "url": "http://docs.google.com/get_video_info?docid=\\1"
       },
       {
-        "pattern": "(?s)https://(?:docs|drive).google.com/file/d/([^/]+)/(?:preview|edit)",
+        "pattern": "(?s)(?:https|http)://(?:docs|drive).google.com/file/d/([^/]+)/(?:preview|edit|view)",
         "url": "http://docs.google.com/get_video_info?docid=\\1"
       },
       {

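The broadened pattern additionally accepts plain http links and the /view suffix. A quick check of what the new expression captures (the sample links are invented):

```python
import re

pattern = r"(?s)(?:https|http)://(?:docs|drive).google.com/file/d/([^/]+)/(?:preview|edit|view)"

for link in ("https://drive.google.com/file/d/FILEID1/view",
             "http://docs.google.com/file/d/FILEID2/preview"):
    m = re.search(pattern, link)
    print(m.group(1))  # FILEID1, then FILEID2
```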
plugin.video.alfa/servers/videobb.json (new file, 42 lines)

@@ -0,0 +1,42 @@
+{
+  "active": true,
+  "find_videos": {
+    "ignore_urls": [],
+    "patterns": [
+      {
+        "pattern": "videobb.ru/v/([A-z0-9]+)",
+        "url": "https://videobb.ru/api/source/\\1"
+      }
+    ]
+  },
+  "free": true,
+  "id": "videobb",
+  "name": "videobb",
+  "settings": [
+    {
+      "default": false,
+      "enabled": true,
+      "id": "black_list",
+      "label": "@60654",
+      "type": "bool",
+      "visible": true
+    },
+    {
+      "default": 0,
+      "enabled": true,
+      "id": "favorites_servers_list",
+      "label": "@60655",
+      "lvalues": [
+        "No",
+        "1",
+        "2",
+        "3",
+        "4",
+        "5"
+      ],
+      "type": "list",
+      "visible": false
+    }
+  ],
+  "thumbnail": "https://www.cinetux.to/videobb/logo.jpg"
+}
plugin.video.alfa/servers/videobb.py (new file, 31 lines)

@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------
+# Conector videobb By Alfa development Group
+# --------------------------------------------------------
+
+from core import httptools
+from core import scrapertools
+from core import jsontools
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+    logger.info("(page_url='%s')" % page_url)
+    data = httptools.downloadpage(page_url).data
+    if "no longer exists" in data:
+        return False, "[videobb] El video ha sido borrado"
+    return True, ""
+
+
+def get_video_url(page_url, user="", password="", video_password=""):
+    logger.info("(page_url='%s')" % page_url)
+    video_urls = []
+    id = scrapertools.find_single_match(page_url, "v/(\w+)")
+    post = "r=&d=videobb.ru"
+    data = httptools.downloadpage(page_url, post=post).data
+    data = jsontools.load(data)["data"]
+    for url in data:
+        video_urls.append([url["label"] + "p [videobb]", url["file"]])
+    logger.info("Intel11 %s" %data)
+
+    return video_urls
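A minimal sketch of how the resolver chain would drive the new connector, assuming the addon's servers package is importable; the embed id is invented:

```python
from servers import videobb

# find_videos has already rewritten videobb.ru/v/<id> into the API form.
page_url = "https://videobb.ru/api/source/abc123"

exists, msg = videobb.test_video_exists(page_url)
if exists:
    # Each entry is [label, direct stream URL], e.g. "720p [videobb]".
    for label, stream_url in videobb.get_video_url(page_url):
        print(label, stream_url)
```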