Merge pull request #97 from Alfa-beto/channels
Adjustments, fixes, and a new channel
@@ -10,31 +10,10 @@
     "thumbnail": "https://s2.postimg.org/jivgi4ak9/doomtv.png",
     "banner": "https://s32.postimg.org/6gxyripvp/doomtv_banner.png",
     "version": 1,
-    "changes": [
-        {
-            "date": "24/06/2017",
-            "description": "Cambios para autoplay"
-        },
-        {
-            "date": "06/06/2017",
-            "description": "COmpatibilida con AutoPlay"
-        },
-        {
-            "date": "12/05/2017",
-            "description": "Fix generos y enlaces"
-        },
-        {
-            "date": "15/03/2017",
-            "description": "limpieza código"
-        },
-        {
-            "date": "01/02/2017",
-            "description": "Release."
-        }
-    ],
     "categories": [
         "latino",
-        "movie"
+        "movie",
+        "direct"
     ],
     "settings": [
         {
plugin.video.alfa/channels/doomtv.py (303 changed lines; Executable file → Normal file)

@@ -18,7 +18,7 @@ list_language = IDIOMAS.values()
 
 CALIDADES = {'1080p': '1080p', '720p': '720p', '480p': '480p', '360p': '360p'}
 list_quality = CALIDADES.values()
-list_servers = ['directo']
+list_servers = ['directo', 'openload']
 
 host = 'http://doomtv.net/'
 headers = {
@@ -32,10 +32,10 @@ tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
            "Aventura": "https://s10.postimg.org/6su40czih/aventura.png",
            "Romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
            "Animación": "https://s13.postimg.org/5on877l87/animacion.png",
-           "Ciencia Ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
+           "Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
            "Terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
-           "Documentales": "https://s16.postimg.org/7xjj4bmol/documental.png",
-           "Musical": "https://s29.postimg.org/bbxmdh9c7/musical.png",
+           "Documental": "https://s16.postimg.org/7xjj4bmol/documental.png",
+           "Música": "https://s29.postimg.org/bbxmdh9c7/musical.png",
            "Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png",
            "Bélico Guerra": "https://s23.postimg.org/71itp9hcr/belica.png",
            "Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png",
@@ -56,7 +56,6 @@ tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
 def mainlist(item):
     logger.info()
 
-    autoplay.init(item.channel, list_servers, list_quality)
     itemlist = []
 
     itemlist.append(
@@ -64,7 +63,7 @@ def mainlist(item):
                    action="lista",
                    thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
                    fanart='https://s18.postimg.org/fwvaeo6qh/todas.png',
-                   url=host
+                   url='%s%s'%(host,'peliculas/page/1')
                    ))
 
     itemlist.append(
@@ -72,34 +71,15 @@ def mainlist(item):
                    action="seccion",
                    thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
                    fanart='https://s3.postimg.org/5s9jg2wtf/generos.png',
-                   url=host,
-                   extra='generos'
+                   url='%s%s' % (host, 'peliculas/page/1'),
                    ))
 
     itemlist.append(
-        item.clone(title="Mas vistas",
-                   action="seccion",
+        item.clone(title="Mas Vistas",
+                   action="lista",
                    thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png',
                    fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png',
-                   url=host,
-                   extra='masvistas'
-                   ))
-
-    itemlist.append(
-        item.clone(title="Recomendadas",
-                   action="lista",
-                   thumbnail='https://s12.postimg.org/s881laywd/recomendadas.png',
-                   fanart='https://s12.postimg.org/s881laywd/recomendadas.png',
-                   url=host,
-                   extra='recomendadas'
-                   ))
-
-    itemlist.append(
-        item.clone(title="Por año",
-                   action="seccion",
-                   thumbnail='https://s8.postimg.org/7eoedwfg5/pora_o.png',
-                   fanart='https://s8.postimg.org/7eoedwfg5/pora_o.png',
-                   url=host, extra='poraño'
+                   url='%s%s'%(host,'top-imdb/page/1'),
                    ))
 
     itemlist.append(
@@ -110,8 +90,6 @@ def mainlist(item):
                    fanart='https://s30.postimg.org/pei7txpa9/buscar.png'
                    ))
 
-    autoplay.show_option(item.channel, itemlist)
-
     return itemlist
 
 
@@ -123,23 +101,11 @@ def lista(item):
     next_page_url = ''
 
     data = httptools.downloadpage(item.url).data
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
 
-    if item.extra == 'recomendadas':
-        patron = '<a href="(.*?)">.*?'
-        patron += '<div class="imgss">.*?'
-        patron += '<img src="(.*?)" alt="(.*?)(?:–.*?|\(.*?|–|").*?'
-        patron += '<div class="imdb">.*?'
-        patron += '<\/a>.*?'
-        patron += '<span class="ttps">.*?<\/span>.*?'
-        patron += '<span class="ytps">(.*?)<\/span><\/div>'
-    elif item.extra in ['generos', 'poraño', 'buscar']:
-        patron = '<div class=movie>.*?<img src=(.*?) alt=(.*?)(?:\s|\/)><a href=(.*?)>.*?'
-        patron += '<h2>.*?<\/h2>.*?(?:<span class=year>(.*?)<\/span>)?.*?<\/div>'
-    else:
-        patron = '<div class="imagen">.*?'
-        patron += '<img src="(.*?)" alt="(.*?)(?:–.*?|\(.*?|–|").*?'
-        patron += '<a href="([^"]+)"><(?:span) class="player"><\/span><\/a>.*?'
-        patron += 'h2>\s*.*?(?:year)">(.*?)<\/span>.*?<\/div>'
+    patron = 'movie-id=.*?href=(.*?) data-url.*?quality>(.*?)'
+    patron += '<img data-original=(.*?) class.*?<h2>(.*?)<\/h2>.*?<p>(.*?)<\/p>'
+
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     if item.next_page != 'b':
@@ -150,39 +116,36 @@ def lista(item):
     else:
         matches = matches[max_items:]
         next_page = 'a'
-        patron_next_page = '<div class="siguiente"><a href="(.*?)"|\/\?'
-        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
-        if len(matches_next_page) > 0:
-            next_page_url = urlparse.urljoin(item.url, matches_next_page[0])
+        next_page_str = scrapertools.find_single_match(data,"<li class='active'><a class=''>(\d+)</a>")
+        next_page_num = int(next_page_str)+1
+        page_base = re.sub(r'(page\/\d+)','', item.url)
+        next_page_url = '%s%s%s'%(page_base,'page/',next_page_num)
 
-    for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedyear in matches:
-        if item.extra == 'recomendadas':
-            url = scrapedthumbnail
-            title = scrapedurl
-            thumbnail = scrapedtitle
-        else:
-            url = scrapedurl
-            thumbnail = scrapedthumbnail
-            title = scrapedtitle
-        year = scrapedyear
-        if next_page_url:
-            next_page_url = next_page_url
+    for scrapedurl, quality, scrapedthumbnail, scrapedtitle, plot in matches:
+
+        url = scrapedurl
+        thumbnail = scrapedthumbnail
+        filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w185", "")
+        filtro_list = {"poster_path": filtro_thumb.strip()}
+        filtro_list = filtro_list.items()
+        title = scrapedtitle
         fanart = ''
         plot = ''
 
-        if 'serie' not in url:
-            itemlist.append(
-                Item(channel=item.channel,
-                     action='findvideos',
-                     title=title,
-                     url=url,
-                     thumbnail=thumbnail,
-                     plot=plot,
-                     fanart=fanart,
-                     contentTitle=title,
-                     infoLabels={'year': year},
-                     context=autoplay.context
-                     ))
-
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+        plot = plot
+        itemlist.append(
+            Item(channel=item.channel,
+                 action='findvideos',
+                 title=title,
+                 url=url,
+                 thumbnail=thumbnail,
+                 plot=plot,
+                 infoLabels={'filtro': filtro_list},
+                 fanart=fanart,
+                 contentTitle=title
+                 ))
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
     # Paginacion
     if next_page_url != '':
         itemlist.append(
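Note: the new pagination above no longer follows a "siguiente" link; it reads the active page number and rebuilds the URL. A minimal sketch of that arithmetic, with a made-up item.url:

    import re
    item_url = 'http://doomtv.net/peliculas/page/3'       # invented example URL
    next_page_str = '3'                                   # as scraped from <li class='active'>
    next_page_num = int(next_page_str) + 1
    page_base = re.sub(r'(page\/\d+)', '', item_url)      # 'http://doomtv.net/peliculas/'
    next_page_url = '%s%s%s' % (page_base, 'page/', next_page_num)
    print(next_page_url)                                  # http://doomtv.net/peliculas/page/4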
@@ -203,17 +166,8 @@ def seccion(item):
     itemlist = []
     duplicado = []
     data = httptools.downloadpage(item.url).data
 
-    if item.extra == 'generos':
-        data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-        accion = 'lista'
-    if item.extra == 'masvistas':
-        patron = '<b>\d*<\/b>\s*<a href="(.*?)">(.*?<\/a>\s*<span>.*?<\/span>\s*<i>.*?<\/i><\/li>)'
-        accion = 'findvideos'
-    elif item.extra == 'poraño':
-        patron = '<li><a class="ito" HREF="(.*?)">(.*?)<\/a><\/li>'
-    else:
-        patron = '<li class=cat-item cat-item-.*?><a href=(.*?)>(.*?)<\/i>'
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    patron = 'menu-item-object-category menu-item-\d+><a href=(.*?)>(.*?)<\/a><\/li>'
 
     matches = re.compile(patron, re.DOTALL).findall(data)
 
@@ -221,61 +175,19 @@ def seccion(item):
         url = scrapedurl
         title = scrapedtitle
         thumbnail = ''
         fanart = ''
         plot = ''
-        year = ''
-        contentTitle = ''
-        if item.extra == 'masvistas':
-            year = re.findall(r'\b\d{4}\b', scrapedtitle)
-            title = re.sub(r'<\/a>\s*<span>.*?<\/span>\s*<i>.*?<\/i><\/li>', '', scrapedtitle)
-            contentTitle = title
-            title = title + ' (' + year[0] + ')'
-
-        elif item.extra == 'generos':
-            title = re.sub(r'<\/a> <i>\d+', '', scrapedtitle)
-            cantidad = re.findall(r'.*?<\/a> <i>(\d+)', scrapedtitle)
-            th_title = title
-            title = title + ' (' + cantidad[0] + ')'
-            thumbnail = tgenero[th_title]
-            fanart = thumbnail
-
+        if title in tgenero:
+            thumbnail = tgenero[title]
         if url not in duplicado:
             itemlist.append(
                 Item(channel=item.channel,
-                     action=accion,
+                     action='lista',
                      title=title,
                      url=url,
                      thumbnail=thumbnail,
                      plot=plot,
                      fanart=fanart,
-                     contentTitle=contentTitle,
-                     infoLabels={'year': year}
+                     thumbnail = thumbnail
                      ))
             duplicado.append(url)
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
     return itemlist
 
 
-def unpack(packed):
-    p, c, k = re.search("}\('(.*)', *\d+, *(\d+), *'(.*)'\.", packed, re.DOTALL).groups()
-    for c in reversed(range(int(c))):
-        if k.split('|')[c]: p = re.sub(r'(\b%s\b)' % c, k.split('|')[c], p)
-    p = p.replace('\\', '')
-    p = p.decode('string_escape')
-    return p
-
-
-def getinfo(page_url):
-    info = ()
-    logger.info()
-    data = httptools.downloadpage(page_url).data
-    thumbnail = scrapertools.find_single_match(data, '<div class="cover" style="background-image: url\((.*?)\);')
-    plot = scrapertools.find_single_match(data, '<h2>Synopsis<\/h2>\s*<p>(.*?)<\/p>')
-    info = (plot, thumbnail)
-
-    return info
-
-
 def search(item, texto):
     logger.info()
     texto = texto.replace(" ", "+")
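Note: the unpack() removed above is a minimal Dean Edwards p,a,c,k-style unpacker for eval(function(p,a,c,k,e,d)...) payloads (leaving aside the Py2-only string_escape decode). A toy run of its substitution loop, with an invented packed string:

    import re
    packed = "}('0 1', 62, 2, 'hello|world'."              # invented payload tail
    p, c, k = re.search("}\('(.*)', *\d+, *(\d+), *'(.*)'\.", packed, re.DOTALL).groups()
    for c in reversed(range(int(c))):
        if k.split('|')[c]:
            p = re.sub(r'(\b%s\b)' % c, k.split('|')[c], p)
    print(p)                                               # hello world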
@@ -305,98 +217,47 @@ def newest(categoria):
 
     return itemlist
 
 
-def get_url(item):
-    logger.info()
-    itemlist = []
-    duplicado = []
-    patrones = ["{'label':(.*?),.*?'file':'(.*?)'}", "{file:'(.*?redirector.*?),label:'(.*?)'}"]
-    data = httptools.downloadpage(item.url, headers=headers, cookies=False).data
-    patron = 'class="player-content"><iframe src="(.*?)"'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    for option in matches:
-        if 'allplayer' in option:
-            url = 'http:/' + option.replace('//', '/')
-            data = httptools.downloadpage(url, headers=headers, cookies=False).data
-            packed = scrapertools.find_single_match(data, "<div id='allplayer'>.*?(eval\(function\(p,a,c,k.*?\)\)\))")
-            if packed:
-                unpacked = unpack(packed)
-                video_urls = []
-                if "vimeocdn" in unpacked:
-
-                    streams = scrapertools.find_multiple_matches(unpacked,
-                                                                 "{file:'(.*?)',type:'video/.*?',label:'(.*?)'")
-                    for video_url, quality in streams:
-                        video_urls.append([video_url, quality])
-                else:
-                    doc_id = scrapertools.find_single_match(unpacked, 'driveid=(.*?)&')
-                    doc_url = "http://docs.google.com/get_video_info?docid=%s" % doc_id
-                    response = httptools.downloadpage(doc_url, cookies=False)
-                    cookies = ""
-                    cookie = response.headers["set-cookie"].split("HttpOnly, ")
-                    for c in cookie:
-                        cookies += c.split(";", 1)[0] + "; "
-
-                    data = response.data.decode('unicode-escape')
-                    data = urllib.unquote_plus(urllib.unquote_plus(data))
-                    headers_string = "|Cookie=" + cookies
-
-                    url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
-                    streams = scrapertools.find_multiple_matches(url_streams,
-                                                                 'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
-
-                    itags = {'18': '360p', '22': '720p', '34': '360p', '35': '480p', '37': '1080p', '59': '480p'}
-                    for itag, video_url in streams:
-                        video_url += headers_string
-                        video_urls.append([video_url, itags[itag]])
-
-                for video_item in video_urls:
-                    calidad = video_item[1]
-                    title = '%s [%s]' % (item.contentTitle, calidad)
-                    url = video_item[0]
-
-                    if url not in duplicado:
-                        itemlist.append(
-                            Item(channel=item.channel,
-                                 action='play',
-                                 title=title,
-                                 url=url,
-                                 thumbnail=item.thumbnail,
-                                 plot=item.plot,
-                                 fanart=item.fanart,
-                                 contentTitle=item.contentTitle,
-                                 language=IDIOMAS['latino'],
-                                 server='directo',
-                                 quality=CALIDADES[calidad],
-                                 context=item.context
-                                 ))
-                        duplicado.append(url)
-        else:
-            itemlist.extend(servertools.find_video_items(data=option))
-
-    for videoitem in itemlist:
-
-        if 'Enlace' in videoitem.title:
-            videoitem.channel = item.channel
-            videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
-            videoitem.language = 'latino'
-            videoitem.quality = 'default'
-    return itemlist
-
-
 def findvideos(item):
     logger.info()
     itemlist = []
-    itemlist = get_url(item)
-
-    # Requerido para FilterTools
-    itemlist = filtertools.get_links(itemlist, item, list_language)
-
-    # Requerido para AutoPlay
-    autoplay.start(itemlist, item)
+    #itemlist = get_url(item)
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for option, urls in matches:
+        quality = scrapertools.find_single_match(data, '<div class=les-content><a href=#%s>(.*?)<\/a><\/div>'%option)
+        title = '%s (%s)' % (item.title, quality)
+        if 'content' in urls:
+            urls = '%s%s'%('http:',urls)
+            hidden_data = httptools.downloadpage(urls).data
+            hidden_data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", hidden_data)
+            patron = 'sources: \[{file: (.*?),'
+            matches = re.compile(patron, re.DOTALL).findall(hidden_data)
+
+            for videoitem in matches:
+
+                new_item = Item(
+                    channel = item.channel,
+                    url = videoitem,
+                    title = title,
+                    contentTitle = item.title,
+                    action = 'play',
+                    quality = quality
+                )
+                itemlist.append(new_item)
+        else:
+            new_item = Item(
+                channel=item.channel,
+                url=urls,
+                title=title,
+                contentTitle=item.title,
+                action='play',
+                quality = quality
+            )
+            itemlist.append(new_item)
+    itemlist = servertools.get_servers_itemlist(itemlist)
 
     if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
         itemlist.append(
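Note: the rewritten findvideos pairs every player tab (id=tabN) with its quality label from the matching les-content anchor. A small sketch against invented, already quote-stripped markup:

    import re
    data = ('<div class=les-content><a href=#tab1>1080p</a></div>'
            '<div id=tab1><div class=movieplay><iframe src=//allplayer.example/embed scrolling')
    patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)'
    for option, url in re.compile(patron, re.DOTALL).findall(data):
        quality = re.findall('<div class=les-content><a href=#%s>(.*?)<\/a><\/div>' % option, data)[0]
        print(option, url.strip(), quality)                # tab1 //allplayer.example/embed 1080p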
@@ -7,6 +7,7 @@ from core import jsontools
 from core import scrapertools
 from core import servertools
 from core import tmdb
+from core import jsontools
 from core.item import Item
 from platformcode import config, logger
 
@@ -237,39 +238,43 @@ def findvideos(item):
                                language=lang,
                                url=url
                                ))
     logger.debug('templist: %s' % templist)
     for videoitem in templist:
         logger.debug('videoitem.language: %s' % videoitem.language)
         data = httptools.downloadpage(videoitem.url).data
         data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-        id = scrapertools.find_single_match(data, 'var _SOURCE =.*?source:(.*?),')
-        if videoitem.language == 'SUB':
-            sub = scrapertools.find_single_match(data, 'var _SOURCE =.*?srt:(.*?),')
-            sub = sub.replace('\\', '')
-        else:
-            sub = ''
-        new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
-                  '=%s&srt=%s' % (id, sub)
-
-        data = httptools.downloadpage(new_url).data
-
-        url = scrapertools.find_single_match(data, '<iframe src="(.*?preview)"')
-        title = videoitem.contentTitle + ' (' + audio[videoitem.language] + ')'
-        logger.debug('url: %s' % url)
-        video_list.extend(servertools.find_video_items(data=url))
-        for urls in video_list:
-            if urls.language == '':
-                urls.language = videoitem.language
-            urls.title = item.title + '(%s) (%s)' % (urls.language, urls.server)
-        logger.debug('video_list: %s' % video_list)
-        # itemlist.append(item.clone(title= title, url = url, action = 'play', subtitle = sub))
-
-        for video_url in video_list:
-            video_url.channel = item.channel
-            video_url.action = 'play'
-
-    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
-        itemlist.append(
+        urls_list = scrapertools.find_multiple_matches(data, '({"type":.*?})')
+        for element in urls_list:
+            json_data=jsontools.load(element)
+
+            id = json_data['id']
+            sub = json_data['srt']
+            url = json_data['source']
+            quality = json_data['quality']
+            if 'http' not in url :
+
+                new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
+                          '=%s&srt=%s' % (url, sub)
+
+                data = httptools.downloadpage(new_url).data
+                data = re.sub(r'\\', "", data)
+                video_list.extend(servertools.find_video_items(data=data))
+                for urls in video_list:
+                    if urls.language == '':
+                        urls.language = videoitem.language
+                    urls.title = item.title + '(%s) (%s)' % (urls.language, urls.server)
+
+                for video_url in video_list:
+                    video_url.channel = item.channel
+                    video_url.action = 'play'
+                    video_url.quality = quality
+            else:
+                server = servertools.get_server_from_url(url)
+                video_list.append(item.clone(title=item.title, url=url, action='play', quality = quality,
+                                             server=server))
+
+    if config.get_videolibrary_support() and len(video_list) > 0 and item.extra != 'findvideos':
+        video_list.append(
             Item(channel=item.channel,
                  title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                  url=item.url,
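Note: both channels in this diff resolve non-http source ids through the onevideo.tv player API. The request it builds looks like this (the key is the one appearing in the diff; link and srt values are invented):

    api_key = '90503e3de26d45e455b55e9dc54f015b3d1d4150'
    link, srt = 'abc123', '/srt/es/abc123.srt'             # invented values
    new_url = ('https://onevideo.tv/api/player?key=%s&link=%s&srt=%s'
               % (api_key, link, srt))
    print(new_url)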
plugin.video.alfa/channels/pelisplusco.json (27 added lines; New file)

@@ -0,0 +1,27 @@
+{
+    "id": "pelisplusco",
+    "name": "PelisPlus.co",
+    "active": true,
+    "adult": false,
+    "thumbnail": "https://s26.postimg.org/jov1pmbh5/pelisplusco.png",
+    "banner": "https://s26.postimg.org/4hf259jmh/pelisplusco-banner.png",
+    "version": 1,
+    "categories": [
+        "latino",
+        "movie",
+        "tvshow",
+        "documentary",
+        "direct"
+
+    ],
+    "settings": [
+        {
+            "id": "include_in_global_search",
+            "type": "bool",
+            "label": "Incluir en busqueda global",
+            "default": false,
+            "enabled": false,
+            "visible": false
+        }
+    ]
+}
plugin.video.alfa/channels/pelisplusco.py (310 added lines; New file)

@@ -0,0 +1,310 @@
+# -*- coding: utf-8 -*-
+# -*- Channel PelisPlus.co -*-
+# -*- Created for Alfa-addon -*-
+# -*- By the Alfa Develop Group -*-
+
+import re
+import urllib
+from platformcode import logger
+from platformcode import config
+from core import scrapertools
+from core.item import Item
+from core import servertools
+from core import httptools
+from core import tmdb
+
+
+host = 'http://pelisplus.co'
+
+def mainlist(item):
+    logger.info()
+
+    itemlist = []
+
+    itemlist.append(item.clone(title="Peliculas",
+                               action="movie_menu",
+                               ))
+
+    itemlist.append(item.clone(title="Series",
+                               action="series_menu",
+                               ))
+
+    return itemlist
+
+def movie_menu(item):
+
+    logger.info()
+
+    itemlist = []
+
+    itemlist.append(item.clone(title="Estrenos",
+                               action="list_all",
+                               url = host+'/estrenos/',
+                               type = 'normal'
+                               ))
+
+    itemlist.append(item.clone(title="Generos",
+                               action="seccion",
+                               url=host,
+                               seccion='generos'
+                               ))
+
+    itemlist.append(item.clone(title="Por Año",
+                               action="seccion",
+                               url=host,
+                               seccion='anios'
+                               ))
+
+    return itemlist
+
+def series_menu(item):
+
+    logger.info()
+
+    itemlist =[]
+
+    itemlist.append(item.clone(title="Todas",
+                               action="list_all",
+                               url=host + '/series/',
+                               type='serie'
+                               ))
+
+    return itemlist
+
+
+def get_source(url):
+
+    logger.info()
+    data = httptools.downloadpage(url).data
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    return data
+
+def list_all (item):
+    logger.info ()
+    itemlist = []
+
+    if item.type not in ['normal', 'seccion', 'serie']:
+        post = {'page':item.page, 'type':item.type,'id':item.id}
+        post = urllib.urlencode(post)
+        data =httptools.downloadpage(item.url, post=post).data
+        data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    else:
+        data = get_source(item.url)
+    if item.type == 'serie' or item.type == 'recents':
+        contentType = 'serie'
+        action = 'seasons'
+    else:
+        contentType = 'pelicula'
+        action = 'findvideos'
+
+    patron = 'item-%s><a href=(.*?)><figure><img src=https:(.*?)'%contentType
+    patron += ' alt=><\/figure><p>(.*?)<\/p><span>(.*?)<\/span>'
+    matches = re.compile(patron,re.DOTALL).findall(data)
+
+    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
+        url = host+scrapedurl
+        thumbnail = scrapedthumbnail
+        plot= ''
+        contentTitle=scrapedtitle
+        title = contentTitle
+        year = scrapedyear
+        fanart =''
+
+        new_item=item.clone(action=action,
+                            title=title,
+                            url=url,
+                            thumbnail=thumbnail,
+                            plot=plot,
+                            fanart=fanart,
+                            infoLabels ={'year':year}
+                            )
+        if contentType =='serie':
+            new_item.contentSerieName=title
+        else:
+            new_item.contentTitle = title
+        itemlist.append(new_item)
+
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb =True)
+    #Paginacion
+
+    next_page_valid = scrapertools.find_single_match(data, '<div class=butmore(?: site=series|) page=(.*?) id=(.*?) '
+                                                           'type=(.*?) limit=.*?>')
+    if item.type != 'normal' and (len(itemlist)>19 or next_page_valid):
+        type = item.type
+        if item.type == 'serie':
+            type = 'recents'
+        if next_page_valid:
+            page = str(int(next_page_valid[0])+1)
+            if item.type != 'recents':
+                id = next_page_valid[1]
+                type = next_page_valid[2]
+            else:
+                id =''
+        else:
+            page = str(int(item.page)+1)
+            id = item.id
+
+        if type =='recents':
+            type_pagination = '/series/pagination'
+        else:
+            type_pagination = '/pagination'
+
+        url = host+type_pagination
+
+        itemlist.append(item.clone(action = "list_all",
+                                   title = 'Siguiente >>>',
+                                   page=page,
+                                   url = url,
+                                   id = id,
+                                   type = type
+                                   ))
+    return itemlist
+
+def seccion(item):
+    logger.info()
+    itemlist = []
+    data = get_source(item.url)
+    if item.seccion == 'generos':
+        patron = '<li><a href=(.*?)><i class=ion-cube><\/i>(.*?)<\/span>'
+        type = 'genre'
+    elif item.seccion == 'anios':
+        patron = '<li><a href=(\/peliculas.*?)>(\d{4})<\/a>'
+        type = 'year'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for scrapedurl, scrapedtitle in matches:
+        title = scrapedtitle
+        if item.seccion == 'generos':
+            cant = re.sub(r'.*?<span class=cant-genre>','',scrapedtitle)
+            only_title = re.sub(r'<.*','',scrapedtitle).rstrip()
+            title = only_title+' (%s)'%cant
+
+        url = host+scrapedurl
+
+        itemlist.append(
+            Item(channel=item.channel,
+                 action="list_all",
+                 title=title,
+                 fulltitle=item.title,
+                 url=url,
+                 type = 'seccion'
+                 ))
+    # Paginacion
+
+    if itemlist != []:
+        next_page = scrapertools.find_single_match(data, '<li><a class= item href=(.*?)&limit=.*?>Siguiente <')
+        next_page_url = host + next_page
+        import inspect
+        if next_page != '':
+            itemlist.append(item.clone(action="seccion",
+                                       title='Siguiente >>>',
+                                       url=next_page_url,
+                                       thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'
+                                       ))
+
+    return itemlist
+
+
+def seasons(item):
+    logger.info()
+    itemlist =[]
+
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+
+    patron ='<i class=ion-chevron-down arrow><\/i>(.*?)<\/div>'
+    matches = matches = re.compile(patron, re.DOTALL).findall(data)
+    infoLabels=item.infoLabels
+
+    for title in matches:
+        season = title.replace('Temporada ','')
+        infoLabels['season'] = season
+        itemlist.append(Item(
+            channel=item.channel,
+            title=title,
+            url=item.url,
+            action='season_episodes',
+            contentSerieName= item.contentSerieName,
+            contentSeasonNumber = season,
+            infoLabels=infoLabels
+            ))
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+    return itemlist[::-1]
+
+def season_episodes(item):
+    logger.info()
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    season = str(item.infoLabels['season'])
+    patron = '<a href=(.*?temporada-%s\/.*?) title=.*?i-play><\/i> (.*?)<\/a>'%season
+    matches = matches = re.compile(patron, re.DOTALL).findall(data)
+    infoLabels = item.infoLabels
+    for url, episode in matches:
+        episodenumber = re.sub('C.* ','',episode)
+        infoLabels['episode'] = episodenumber
+        itemlist.append(Item(channel=item.channel,
+                             title= episode,
+                             url = host+url,
+                             action = 'findvideos',
+                             infoLabels=infoLabels,
+                             contentEpisodeNumber=episode
+                             ))
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+
+    return itemlist[::-1]
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+    video_list = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    patron = 'data-source=(.*?) data.*?-srt=(.*?) data-iframe=0><a>(.*?) - (.*?)<\/a>'
+    matches = matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for url, sub, language, quality in matches:
+
+        if 'http' not in url:
+
+            new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
+                      '=%s&srt=%s' % (url, sub)
+            data = httptools.downloadpage(new_url).data
+            data = re.sub(r'\\', "", data)
+            video_list.extend(servertools.find_video_items(data=data))
+
+            for video_url in video_list:
+                video_url.channel = item.channel
+                video_url.action = 'play'
+                video_url.title = item.title + '(%s) (%s)' % (language, video_url.server)
+                if video_url.language == '':
+                    video_url.language = language
+                video_url.subtitle = sub
+                video_url.contentTitle=item.contentTitle
+        else:
+            server = servertools.get_server_from_url(url)
+            video_list.append(item.clone(title=item.title,
+                                         url=url,
+                                         action='play',
+                                         quality = quality,
+                                         language = language,
+                                         server=server,
+                                         subtitle = sub
+                                         ))
+
+
+    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
+        itemlist.append(
+            Item(channel=item.channel,
+                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
+                 url=item.url,
+                 action="add_pelicula_to_library",
+                 extra="findvideos",
+                 contentTitle=item.contentTitle
+                 ))
+
+    return video_list
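Note: list_all above pages through results by POSTing page/type/id back to /pagination (or /series/pagination for series). A sketch of the post body it sends, with invented values (Python 2 urllib, as in the channel code):

    import urllib
    post = urllib.urlencode({'page': '2', 'type': 'movies', 'id': '7'})   # invented values
    # httptools.downloadpage('http://pelisplus.co/pagination', post=post).data
    print(post)                                            # e.g. id=7&type=movies&page=2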
@@ -16,6 +16,10 @@
         {
           "pattern": "(?s)https://drive.google.com/file/d/([^/]+)/preview",
           "url": "http://docs.google.com/get_video_info?docid=\\1"
         },
+        {
+          "pattern": "(?s)\"https://(?!docs)(.*?).googleusercontent.com/([^\"]+)",
+          "url": "https://\\1.googleusercontent.com/\\2"
+        }
       ]
     },
@@ -8,15 +8,16 @@ from platformcode import logger
 
 
 def test_video_exists(page_url):
 
-    return True, ""
+    if 'googleusercontent' in page_url:
+        return True, ""
     response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
     if "no+existe" in response.data:
         return False, "[gvideo] El video no existe o ha sido borrado"
     if "Se+ha+excedido+el" in response.data:
         return False, "[gvideo] Se ha excedido el número de reproducciones permitidas"
     if "No+tienes+permiso" in response.data:
-        return False, "[gvideo] No tiene permiso para acceder a este video"
+        return False, "[gvideo] No tienes permiso para acceder a este video"
 
     return True, ""
 
@@ -29,13 +30,9 @@ def get_video_url(page_url, user="", password="", video_password=""):
     if 'googleusercontent' in page_url:
         data = httptools.downloadpage(page_url, follow_redirects = False, headers={"Referer": page_url})
         url=data.headers['location']
-        logger.debug('url: %s' % url)
-        logger.debug("data.headers: %s" % data.headers)
         quality = scrapertools.find_single_match (url, '.itag=(\d+).')
-        logger.debug('quality: %s' % quality)
 
         streams.append((quality, url))
-        logger.debug('streams: %s' % streams)
         headers_string=""
 
     else:
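Note: the quality captured from .itag=(\d+). above follows the standard Google Video itag table, the same mapping the removed doomtv get_url() used:

    itags = {'18': '360p', '22': '720p', '34': '360p', '35': '480p', '37': '1080p', '59': '480p'}
    print(itags['22'])                                     # 720p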
@@ -46,6 +43,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
             cookies += c.split(";", 1)[0] + "; "
         data = response.data.decode('unicode-escape')
         data = urllib.unquote_plus(urllib.unquote_plus(data))
+        logger.info("Intel88 %s" %data)
         headers_string = "|Cookie=" + cookies
         url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
         streams = scrapertools.find_multiple_matches(url_streams,
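Note: the cookie loop above flattens a multi-cookie Set-Cookie header into a single Cookie string for the stream request. A toy run with an invented header value:

    set_cookie = 'DRIVE_STREAM=abc; path=/; HttpOnly, NID=xyz; path=/'   # invented
    cookies = ''
    for c in set_cookie.split('HttpOnly, '):
        cookies += c.split(';', 1)[0] + '; '
    headers_string = '|Cookie=' + cookies
    print(headers_string)                                  # |Cookie=DRIVE_STREAM=abc; NID=xyz;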