Merge pull request #425 from pipcat/master
Pepecine, and normalize vose in the channel JSON files
plugin.video.alfa/channels/crunchyroll.json

@@ -2,7 +2,7 @@
     "id": "crunchyroll",
     "name": "Crunchyroll",
     "language": ["cast", "lat"],
-    "active": false,
+    "active": true,
     "adult": false,
     "thumbnail": "http://i.imgur.com/O49fDS1.png",
     "categories": [
plugin.video.alfa/channels/dospelis.json

@@ -3,7 +3,7 @@
     "name": "DosPelis",
     "active": true,
     "adult": false,
-    "language": ["lat", "cast", "vose"],
+    "language": ["lat", "cast"],
     "thumbnail": "https://www.dospelis.com/wp-content/uploads/2018/07/dospelislogo.png",
     "banner": "",
     "categories": [
plugin.video.alfa/channels/pelisr.json

@@ -3,12 +3,13 @@
     "name": "PelisR",
     "active": true,
     "adult": false,
-    "language": ["lat", "cast", "vose"],
+    "language": ["lat", "cast"],
     "thumbnail": "https://s22.postimg.cc/gcp2jqbs1/pelisr.png",
     "banner": "",
     "categories": [
         "movie",
         "tvshow",
+        "vos",
         "direct"
     ],
     "settings": [
plugin.video.alfa/channels/pepecine.py (158 lines changed; Executable file → Normal file)
@@ -17,7 +17,7 @@ from channels import filtertools
 
 host = "https://pepecine.io"
 
-IDIOMAS = {'es': 'Español', 'en': 'Inglés', 'la': 'Latino', 'su': 'VOSE', 'vo': 'VO', 'otro': 'OVOS'}
+IDIOMAS = {'c': 'Castellano', 'i': 'Inglés', 'l': 'Latino', 's': 'VOSE', 'v': 'VO'}
 list_idiomas = IDIOMAS.values()
 list_language = ['default']
 
@@ -46,7 +46,7 @@ def mainlist(item):
 
     itemlist.append(Item(title = "Series"))
 
-    itemlist.append(item.clone(title = " Últimas series",
+    itemlist.append(item.clone(title = " Últimos episodios",
                                url = host + '/las-series-online',
                                action = 'list_latest',
                                type = 'series'))
@@ -151,10 +151,12 @@ def list_latest(item):
     itemlist = []
     data = get_source(item.url)
     data_url= scrapertools.find_single_match(data,'<iframe.*?src=(.*?) ')
-    data = get_source(data_url)
+    data = get_source(data_url).decode('iso-8859-1').encode('utf8')
     patron = "<div class='online'>.*?<img src=(.*?) class=.*?alt=(.*?) title=.*?"
     patron += "<b><a href=(.*?) target=.*?align=right><div class=s7>(.*?) <"
     matches = re.compile(patron,re.DOTALL).findall(data)
+    # Como len(matches)=300, se controla una paginación interna y se muestran en bloques de 20 (perpage)
+    # Se descartan enlaces repetidos en la misma paginación pq algunas pelis se duplican por el idioma/calidad pero apuntan a la misma url
     count = 0
     for thumbnail, title, url, language in matches:
         count += 1
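
The one functional change in this hunk is the added .decode('iso-8859-1').encode('utf8'): the latest-releases iframe is apparently served as ISO-8859-1, judging by the fix, so its bytes have to be re-encoded before they are mixed with the rest of the addon's UTF-8 strings (the channel runs under Kodi's Python 2, where str is a byte string). A sketch of what that chain does, with an invented payload:

# -*- coding: utf-8 -*-
# Sketch of the re-encode added above; the sample bytes are invented.
raw = b'Acci\xf3n 1x02'                      # ISO-8859-1 bytes, as the iframe would serve them
fixed = raw.decode('iso-8859-1').encode('utf8')
print(fixed.decode('utf8'))                  # Acción 1x02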
@@ -162,33 +164,53 @@ def list_latest(item):
         if count < item.indexp:
             continue
 
-        if count >= item.indexp + perpage:
-            break;
+        isDD, language = _extraer_dd_idioma(language)
+        if isDD:
+            continue
+
+        repe = False
+        for it in itemlist:
+            if it.url == host + url:
+                repe = True
+                break
+        if repe:
+            continue
 
         path = scrapertools.find_single_match(thumbnail, "w\w+(/\w+.....)")
         filtro_list = {"poster_path": path}
         filtro_list = filtro_list.items()
-        itemlist.append(item.clone(action = 'findvideos',
-                                   title = title,
-                                   url = host + url,
-                                   thumbnail = thumbnail,
-                                   language = language,
-                                   infoLabels = {'filtro': filtro_list},
-                                   )
-                        )
+
+        new_item = item.clone(action = 'findvideos',
+                              title = title,
+                              url = host + url,
+                              thumbnail = thumbnail,
+                              language = language,
+                              infoLabels = {'filtro': filtro_list, 'year': '-'}
+                              )
+
+        if item.type == 'series':
+            new_item.contentType = 'episode'
+            season_episode = scrapertools.find_single_match(title, ' (\d+)x(\d+)')
+            if season_episode:
+                new_item.contentSeason = season_episode[0]
+                new_item.contentEpisodeNumber = season_episode[1]
+                new_item.contentSerieName = re.sub(r' \d+x\d+', '', title).strip()
+            else:
+                new_item.contentSerieName = title
+        else:
+            new_item.contentType = 'movie'
+            new_item.contentTitle = title
+
+        itemlist.append(new_item)
+
+        if len(itemlist) >= perpage:
+            break;
 
     tmdb.set_infoLabels(itemlist)
 
     # Desde novedades no tenemos el elemento item.channel
-    if item.channel:
-        itemlist.append(item.clone(title = "Página siguiente >>>",
-                                   indexp = item.indexp + perpage
-                                   )
-                        )
-        if item.indexp > 1:
-            itemlist.append(item.clone(title = "<<< Página anterior",
-                                       indexp = item.indexp - perpage
-                                       )
-                            )
+    if item.channel and len(itemlist) >= perpage and count + 1 <= len(matches):
+        itemlist.append( item.clone(title = "Página siguiente >>>", indexp = count + 1) )
 
     return itemlist
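
The rewritten loop above pages a long scraped list (around 300 rows) in blocks of perpage, skips direct-download rows, and drops duplicate URLs, since the same title can appear once per language/quality. That pattern, reduced to a self-contained sketch — the data and the page_of() helper are invented, with indexp playing the role of item.indexp:

# Minimal sketch of the block-pagination + dedup pattern used by list_latest
# above; 'matches' stands in for the ~300 scraped rows.
perpage = 20

def page_of(matches, indexp):
    shown, seen = [], set()
    count = 0
    for url in matches:
        count += 1
        if count < indexp:          # skip rows shown on earlier pages
            continue
        if url in seen:             # same film repeated per language/quality
            continue
        seen.add(url)
        shown.append(url)
        if len(shown) >= perpage:   # one block per page
            break
    next_indexp = count + 1 if count < len(matches) else None
    return shown, next_indexp

matches = ['/ver/%d' % (i // 2) for i in range(300)]   # every url appears twice
page1, nxt = page_of(matches, 1)
print('%d unique urls; next page starts at match %d' % (len(page1), nxt))
# -> 20 unique urls; next page starts at match 40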
@@ -200,7 +222,7 @@ def list_all(item):
         item.page = 1
 
     genero = scrapertools.find_single_match(item.url, "genre=(\w+)")
-    data= get_source(item.url)
+    data = get_source(item.url)
     token = scrapertools.find_single_match(data, "token:.*?'(.*?)'")
     url = host+'/titles/paginate?_token=%s&perPage=%d&page=%d&order=mc_num_of_votesDesc&type=%s&minRating=&maxRating=&availToStream=1&genres[]=%s' % (token, perpage, item.page, item.type, genero)
     data = httptools.downloadpage(url).data
@@ -225,6 +247,7 @@ def list_all(item):
 
         if item.type == 'movie':
             new_item.action = 'findvideos'
+            new_item.contentType = 'movie'
             new_item.contentTitle = element['title']
             new_item.fulltitle = element['title']
             if new_item.extra != "links_encoded":
@@ -234,6 +257,8 @@ def list_all(item):
             new_item.action = 'seasons'
             new_item.url = host + "/ver-serie-tv/" + str(element['id'])
             new_item.show = element['title']
+            new_item.contentType = 'tvshow'
+            new_item.contentSerieName = element['title']
 
         itemlist.append(new_item)
 
@@ -242,10 +267,6 @@ def list_all(item):
         itemlist.append(item.clone(title = 'Página siguiente >>>',
                                    page = item.page + 1))
 
-        if (int(item.page) > 1):
-            itemlist.append(item.clone(title = '<<< Página anterior',
-                                       page = item.page - 1))
-
     return itemlist
 
 def episodios(item):
@@ -262,17 +283,22 @@ def episodios(item):
 
 def seasons(item):
     logger.info()
     itemlist=[]
 
     data = httptools.downloadpage(item.url).data
 
     reSeasons = re.findall("href *= *[\"']([^\"']+)[\"'][^\"']+[\"']sezon[^>]+>([^<]+)+", data)
 
-    itemlist = [item.clone(action = "seasons_episodes",
-                           title = title,
-                           url = url) for url, title in reSeasons]
+    for url, title in reSeasons:
+        new_item = item.clone(action = "seasons_episodes", title = title, url = url)
+        new_item.contentType = 'season'
+        new_item.contentSeason = title.replace('Temporada', '').strip()
+        itemlist.append(new_item)
+
+    if len(itemlist) == 1:
+        itemlist = seasons_episodes(itemlist[0])
 
     tmdb.set_infoLabels(itemlist)
 
     # Opción "Añadir esta serie a la videoteca de XBMC"
     if config.get_videolibrary_support() and len(itemlist) > 0:
         itemlist.append(item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios"))
@@ -281,16 +307,25 @@ def seasons(item):
 
 def seasons_episodes(item):
     logger.info()
     itemlist=[]
 
     data = httptools.downloadpage(item.url).data
-    reEpisodes = re.findall("<a[^>]+col-sm-3[^>]+href *= *[\"'](?P<url>[^\"']+).*?<img[^>]+src *= *[\"'](?P<thumbnail>[^\"']+).*?<a[^>]+>(?P<title>.*?)</a>", data, re.MULTILINE | re.DOTALL)
-
-    seasons = [item.clone(action = "findvideos",
-                          title = re.sub("<b>Episodio (\d+)</b> - T(\d+) \|[^\|]*\| ".format(item.show), "\g<2>x\g<1> - ", title),
-                          thumbnail = thumbnail,
-                          url = url) for url, thumbnail, title in reEpisodes]
+    reEpisodes = re.findall('<li class="media bord">(.*?)</li>', data, re.MULTILINE | re.DOTALL)
+    for epi in reEpisodes:
+        new_item = item.clone(action = "findvideos")
+        new_item.url = scrapertools.find_single_match(epi, ' href="([^"]*)')
+        new_item.thumbnail = scrapertools.find_single_match(epi, ' src="([^"]*)')
+        new_item.contentType = 'episode'
+        new_item.contentEpisodeNumber = scrapertools.find_single_match(epi, '<b>Episodio (\d+)</b>')
+        title = scrapertools.find_single_match(epi, '<b>Episodio \d+</b> - T\d+ \|[^\|]*\| ([^<]*)').strip()
+        new_item.title = '%sx%s - %s' % (str(item.contentSeason), str(new_item.contentEpisodeNumber), title)
+
+        itemlist.append(new_item)
 
-    return seasons
+    tmdb.set_infoLabels(itemlist)
+
+    return itemlist
 
 
 def findvideos(item):
@@ -298,28 +333,17 @@ def findvideos(item):
     itemlist=[]
 
     if item.extra != "links_encoded":
 
-        # data = httptools.downloadpage(item.url).data
-        # linksRE = re.findall("getFavicon\('(?P<url>[^']+)[^>]+>[^>]+>(?P<language>[^<]+).+?<td[^>]+>(?P<quality>[^<]*).+?<td[^>]+>(?P<antiquity>[^<]*)", data, re.MULTILINE | re.DOTALL)
-        # for url, language, quality, antiquity in linksRE:
-        #     logger.info("URL = " + url);
-
-
         data = httptools.downloadpage(item.url).data
-        patron = "renderTab.bind.*?'([^']+).*?"
-        patron += "app.utils.getFavicon.*?<img [^>]*src *= *[\"']/([^\.]+).*?"
-        patron += 'color:#B1FFC5;">([^<]+)'
+        patron = "renderTab\.bind.*?'([^']+)"
+        patron += '.*?<img data-bind="[^"]+"><b>([^<]*)'
+        patron += '.*?<td [^>]*>([^<]*)'
+        patron += '.*?<td [^>]*>([^<]*)'
 
         matches = scrapertools.find_multiple_matches(data, patron)
-        for scrapedurl, language, scrapedquality in matches:
-            isDD = language.startswith("z")
-            if isDD:
-                language = language[1:]
-
-            language = language[0:2]
-            language = IDIOMAS.get(language, language)
-
-            title = ("Ver" if not isDD else "Descargar") + " enlace en %s [" + language + "] [" + scrapedquality + "]"
+        for scrapedurl, language, scrapedquality, scrapedwhen in matches:
+            isDD, language = _extraer_dd_idioma(language)
+            if not isDD:
+                title = "%s [" + language + "] [" + scrapedquality + "] [" + scrapedwhen + "]"
             itemlist.append(item.clone(action = 'play',
                                        title = title,
                                        url = scrapedurl,
@@ -327,22 +351,17 @@ def findvideos(item):
                                        )
                             )
         itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
 
     else:
         for link in item.url:
-
-            language = scrapertools.find_single_match(link['label'], '/([^\.]+)')
-            isDD = language.startswith("z")
-            if isDD:
-                language = language[1:]
-
-            language = language[0:2]
-
+            isDD, language = _extraer_dd_idioma(link['label'])
+            if not isDD:
                 itemlist.append(item.clone(action='play',
                                            title = item.title,
                                            url= link['url'],
-                                           language=IDIOMAS.get(language, language),
+                                           language=language,
                                            quality=link['quality']))
 
         itemlist=servertools.get_servers_itemlist(itemlist)
         for videoitem in itemlist:
             videoitem.title = '%s [%s] [%s]' % (videoitem.server.capitalize(), videoitem.language, videoitem.quality)
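
Both findvideos branches now defer the server name: the scraping branch builds titles containing a %s placeholder and hands servertools a lambda that fills it with the detected server, while the links_encoded branch rebuilds each title after server detection. The placeholder idea in isolation (sample values invented):

# Sketch of the deferred title formatting above: the scraper leaves a %s slot
# in the title, and the server name is substituted only once the hoster is known.
format_title = lambda title, server: title % server.capitalize()
print(format_title('%s [VOSE] [HD] [hace 2 horas]', 'powvideo'))
# -> Powvideo [VOSE] [HD] [hace 2 horas]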
@@ -365,3 +384,10 @@ def findvideos(item):
 def play(item):
     item.thumbnail = item.contentThumbnail
     return [item]
+
+# idiomas: l, c, s, i, v (lat, cast, subt, inglés, vo). Si empieza por z es descarga directa
+def _extraer_dd_idioma(lang):
+    lang = lang.strip().lower()
+    isDD = lang.startswith('z')
+    lg = lang[1] if isDD else lang[0]
+    return isDD, IDIOMAS.get(lg, lang)
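
The new _extraer_dd_idioma helper centralizes the language parsing that was previously duplicated across both findvideos branches: a leading 'z' flags a direct-download link, and the next letter is looked up in IDIOMAS. A quick standalone check of its behaviour — the helper and dict are copied from the hunks above so it runs outside Kodi, and the sample inputs are invented:

# -*- coding: utf-8 -*-
# IDIOMAS inlined from the @@ -17,7 +17,7 @@ hunk of pepecine.py above.
IDIOMAS = {'c': 'Castellano', 'i': 'Inglés', 'l': 'Latino', 's': 'VOSE', 'v': 'VO'}

# idiomas: l, c, s, i, v (lat, cast, subt, inglés, vo). Si empieza por z es descarga directa
def _extraer_dd_idioma(lang):
    lang = lang.strip().lower()
    isDD = lang.startswith('z')
    lg = lang[1] if isDD else lang[0]
    return isDD, IDIOMAS.get(lg, lang)

print(_extraer_dd_idioma('l'))    # (False, 'Latino')      -> streaming link, kept
print(_extraer_dd_idioma('zc'))   # (True, 'Castellano')   -> direct download, skipped
print(_extraer_dd_idioma(' S '))  # (False, 'VOSE')        -> whitespace/case tolerated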
plugin.video.alfa/channels/poseidonhd.json

@@ -3,12 +3,13 @@
     "name": "PoseidonHD",
     "active": true,
     "adult": false,
-    "language": ["lat", "vose"],
+    "language": ["lat"],
     "thumbnail": "https://poseidonhd.com/wp-content/uploads/2017/06/logo2.png",
     "banner": "",
     "categories": [
         "movie",
         "tvshow",
+        "vos",
         "direct"
     ],
     "settings": [
plugin.video.alfa/channels/poseidonhd.py

@@ -17,7 +17,7 @@ from channels import autoplay
 from platformcode import config, logger
 
 
-IDIOMAS = {'mx': 'Latino', 'dk':'Latino', 'es': 'Castellano', 'en': 'VOSE', 'gb':'VOSE'}
+IDIOMAS = {'mx': 'Latino', 'dk':'Latino', 'es': 'Castellano', 'en': 'VOSE', 'gb':'VOSE', 'de':'Alemán'}
 list_language = IDIOMAS.values()
 
 list_quality = ['HD', 'SD', 'CAM']
plugin.video.alfa/channels/sipeliculas.json

@@ -3,7 +3,7 @@
     "name": "SiPeliculas",
     "active": true,
     "adult": false,
-    "language": "[lat]",
+    "language": ["lat"],
     "banner": "https://s24.postimg.cc/5wcznkxhv/sipeliculas.png",
     "thumbnail": "https://s23.postimg.cc/adrl2k5mz/sipeliculas.png",
     "categories": [
plugin.video.alfa/channels/wikiseries.json

@@ -3,11 +3,12 @@
     "name": "WikiSeries",
     "active": true,
     "adult": false,
-    "language": ["lat", "cast", "vo", "vose"],
+    "language": ["lat", "cast"],
     "thumbnail": "https://s31.postimg.cc/tnmcrytnv/16142379_1847422438815031_3788419094563167644_n.jpg",
     "banner": "",
     "categories": [
-        "tvshow"
+        "tvshow",
+        "vos"
     ],
     "settings": [
         {
plugin.video.alfa/servers/powvideo.py

@@ -15,7 +15,7 @@ headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/2
 def test_video_exists(page_url):
     referer = page_url.replace('iframe', 'preview')
     data = httptools.downloadpage(page_url, headers={'referer': referer}).data
-    if data == "File was deleted":
+    if data == "File was deleted" or data == '':
         return False, "[powvideo] El video ha sido borrado"
     return True, ""
 
@@ -33,26 +33,12 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
 
     url = scrapertools.find_single_match(unpacked, "(?:src):\\\\'([^\\\\]+.mp4)\\\\'")
 
-    matches = scrapertools.find_single_match(data, "\['splice'\]\(0x([0-9a-fA-F]*),0x([0-9a-fA-F]*)\);")
-    if matches:
-        url = decode_powvideo_url(url, int(matches[0], 16), int(matches[1], 16))
-    else:
-        matches = scrapertools.find_single_match(data, "\(0x([0-9a-fA-F]*),0x([0-9a-fA-F]*)\);")
-        if matches:
-            url = decode_powvideo_url(url, int(matches[0], 16), int(matches[1], 16))
-        else:
-            logger.debug('No detectado splice! Revisar sistema de decode...')
-            # ~ logger.debug(data)
-
-    itemlist.append([".mp4" + " [powvideo]", url])
+    itemlist.append([".mp4" + " [powvideo]", decode_video_url(url)])
     itemlist.sort(key=lambda x: x[0], reverse=True)
     return itemlist
 
-def decode_powvideo_url(url, desde, num):
+def decode_video_url(url):
     tria = re.compile('[0-9a-z]{40,}', re.IGNORECASE).findall(url)[0]
     gira = tria[::-1]
-    if desde == 0:
-        x = gira[num:]
-    else:
-        x = gira[:desde] + gira[(desde+num):]
+    x = gira[1:]
     return re.sub(tria, x, url)
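
Where the old decode_powvideo_url needed splice offsets scraped from the page (and logged a warning when they were missing), the new decode_video_url assumes a fixed obfuscation: find the long (40+ character) token in the URL, reverse it, and drop its first character. Demonstrated on an invented URL:

import re

# Standalone copy of decode_video_url from the hunk above, exercised against
# a made-up .mp4 URL; the 48-character token is purely illustrative.
def decode_video_url(url):
    tria = re.compile('[0-9a-z]{40,}', re.IGNORECASE).findall(url)[0]
    gira = tria[::-1]       # reverse the token
    x = gira[1:]            # drop the first character of the reversed token
    return re.sub(tria, x, url)

url = 'https://example.com/' + 'abcdef0123456789' * 3 + '/v.mp4'
print(decode_video_url(url))   # same URL, token reversed and shortened by one char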