Merge pull request #462 from Alfa-beto/fixes

Correcciones y novedades
This commit is contained in:
Alfa
2018-10-19 11:33:42 -05:00
committed by GitHub
23 changed files with 1527 additions and 126 deletions

View File

@@ -3,8 +3,8 @@
"name": "BlogHorror",
"active": true,
"adult": false,
"language": [""],
"thumbnail": "",
"language": [],
"thumbnail": "https://i.postimg.cc/gcgQhKTL/2018-10-10_20_34_57-_Peliculas_de_Terror_BLOGHORROR.png",
"banner": "",
"categories": [
"movie",
@@ -29,17 +29,9 @@
"visible": true
},
{
"id": "include_in_newest_latino",
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
@@ -51,18 +43,6 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT"
]
}
]
}

View File

@@ -3,7 +3,7 @@
"name": "Dilo",
"active": true,
"adult": false,
"language": [],
"language": ["cast", "lat"],
"thumbnail": "https://s22.postimg.cc/u6efsniqp/dilo.png",
"banner": "",
"categories": [

View File

@@ -71,7 +71,7 @@ def menu_movies(item):
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
@@ -92,11 +92,9 @@ def section(item):
duplicados=[]
data = get_source(host+'/'+item.type)
if 'Genero' in item.title:
patron = '<li class=cat-item cat-item-\d+><a href=(.*?) >(.*?)/i>'
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >(.*?)/i>'
elif 'Año' in item.title:
patron = '<li><a href=(.*?release.*?)>(.*?)</a>'
elif 'Calidad' in item.title:
patron = 'menu-item-object-dtquality menu-item-\d+><a href=(.*?)>(.*?)</a>'
patron = '<li><a href="(.*?release.*?)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -123,10 +121,12 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
if item.type == 'movies':
patron = '<article id=post-\d+ class=item movies><div class=poster><img src=(.*?) alt=(.*?)>.*?quality>(.*?)'
patron += '</span><\/div><a href=(.*?)>.*?<\/h3><span>(.*?)<\/span><\/div>.*?flags(.*?)metadata'
patron = '<article id="post-\d+" class="item movies"><div class="poster">.?<img src="([^"]+)" alt="([^"]+)">.*?'
patron +='"quality">([^<]+)</span><\/div>.?<a href="([^"]+)">.*?'
patron +='<\/h3>.?<span>([^"]+)<\/span><\/div>.*?"flags"(.*?)metadata'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year, lang_data in matches:
@@ -148,8 +148,8 @@ def list_all(item):
infoLabels={'year':year}))
elif item.type == 'tvshows':
patron = '<article id=post-\d+ class=item tvshows><div class=poster><img src=(.*?) alt=(.*?)>.*?'
patron += '<a href=(.*?)>.*?<\/h3><span>(.*?)<\/span><\/div>'
patron = '<article id="post-\d+" class="item tvshows">.?<div class="poster">.?<img src="([^"]+)"'
patron += ' alt="([^"]+)">.*?<a href="([^"]+)">.*?<\/h3>.?<span>(.*?)<\/span><\/div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
@@ -168,8 +168,7 @@ def list_all(item):
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Paginación
#url_next_page = scrapertools.find_single_match(data,"<a class='arrow_pag' href=([^>]+)><i id='nextpagination'")
url_next_page = scrapertools.find_single_match(data,"<link rel=next href=([^ ]+) />")
url_next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
@@ -181,7 +180,7 @@ def seasons(item):
itemlist=[]
data=get_source(item.url)
patron='Temporada \d+'
patron='Temporada.?\d+'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -215,7 +214,7 @@ def episodesxseasons(item):
itemlist = []
data=get_source(item.url)
patron='class=numerando>%s - (\d+)</div><div class=episodiotitle><a href=(.*?)>(.*?)<' % item.infoLabels['season']
patron='class="numerando">%s - (\d+)</div>.?<div class="episodiotitle">.?<a href="([^"]+)">([^<]+)<' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -237,7 +236,7 @@ def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'id=option-(\d+).*?rptss src=(.*?) frameborder'
patron = 'id="option-(\d+)".*?rptss" src="([^"]+)" frameborder'
matches = re.compile(patron, re.DOTALL).findall(data)
lang=''
for option, scrapedurl in matches:
@@ -292,7 +291,8 @@ def search_results(item):
itemlist=[]
data=get_source(item.url)
patron = '<article>.*?<a href=(.*?)><img src=(.*?) alt=(.*?) />.*?meta.*?year>(.*?)<(.*?)<p>(.*?)</p>'
patron = '<article>.*?<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" \/>.*?meta.*?'
patron += '"year">([^<]+)<(.*?)<p>([^<]+)<\/p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumb, scrapedtitle, year, lang_data, scrapedplot in matches:

View File

@@ -0,0 +1,46 @@
{
"id": "dramasjc",
"name": "DramasJC",
"active": true,
"adult": false,
"language": [],
"thumbnail": "https://www.dramasjc.com/wp-content/uploads/2018/03/logo.png",
"banner": "",
"version": 1,
"categories": [
"tvshow",
"movie",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"VOSE",
"VO"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,281 @@
# -*- coding: utf-8 -*-
# -*- Channel DramasJC -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
host = 'https://www.dramasjc.com/'
IDIOMAS = {'VOSE': 'VOSE', 'VO':'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['okru', 'mailru', 'openload']
def mainlist(item):
    """Build the channel's root menu (doramas, movies, search)."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = [
        Item(channel=item.channel, title="Doramas", action="menu_doramas",
             thumbnail=get_thumb('doramas', auto=True)),
        Item(channel=item.channel, title="Películas", action="list_all", url=host + 'peliculas/',
             type='movie', thumbnail=get_thumb('movies', auto=True)),
        Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
             thumbnail=get_thumb('search', auto=True)),
    ]
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def menu_doramas(item):
    """Submenu for doramas: full listing plus a genre browser."""
    logger.info()
    return [
        Item(channel=item.channel, title="Todos", action="list_all", url=host + 'series',
             thumbnail=get_thumb('all', auto=True)),
        Item(channel=item.channel, title="Generos", action="section",
             thumbnail=get_thumb('genres', auto=True)),
    ]
def get_source(url):
    """Download *url* and return its HTML with layout whitespace stripped."""
    logger.info()
    raw = httptools.downloadpage(url).data
    # Collapse newlines, tabs, &nbsp;, <br> and runs of spaces so the
    # scraping regexes can match against a single normalized line.
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
full_data = data
data = scrapertools.find_single_match(data, '<ul class="MovieList NoLmtxt.*?>(.*?)</ul>')
patron = '<article id="post-.*?<a href="([^"]+)">.*?(?:<img |-)src="([^"]+)".*?alt=".*?'
patron += '<h3 class="Title">([^<]+)<\/h3>.?(?:</a>|<span class="Year">(\d{4})<\/span>).*?'
patron += '(movie|TV)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, type in matches:
url = scrapedurl
if year == '':
year = '-'
if "|" in scrapedtitle:
scrapedtitle= scrapedtitle.split("|")
contentname = scrapedtitle[0].strip()
else:
contentname = scrapedtitle
contentname = re.sub('\(.*?\)','', contentname)
title = '%s [%s]'%(contentname, year)
thumbnail = 'http:'+scrapedthumbnail
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumbnail,
infoLabels={'year':year}
)
if type == 'movie':
new_item.contentTitle = contentname
new_item.action = 'findvideos'
else:
new_item.contentSerieName = contentname
new_item.action = 'seasons'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, True)
# Paginación
url_next_page = scrapertools.find_single_match(full_data,'<a class="next.*?href="([^"]+)">')
if url_next_page:
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def section(item):
    """List the genre links found in the home page's genre dropdown."""
    logger.info()
    menu_html = scrapertools.find_single_match(
        get_source(host), '<a href="#">Dramas por Genero</a>(.*?)</ul>')
    links = re.compile('<a href="([^ ]+)">([^<]+)<', re.DOTALL).findall(menu_html)
    return [Item(channel=item.channel, title=label, url=link, action='list_all')
            for link, label in links]
def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'class="Title AA-Season On" data-tab="1">Temporada <span>([^<]+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for temporada in matches:
title = 'Temporada %s' % temporada
contentSeasonNumber = temporada
item.infoLabels['season'] = contentSeasonNumber
itemlist.append(item.clone(action='episodesxseason',
title=title,
contentSeasonNumber=contentSeasonNumber
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber
))
return itemlist
def episodios(item):
    """Flatten all seasons of the series into a single episode list."""
    logger.info()
    episodes = []
    for season_item in seasons(item):
        episodes.extend(episodesxseason(season_item))
    return episodes
def episodesxseason(item):
    """List the episodes of one season, skipping unreleased placeholders."""
    logger.info()
    itemlist = []
    season = item.contentSeasonNumber
    data = get_source(item.url)
    data = scrapertools.find_single_match(data, '>Temporada <span>%s</span>(.*?)</ul>' % season)
    matches = re.compile('<a href="([^"]+)">([^<]+)</a>', re.DOTALL).findall(data)
    # Episodes carry no number on the page; they are counted in page order.
    for idx, (scrapedurl, scrapedtitle) in enumerate(matches, start=1):
        epi = str(idx)
        item.infoLabels['episode'] = epi
        # "Próximamente" entries are placeholders with no video behind them.
        if 'próximamente' not in scrapedtitle.lower():
            itemlist.append(item.clone(action='findvideos',
                                       title=season + 'x%s - Episodio %s' % (epi, epi),
                                       url=scrapedurl,
                                       contentEpisodeNumber=epi,
                                       ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def findvideos(item):
    """Build the list of playable links for a movie/episode page.

    Scrapes every player option, resolves the embedded video URL and tags it
    with language (VOSE/VO) and quality.  A YouTube embed, if present, is
    exposed as a separate "Trailer" entry instead of a mirror.

    Fixes: the option label is "Idioma - Calidad" but some labels lack the
    quality part, which made ``opt_data[1]`` raise IndexError and abort the
    whole listing; the trailer was previously appended via a NameError that
    a bare ``except: pass`` swallowed — now it is tracked explicitly.
    """
    logger.info()
    itemlist = []
    trailer = None  # set only when a YouTube embed is found
    data = get_source(item.url)
    data = scrapertools.unescape(data)
    data = scrapertools.decodeHtmlentities(data)
    patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?</iframe>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for option, scrapedurl in matches:
        scrapedurl = scrapedurl.replace('"', '').replace('&#038;', '&')
        data_video = get_source(scrapedurl)
        url = scrapertools.find_single_match(data_video, '<div class="Video">.*?src="([^"]+)"')
        opt_data = scrapertools.find_single_match(
            data, '"%s"><span>.*?</span>.*?<span>([^<]+)</span>' % option).split('-')
        language = opt_data[0].strip()
        # Tolerate labels without the " - quality" suffix.
        quality = opt_data[1].strip() if len(opt_data) > 1 else ''
        if 'sub' in language.lower():
            language = 'VOSE'
        else:
            language = 'VO'
        if url != '' and 'youtube' not in url:
            itemlist.append(Item(channel=item.channel, title='%s', url=url, language=IDIOMAS[language],
                                 quality=quality, action='play'))
        elif 'youtube' in url:
            trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % '%s [%s] [%s]' % (i.server.capitalize(), i.language, i.quality))
    if trailer is not None:
        itemlist.append(trailer)
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url, action="add_pelicula_to_library", extra="findvideos",
                 contentTitle=item.contentTitle))
    return itemlist
def search(item, texto):
    """Global-search entry point: query the site with *texto*."""
    logger.info()
    query = texto.replace(" ", "+")
    item.url += query
    # An empty query would list the whole site; return nothing instead.
    return list_all(item) if query else []
def newest(categoria):
    """Return newest items for the "Novedades" menu (peliculas only)."""
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas']:
            item.url = host + 'peliculas/'
            itemlist = list_all(item)
            # Novedades is a flat list: drop the pagination pseudo-entry.
            if itemlist[-1].title == 'Siguiente >>':
                itemlist.pop()
    except:
        import sys
        for err_part in sys.exc_info():
            logger.error("{0}".format(err_part))
        return []
    return itemlist

View File

@@ -0,0 +1,88 @@
{
"id": "peliculashd",
"name": "PeliculasHD",
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://i.postimg.cc/05HTS7wC/peliculashd.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vos",
"direct"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"VOSE"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - Episodios de anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,361 @@
# -*- coding: utf-8 -*-
# -*- Channel PeliculasHD -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import base64
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'Latino': 'Latino', 'Español': 'Castellano', 'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = [
'directo',
'openload',
'rapidvideo'
]
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'peliculashd')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'peliculashd')
host = 'https://peliculashd.site/'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
thumbnail= get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title='Series', url=host+'/genero/serie', action='list_all', type='tvshows',
thumbnail= get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, title='Anime', url=host + '/genero/anime', action='list_all', type='tvshows',
thumbnail=get_thumb('anime', auto=True)))
itemlist.append(
Item(channel=item.channel, title='Telenovelas', url=host + '/genero/telenovelas-teleseries', action='list_all', type='tvshows',
thumbnail=get_thumb('telenovelas', auto=True)))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + '?s=', thumbnail=get_thumb("search", auto=True),
extra='movie'))
autoplay.show_option(item.channel, itemlist)
return itemlist
def menu_movies(item):
logger.info()
itemlist=[]
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'movies', action='list_all',
thumbnail=get_thumb('all', auto=True), type='movies'))
itemlist.append(Item(channel=item.channel, title='Genero', action='section',
thumbnail=get_thumb('genres', auto=True), type='movies'))
itemlist.append(Item(channel=item.channel, title='Por Año', action='section',
thumbnail=get_thumb('year', auto=True), type='movies'))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def get_language(lang_data):
    """Extract the distinct language codes from a "flags" HTML snippet."""
    logger.info()
    languages = []
    for code in scrapertools.find_multiple_matches(lang_data, '/flags/(.*?).png\)'):
        # An English flag marks subtitled (VOSE) releases on this site.
        normalized = 'vose' if code == 'en' else code
        if normalized not in languages:
            languages.append(normalized)
    return languages
def section(item):
logger.info()
itemlist=[]
duplicados=[]
full_data = get_source(host+'/'+item.type)
if 'Genero' in item.title:
data = scrapertools.find_single_match(full_data, '<a>Generos</a>(.*?)</ul>')
elif 'Año' in item.title:
data = scrapertools.find_single_match(full_data, '<h2>Busqueda por Año</h2>(.*?)</ul>')
patron = '<a href="([^"]+)">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
plot=''
title = scrapedtitle
url = scrapedurl
if title not in duplicados and title.lower() != 'proximamente':
itemlist.append(Item(channel=item.channel, url=url, title=title, plot=plot, action='list_all',
type=item.type))
duplicados.append(title)
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.type == 'movies':
patron = '<article id="post-\d+" class="item movies"><div class="poster">\s?<img src="([^"]+)" alt="([^"]+)">.*?'
patron += '"quality">([^<]+)</span><\/div>\s?<a href="([^"]+)">.*?</h3>.*?<span>([^<]+)</'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year in matches:
title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
contentTitle = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
quality=quality,
infoLabels={'year':year}))
elif item.type == 'tvshows':
patron = '<article id="post-\d+" class="item tvshows"><div class="poster">\s?<img src="([^"]+)" alt="([^"]+)">.*?'
patron += '<\/div>\s?<a href="([^"]+)">.*?</h3>.*?<span>([^<]+)</'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
title = scrapedtitle
contentSerieName = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
itemlist.append(item.clone(action='seasons',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=contentSerieName,
infoLabels={'year':year}))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Paginación
#url_next_page = scrapertools.find_single_match(data,"<a class='arrow_pag' href=([^>]+)><i id='nextpagination'")
url_next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^ ]+)" />')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def seasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron='Temporada \d+'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for season in matches:
season = season.lower().replace('temporada','')
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
    """Flatten every season of the series into one episode list."""
    logger.info()
    episodes = []
    for season_item in seasons(item):
        episodes.extend(episodesxseasons(season_item))
    return episodes
def episodesxseasons(item):
logger.info()
itemlist = []
data=get_source(item.url)
patron='class="numerando">%s - (\d+)</div><div class="episodiotitle">.?<a href="([^"]+)">([^<]+)<' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedepisode, scrapedurl, scrapedtitle in matches:
infoLabels['episode'] = scrapedepisode
url = scrapedurl
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', type='tv',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
from lib import generictools
import urllib
itemlist = []
data = get_source(item.url)
patron = 'data-post="(\d+)" data-nume="(\d+).*?class="title">([^>]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for pt, nm, language in matches:
if 'sub' in language.lower() or language not in IDIOMAS:
language = 'VOSE'
post = {'action': 'doo_player_ajax', 'post': pt, 'nume': nm}
post = urllib.urlencode(post)
new_data = httptools.downloadpage(host + 'wp-admin/admin-ajax.php', post=post,
headers={'Referer': item.url}).data
hidden_url = scrapertools.find_single_match(new_data, "src='([^']+)'")
new_data = get_source(hidden_url)
matches = scrapertools.find_multiple_matches(new_data, '\["\d+","([^"]+)",\d+\]')
for url in matches:
if not config.get_setting('unify'):
title = ' [%s]' % IDIOMAS[language]
else:
title = ''
url = url.replace('\\/', '/')
if 'playdrive' in url:
new_data = get_source(url)
url = scrapertools.find_single_match(new_data, 'file:"([^"]+)"')
itemlist.append(Item(channel=item.channel, title='%s' + title, url=url, action='play',
language=IDIOMAS[language], infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
# Requerido para Filtrar enlaces
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
itemlist = sorted(itemlist, key=lambda it: it.language)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
    """Site search entry point used by the global finder."""
    logger.info()
    query = texto.replace(" ", "+")
    item.url += query
    if not query:
        return []
    return search_results(item)
def search_results(item):
    """Parse the search results page into movie/series items."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<article>.*?<a href="([^"]+)">.?<img src="([^"]+)" alt="([^"]+)" />.?<span class="(tvshows|movies)".*?'
    patron += '"meta".*?"year">([^<]+)<(.*?)<p>([^<]+)</p>'
    for scrapedurl, scrapedthumb, scrapedtitle, type, year, lang_data, scrapedplot in \
            re.compile(patron, re.DOTALL).findall(data):
        language = get_language(lang_data)
        # Entries with flag icons are movies (direct links); the rest are series.
        action = 'findvideos' if language else 'seasons'
        new_item = Item(channel=item.channel, title=scrapedtitle, url=scrapedurl,
                        thumbnail=scrapedthumb, plot=scrapedplot, action=action,
                        type=type, language=language, infoLabels={'year': year})
        if action == 'findvideos':
            new_item.contentTitle = new_item.title
        else:
            new_item.contentSerieName = new_item.title
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def newest(categoria):
    """Return the newest entries for the given Novedades category."""
    logger.info()
    itemlist = []
    item = Item()
    # Map each Novedades category to its listing page.
    category_urls = {
        'peliculas': host + 'movies/',
        'infantiles': host + 'genero/animacion/',
        'terror': host + 'genero/terror/',
        'anime': host + 'genero/anime/',
    }
    try:
        if categoria in category_urls:
            item.url = category_urls[categoria]
        item.type = 'movies'
        itemlist = list_all(item)
        # Novedades is a flat list: drop the pagination pseudo-entry.
        if itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for err_part in sys.exc_info():
            logger.error("{0}".format(err_part))
        return []
    return itemlist

View File

@@ -0,0 +1,88 @@
{
"id": "peliculonhd",
"name": "PeliculonHD",
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://peliculonhd.com/wp-content/uploads/2018/09/peliculonnewlogo3-.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vos",
"direct"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"VOSE"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - Terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,405 @@
# -*- coding: utf-8 -*-
# -*- Channel PeliculonHD -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import base64
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'mx': 'Latino', 'dk':'Latino', 'es': 'Castellano', 'en': 'VOSE', 'gb':'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = [
'directo',
'openload',
'rapidvideo',
'jawcloud',
'cloudvideo',
'upvid',
'vevio',
'gamovideo'
]
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'peliculonhd')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'peliculonhd')
host = 'https://peliculonhd.com/'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
thumbnail= get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title='Series', url=host+'serie', action='list_all', type='tv',
thumbnail= get_thumb('tvshows', auto=True)))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + '?s=', thumbnail=get_thumb("search", auto=True),
extra='movie'))
autoplay.show_option(item.channel, itemlist)
return itemlist
def menu_movies(item):
logger.info()
itemlist=[]
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'ver', action='list_all',
thumbnail=get_thumb('all', auto=True), type='movie'))
itemlist.append(Item(channel=item.channel, title='Genero', action='section',
thumbnail=get_thumb('genres', auto=True), type='movie'))
itemlist.append(Item(channel=item.channel, title='Por Año', action='section',
thumbnail=get_thumb('year', auto=True), type='movie'))
return itemlist
def get_source(url, referer=None):
    """Download *url* and return its HTML with layout whitespace collapsed.

    Args:
        url: page to fetch.
        referer: optional value for the ``Referer`` request header.

    Fix: the referer check was inverted — the header was only attached
    when *referer* was ``None`` (sending ``Referer: None``), and never
    when a real referer was supplied.
    """
    logger.info()
    if referer is None:
        data = httptools.downloadpage(url).data
    else:
        data = httptools.downloadpage(url, headers={'Referer': referer}).data
    # Collapse layout whitespace so the scraping regexes match on one line.
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data
def get_language(lang_data):
logger.info()
language = []
lang_list = scrapertools.find_multiple_matches(lang_data, '/flags/(.*?).png\)')
for lang in lang_list:
if lang == 'en':
lang = 'vose'
if lang not in language:
language.append(lang)
return language
def section(item):
    """List genre or year filter links scraped from the section menus."""
    logger.info()
    itemlist = []
    seen = []
    page = get_source(host + '/' + item.type)
    if 'Genero' in item.title:
        menu_html = scrapertools.find_single_match(page, '<a href="#">Genero</a>(.*?)</ul>')
    elif 'Año' in item.title:
        menu_html = scrapertools.find_single_match(page, '<a href="#">Año</a>(.*?)</ul>')
    for link, label in re.compile('<a href="([^"]+)">([^<]+)<', re.DOTALL).findall(menu_html):
        # Skip duplicate labels and the "proximamente" placeholder section.
        if label in seen or label.lower() == 'proximamente':
            continue
        seen.append(label)
        itemlist.append(Item(channel=item.channel, url=host + link, title=label, plot='',
                             action='list_all', type=item.type))
    return itemlist
def list_all(item):
    """List movies or TV shows from a listing page, with pagination.

    Fix: the old code did ``url_next_page = 'https:' + url_next_page``
    *before* testing truthiness, so when no next-page link existed the
    value became the truthy string ``'https:'`` and a broken
    "Siguiente >>" entry was always appended.  The scheme is now only
    prepended to protocol-relative links, and only when a link exists.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    if item.type == 'movie':
        patron = '<article id="post-\d+" class="item movies"><div class="poster">\s?<img src="([^"]+)" alt="([^"]+)">.*?'
        patron += '"quality">([^<]+)</span><\/div>\s?<a href="([^"]+)">.*?</h3>.*?<span>([^<]+)</'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year in matches:
            title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
            # "Proximamente" posters have no links yet; skip them.
            if 'proximamente' not in quality.lower():
                itemlist.append(item.clone(action='findvideos',
                                           title=title,
                                           url=scrapedurl,
                                           thumbnail=scrapedthumbnail,
                                           contentTitle=scrapedtitle,
                                           quality=quality,
                                           type=item.type,
                                           infoLabels={'year': year}))
    elif item.type == 'tv':
        patron = '<article id="post-\d+" class="item tvshows"><div class="poster">\s?<img src="([^"]+)" '
        patron += 'alt="([^"]+)">.*?<a href="([^"]+)">.*?</h3>.*?<span>([^<]+)</'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
            itemlist.append(item.clone(action='seasons',
                                       title=scrapedtitle,
                                       url=scrapedurl,
                                       thumbnail=scrapedthumbnail,
                                       contentSerieName=scrapedtitle,
                                       type=item.type,
                                       infoLabels={'year': year}))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Paginación
    if item.type != 'movie':
        item.type = 'tv'
    url_next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^ ]+)" />')
    if url_next_page:
        # The site emits protocol-relative next links ("//host/...").
        if not url_next_page.startswith('http'):
            url_next_page = 'https:' + url_next_page
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, type=item.type,
                                   action='list_all'))
    return itemlist
def seasons(item):
    """List the seasons available for a series.

    The page exposes no structured season list, so every literal
    'Temporada N' occurrence in the HTML is scraped; duplicates are
    discarded so a season mentioned several times yields one entry.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'Temporada \d+'
    matches = re.compile(patron, re.DOTALL).findall(data)
    seen = []
    for season in matches:
        season = season.lower().replace('temporada', '')
        if season in seen:
            continue
        seen.append(season)
        # Defensive copy: previously one shared dict was mutated per
        # iteration, so every Item could end up referencing the labels of
        # the last season scraped.
        infoLabels = dict(item.infoLabels)
        infoLabels['season'] = season
        title = 'Temporada %s' % season
        itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
                             infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
    return itemlist
def episodios(item):
    """Collect every episode of the series across all of its seasons."""
    logger.info()
    all_episodes = []
    for season_item in seasons(item):
        all_episodes.extend(episodesxseasons(season_item))
    return all_episodes
def episodesxseasons(item):
    """List the episodes of the season carried in item.infoLabels['season']."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'class="numerando">%s - (\d+)</div><div class="episodiotitle">.?<a href="([^"]+)">([^<]+)<' % item.infoLabels['season']
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedepisode, scrapedurl, scrapedtitle in matches:
        # Defensive copy: previously one shared dict was mutated per
        # episode, so all appended Items could reference the same labels
        # (last write wins).
        infoLabels = dict(item.infoLabels)
        infoLabels['episode'] = scrapedepisode
        title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
        itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action='findvideos', type='tv',
                             infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def findvideos(item):
    """Build the list of playable links for a movie or episode.

    Queries the site's doo_player ajax endpoint once per language option
    found on the page, then resolves the intermediate hoster pages down to
    a final video URL. Links are then passed through the link checker,
    FilterTools and AutoPlay.
    """
    logger.info()
    from lib import generictools
    import urllib
    itemlist = []
    data = get_source(item.url)
    patron = 'data-post="(\d+)" data-nume="(\d+)".*?img src=\'([^\']+)\''
    matches = re.compile(patron, re.DOTALL).findall(data)
    for post_id, option, lang in matches:
        # The language is encoded in the flag image file name
        lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
        quality = ''
        if lang not in IDIOMAS:
            lang = 'en'
        if not config.get_setting('unify'):
            title = ' [%s]' % IDIOMAS[lang]
        else:
            title = ''
        post = {'action': 'doo_player_ajax', 'post': post_id, 'nume': option, 'type': item.type}
        post = urllib.urlencode(post)
        test_url = '%swp-admin/admin-ajax.php' % 'https://peliculonhd.com/'
        new_data = httptools.downloadpage(test_url, post=post).data
        test_url = scrapertools.find_single_match(new_data, "src='([^']+)'")
        if 'xyz' in test_url:
            new_data = get_source(test_url, item.url)
            patron = "addiframe\('([^']+)'"
            # Bug fix: was assigned to 'matches', shadowing the sequence
            # being iterated by the outer loop.
            iframe_urls = scrapertools.find_multiple_matches(new_data, patron)
            for test_url in iframe_urls:
                # Bug fix: reset per iframe; 'url' used to leak from a
                # previous iteration (duplicate links) or be unbound on the
                # first one (NameError) when no resolver branch matched.
                url = ''
                if 'play.php' in test_url:
                    new_data = get_source(test_url)
                    enc_data = scrapertools.find_single_match(new_data, '(eval.*?)</script')
                    dec_data = jsunpack.unpack(enc_data)
                    url = scrapertools.find_single_match(dec_data, 'src="([^"]+)"')
                elif 'embedvip' in test_url:
                    new_data = get_source(test_url)
                    dejuiced = generictools.dejuice(new_data)
                    url = scrapertools.find_single_match(dejuiced, '"file":"([^"]+)"')
                if url != '':
                    itemlist.append(
                        Item(channel=item.channel, url=url, title='%s' + title, action='play', quality=quality,
                             language=IDIOMAS[lang], infoLabels=item.infoLabels))
        else:
            new_data = get_source(test_url, item.url)
            patron = 'data-embed="([^"]+)" data-issuer="([^"]+)" data-signature="([^"]+)"'
            tokens = scrapertools.find_multiple_matches(new_data, patron)
            for st, vt, tk in tokens:
                post = {'streaming': st, 'validtime': vt, 'token': tk}
                post = urllib.urlencode(post)
                new_url = '%sedge-data/' % 'https://peliculonhd.net/'
                new_data = httptools.downloadpage(new_url, post, headers={'Referer': test_url}).data
                json_data = jsontools.load(new_data)
                if 'peliculonhd' not in json_data['url']:
                    url = json_data['url']
                else:
                    new_data = get_source(json_data['url'], test_url)
                    url = scrapertools.find_single_match(new_data, 'src: "([^"]+)"')
                    url = url.replace('download', 'preview')
                if url != '':
                    itemlist.append(Item(channel=item.channel, url=url, title='%s' + title, action='play',
                                         quality=quality, language=IDIOMAS[lang], infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
    # Requerido para Filtrar enlaces
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    itemlist = sorted(itemlist, key=lambda it: it.language)
    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                     url=item.url, action="add_pelicula_to_library", extra="findvideos",
                     contentTitle=item.contentTitle))
    return itemlist
def search(item, texto):
    """Run a site search for *texto*; empty queries yield an empty list."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url += texto
    if not texto:
        return []
    return search_results(item)
def search_results(item):
    """Scrape the search-results page and build movie/series Items.

    Entries with language flags are treated as movies (findvideos),
    the rest as series (seasons).
    """
    logger.info()
    results = []
    data = get_source(item.url)
    patron = ('<article>.*?<a href="([^"]+)">.?<img src="([^"]+)" alt="([^"]+)" />.?'
              '<span class="(tvshows|movies)".*?"meta".*?"year">([^<]+)<(.*?)<p>([^<]+)</p>')
    found = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumb, scrapedtitle, content_type, year, lang_data, scrapedplot in found:
        language = get_language(lang_data)
        # 'tvshows' -> 'tv', 'movies' -> 'movie'
        content_type = re.sub('shows|s', '', content_type)
        action = 'findvideos' if language else 'seasons'
        new_item = Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumb,
                        plot=scrapedplot, action=action, type=content_type, language=language,
                        infoLabels={'year': year})
        if action == 'findvideos':
            new_item.contentTitle = new_item.title
        else:
            new_item.contentSerieName = new_item.title
        results.append(new_item)
    tmdb.set_infoLabels_itemlist(results, seekTmdb=True)
    return results
def newest(categoria):
    """Return the newest entries for a 'Novedades' category.

    Unknown categories fall through with item.url unset, which the
    try/except below turns into an empty result.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        category_urls = {
            'peliculas': host + 'ver/',
            'infantiles': host + 'genero/animacion/',
            'terror': host + 'genero/terror/',
            # NOTE(review): 'documentales' reuses the terror genre URL —
            # looks like a copy/paste; confirm against the site.
            'documentales': host + 'genero/terror/',
        }
        if categoria in category_urls:
            item.url = category_urls[categoria]
        item.type = 'movie'
        itemlist = list_all(item)
        # Drop the pagination pseudo-item from the newest listing
        if itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

19
plugin.video.alfa/channels/pelisfox.json Executable file → Normal file
View File

@@ -1,14 +1,14 @@
{
"id": "pelisfox",
"name": "pelisfox",
"name": "Pelisfox",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://s14.postimg.cc/c43etc1lt/pelisfox.png",
"banner": "https://s30.postimg.cc/p6twg905d/pelisfox-banner.png",
"categories": [
"direct",
"movie"
"movie",
"vos"
],
"settings": [
{
@@ -50,6 +50,19 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"VOSE"
]
}
]
}

View File

@@ -10,15 +10,17 @@ from core import tmdb
from core import jsontools
from core.item import Item
from platformcode import config, logger
from channels import filtertools
from channels import autoplay
from channelselector import get_thumb
tgenero = {"Drama": "https://s16.postimg.cc/94sia332d/drama.png",
u"Accción": "https://s3.postimg.cc/y6o9puflv/accion.png",
u"Animación": "https://s13.postimg.cc/5on877l87/animacion.png",
u"Ciencia Ficción": "https://s9.postimg.cc/diu70s7j3/cienciaficcion.png",
"Terror": "https://s7.postimg.cc/yi0gij3gb/terror.png",
}
audio = {'LAT': '[COLOR limegreen]LATINO[/COLOR]', 'SUB': '[COLOR red]Subtitulado[/COLOR]'}
IDIOMAS = {'latino': 'LAT', 'subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = ['CAM', '360p', '480p', '720p', '1080p']
list_servers = ['vidlox', 'fembed', 'vidcolud', 'streamango', 'openload']
host = 'http://pelisfox.tv'
@@ -26,46 +28,44 @@ host = 'http://pelisfox.tv'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(item.clone(title="Ultimas",
action="lista",
thumbnail='https://s22.postimg.cc/cb7nmhwv5/ultimas.png',
fanart='https://s22.postimg.cc/cb7nmhwv5/ultimas.png',
thumbnail=get_thumb('last', auto=True),
url=host + '/estrenos/'
))
itemlist.append(item.clone(title="Generos",
action="seccion",
url=host,
thumbnail='https://s3.postimg.cc/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png',
thumbnail=get_thumb('genres', auto=True),
seccion='generos'
))
itemlist.append(item.clone(title="Por Año",
action="seccion",
url=host + '/peliculas/2017/',
thumbnail='https://s8.postimg.cc/7eoedwfg5/pora_o.png',
fanart='https://s8.postimg.cc/7eoedwfg5/pora_o.png',
thumbnail=get_thumb('year', auto=True),
seccion='anios'
))
itemlist.append(item.clone(title="Por Actor",
action="seccion",
url=host + '/actores/',
thumbnail='https://s17.postimg.cc/w25je5zun/poractor.png',
fanart='https://s17.postimg.cc/w25je5zun/poractor.png',
thumbnail=get_thumb('actors', auto=True),
seccion='actor'
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + '/api/elastic/suggest?query=',
thumbnail='https://s30.postimg.cc/pei7txpa9/buscar.png',
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
thumbnail=get_thumb('search', auto=True)
))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -140,8 +140,6 @@ def seccion(item):
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.decode('utf-8')
thumbnail = ''
if item.seccion == 'generos':
thumbnail = tgenero[title]
fanart = ''
url = host + scrapedurl
@@ -222,63 +220,37 @@ def search(item, texto):
def findvideos(item):
logger.info()
itemlist = []
templist = []
video_list = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
links = scrapertools.find_single_match(data, '<script>var.*?_SOURCE.?=.?(.*?);')
links = links.replace('false', '"false"').replace('true', '"true"')
links = eval(links)
for link in links:
language = link['lang']
quality = link['quality']
url = link['source'].replace('\\/', '/')
sub = link['srt']
patron = '<li data-quality=(.*?) data-lang=(.*?)><a href=(.*?) title=.*?'
matches = matches = re.compile(patron, re.DOTALL).findall(data)
for quality, lang, scrapedurl in matches:
url = host + scrapedurl
title = item.title + ' (' + lang + ') (' + quality + ')'
templist.append(item.clone(title=title,
language=lang,
url=url
))
for videoitem in templist:
data = httptools.downloadpage(videoitem.url).data
urls_list = scrapertools.find_single_match(data, 'var.*?_SOURCE\s+=\s+\[(.*?)\]')
urls_list = urls_list.split("},")
for element in urls_list:
if not element.endswith('}'):
element=element+'}'
json_data = jsontools.load(element)
if 'id' in json_data:
id = json_data['id']
sub=''
if 'srt' in json_data:
sub = json_data['srt']
if config.get_setting('unify'):
title = ''
else:
title = ' [%s] [%s]' % (quality, language)
url = json_data['source'].replace('\\','')
server = json_data['server']
quality = json_data['quality']
if 'http' not in url :
itemlist.append(Item(channel=item.channel, action='play', title='%s'+title, url=url, quality=quality,
language=IDIOMAS[language], subtitle=sub, infoLabels=item.infoLabels))
new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
'=%s&srt=%s' % (url, sub)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
data = httptools.downloadpage(new_url).data
data = re.sub(r'\\', "", data)
video_list.extend(servertools.find_video_items(data=data))
for urls in video_list:
if urls.language == '':
urls.language = videoitem.language
urls.title = item.title + urls.language + '(%s)'
# Requerido para FilterTools
for video_url in video_list:
video_url.channel = item.channel
video_url.action = 'play'
video_url.quality = quality
video_url.server = ""
video_url.infoLabels = item.infoLabels
else:
title = '%s [%s]'% (server, quality)
video_list.append(item.clone(title=title, url=url, action='play', quality = quality,
server=server, subtitle=sub))
tmdb.set_infoLabels(video_list)
if config.get_videolibrary_support() and len(video_list) > 0 and item.extra != 'findvideos':
video_list.append(
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
@@ -286,7 +258,7 @@ def findvideos(item):
extra="findvideos",
contentTitle=item.contentTitle
))
return video_list
return itemlist
def newest(categoria):

View File

@@ -14,6 +14,7 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
from lib import generictools
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
@@ -157,7 +158,7 @@ def seasons(item):
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
@@ -214,19 +215,32 @@ def section(item):
itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all', type=item.type))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
servers_page = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
data = get_source(servers_page, referer=item.url)
data = get_source(servers_page)
patron = '<a href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for enc_url in matches:
url_data = get_source(enc_url, referer=item.url)
url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
hidden_url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
if 'server' in hidden_url:
hidden_data = get_source(hidden_url)
url = scrapertools.find_single_match(hidden_data, '<iframe src="([^"]+)"')
else:
url = hidden_url
if 'pelishd.tv' in url:
vip_data = httptools.downloadpage(url, headers={'Referer':item.url}, follow_redirects=False).data
dejuiced = generictools.dejuice(vip_data)
url = scrapertools.find_single_match(dejuiced, '"file":"([^"]+)"')
language = 'latino'
if not config.get_setting('unify'):
title = ' [%s]' % language.capitalize()

View File

@@ -301,7 +301,7 @@ def seasons(item):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist = itemlist[::-1]
if config.get_videolibrary_support() and len(itemlist) > 0:
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
@@ -322,10 +322,13 @@ def season_episodes(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
full_data = httptools.downloadpage(item.url).data
full_data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", full_data)
season = str(item.infoLabels['season'])
patron = '<a href=(.*?temporada-%s\/.*?) title=.*?i-play><\/i> (.*?)<\/a>'%season
if int(season) <= 9:
season = '0'+season
data = scrapertools.find_single_match(full_data, '</i>Temporada %s</div>(.*?)(?:down arrow|cuadre_comments)' % season)
patron = '<a href="([^"]+)" title=".*?i-play"><\/i> (.*?)<\/a>'
matches = matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for url, episode in matches:
@@ -390,6 +393,7 @@ def findvideos(item):
video_list = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron_language ='(<ul id=level\d_.*?\s*class=.*?ul>)'
matches = re.compile(patron_language, re.DOTALL).findall(data)

View File

@@ -291,7 +291,7 @@ def findvideos(item):
url = data_url.headers['location']
except:
pass
url = url.replace(" ", "%20")
itemlist.append(item.clone(title = '[%s] [%s]', url=url, action='play', subtitle=subs,
language=language, quality=quality, infoLabels=item.infoLabels))

View File

@@ -43,6 +43,7 @@ thumb_dict = {"movies": "https://s10.postimg.cc/fxtqzdog9/peliculas.png",
"adults": "https://s10.postimg.cc/s8raxc51l/adultos.png",
"recents": "https://s10.postimg.cc/649u24kp5/recents.png",
"updated" : "https://s10.postimg.cc/46m3h6h9l/updated.png",
"actors": "https://i.postimg.cc/tC2HMhVV/actors.png",
"accion": "https://s14.postimg.cc/sqy3q2aht/action.png",
"adolescente" : "https://s10.postimg.cc/inq7u4p61/teens.png",
"adultos": "https://s10.postimg.cc/s8raxc51l/adultos.png",
@@ -87,6 +88,7 @@ thumb_dict = {"movies": "https://s10.postimg.cc/fxtqzdog9/peliculas.png",
"romance" : "https://s10.postimg.cc/yn8vdll6x/romance.png",
"romantica": "https://s14.postimg.cc/8xlzx7cht/romantic.png",
"suspenso": "https://s10.postimg.cc/7peybxdfd/suspense.png",
"telenovelas": "https://i.postimg.cc/QCXZkyDM/telenovelas.png",
"terror": "https://s14.postimg.cc/thqtvl52p/horror.png",
"thriller": "https://s14.postimg.cc/uwsekl8td/thriller.png",
"western": "https://s10.postimg.cc/5wc1nokjt/western.png"

View File

@@ -3251,7 +3251,7 @@ msgstr "Serie"
msgctxt "#70137"
msgid "Movies"
msgstr "Películas"
msgstr "Peliculas"
msgctxt "#70138"
msgid "Low Rating"

View File

@@ -3251,7 +3251,7 @@ msgstr "Serie"
msgctxt "#70137"
msgid "Movies"
msgstr "Películas"
msgstr "Peliculas"
msgctxt "#70138"
msgid "Low Rating"

View File

@@ -3251,7 +3251,7 @@ msgstr "Serie"
msgctxt "#70137"
msgid "Movies"
msgstr "Películas"
msgstr "Peliculas"
msgctxt "#70138"
msgid "Low Rating"

4
plugin.video.alfa/servers/streamango.json Executable file → Normal file
View File

@@ -6,6 +6,10 @@
{
"pattern": "streamango.com/(?:embed|f)/([A-z0-9]+)",
"url": "http://streamango.com/embed/\\1"
},
{
"pattern": "https://fruitadblock.net/embed/([A-z0-9]+)",
"url": "http://streamango.com/embed/\\1"
}
]
},

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://tiwi.kiwi/embed-([a-z0-9]+).html",
"url": "https://tiwi.kiwi/embed-\\1.html"
}
]
},
"free": true,
"id": "tiwikiwi",
"name": "tiwikiwi",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://i.postimg.cc/CxdyWRcN/tiwikiwi.png"
}

View File

@@ -0,0 +1,30 @@
# Conector tiwikiwi By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
    """Check whether the video is still hosted; HTTP 404 means removed.

    Returns (exists, message) as expected by the server framework.
    """
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url)
    if data.code == 404:
        # Bug fix: the message used to say "[Cloud]" (copied from the
        # vidcloud connector); label it with this server's name.
        return False, "[tiwi.kiwi] El archivo no existe o ha sido borrado"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the tiwi.kiwi embed page to direct video URLs.

    The page ships a packed (p.a.c.k.e.d) script; it is unpacked and the
    file/label pairs are extracted from the player sources.
    """
    logger.info("url=" + page_url)
    page = httptools.downloadpage(page_url).data
    packed = scrapertools.find_single_match(page, "type='text/javascript'>(eval.*?)?\s+</script>")
    unpacked = jsunpack.unpack(packed)
    pairs = re.findall('file:"([^"]+)",label:"([^"]+)"', unpacked, re.DOTALL)
    return [['tiwi.kiwi [%s]' % quality, stream_url] for stream_url, quality in pairs]

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://vidcloud.co/embed/([a-z0-9]+)",
"url": "https://vidcloud.co/player?fid=\\1&page=embed"
}
]
},
"free": true,
"id": "vidcloud",
"name": "vidcloud",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://i.postimg.cc/xjpwG0rK/0a-RVDzlb-400x400.jpg"
}

View File

@@ -0,0 +1,29 @@
# Conector vidcloud By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
    """Check whether the video is still hosted; HTTP 404 means removed.

    Returns (exists, message) as expected by the server framework.
    """
    logger.info("(page_url='%s')" % page_url)
    response = httptools.downloadpage(page_url)
    if response.code != 404:
        return True, ""
    return False, "[Cloud] El archivo no existe o ha sido borrado"
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the vidcloud player page to direct video URLs.

    Backslash escapes are stripped from the payload before scraping the
    "file" entries.
    """
    logger.info("url=" + page_url)
    page = httptools.downloadpage(page_url).data
    page = page.replace('\\\\', '\\').replace('\\', '')
    file_urls = re.findall('"file":"([^"]+)"', page, re.DOTALL)
    return [['vidcloud', file_url] for file_url in file_urls]