Merge pull request #471 from Alfa-beto/fixes

correcciones y novedades
This commit is contained in:
Alfa
2018-10-25 08:25:53 -05:00
committed by GitHub
11 changed files with 727 additions and 19 deletions

View File

@@ -98,7 +98,7 @@ def episodios(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
patron = '<div class="pagina">(.+?)<\/div><div id="fade".+?>'
patron = '<div class="pagina">(.*?)</ul>'
data = scrapertools.find_single_match(data, patron)
patron_caps = "<li><a href='(.+?)'>Cap(?:i|í)tulo: (.+?) - (.+?)<\/a>"
matches = scrapertools.find_multiple_matches(data, patron_caps)

View File

@@ -231,6 +231,8 @@ def findvideos(item):
url = '%s/%s' % (server, id)
if server != '' and id != '':
language = IDIOMAS[language]
if quality.lower() == 'premium':
quality = '720p'
quality = CALIDADES[quality]
title = ' [%s] [%s]' % (language, quality)
itemlist.append(Item(channel=item.channel, title='%s' + title, url=url, action='play', language=language,

View File

@@ -76,7 +76,7 @@ def menu_movies(item):
def get_source(url, referer=None):
logger.info()
if referer is not None:
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
@@ -262,7 +262,7 @@ def findvideos(item):
post = urllib.urlencode(post)
test_url = '%swp-admin/admin-ajax.php' % 'https://peliculonhd.com/'
new_data = httptools.downloadpage(test_url, post=post).data
new_data = httptools.downloadpage(test_url, post=post, headers={'Referer':item.url}).data
test_url = scrapertools.find_single_match(new_data, "src='([^']+)'")
if 'xyz' in test_url:
new_data = get_source(test_url, item.url)
@@ -287,7 +287,6 @@ def findvideos(item):
language=IDIOMAS[lang], infoLabels=item.infoLabels))
else:
new_data = get_source(test_url, item.url)
patron = 'data-embed="([^"]+)" data-issuer="([^"]+)" data-signature="([^"]+)"'
matches = scrapertools.find_multiple_matches(new_data, patron)

View File

@@ -45,7 +45,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
thumbnail= get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title='Series', url=host+'tvshows', action='list_all', type='tvshows',
itemlist.append(Item(channel=item.channel, title='Series', url=host+'tvshows', action='list_all', type='tv',
thumbnail= get_thumb('tvshows', auto=True)))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + '?s=', thumbnail=get_thumb("search", auto=True),
@@ -61,11 +61,11 @@ def menu_movies(item):
itemlist=[]
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'movies', action='list_all',
thumbnail=get_thumb('all', auto=True), type='movies'))
thumbnail=get_thumb('all', auto=True), type='movie'))
itemlist.append(Item(channel=item.channel, title='Genero', action='section',
thumbnail=get_thumb('genres', auto=True), type='movies'))
thumbnail=get_thumb('genres', auto=True), type='movie'))
itemlist.append(Item(channel=item.channel, title='Por Año', action='section',
thumbnail=get_thumb('year', auto=True), type='movies'))
thumbnail=get_thumb('year', auto=True), type='movie'))
return itemlist
@@ -124,7 +124,7 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
if item.type == 'movies':
if item.type == 'movie':
patron = '<article id="post-\d+" class="item movies"><div class="poster">\s?<img src="([^"]+)" alt="([^"]+)">.*?'
patron += '"quality">([^<]+)</span><\/div>\s?<a href="([^"]+)">.*?</h3>.*?<span>([^<]+)</'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -144,9 +144,10 @@ def list_all(item):
thumbnail=thumbnail,
contentTitle=contentTitle,
quality=quality,
type=item.type,
infoLabels={'year':year}))
elif item.type == 'tvshows':
elif item.type == 'tv':
patron = '<article id="post-\d+" class="item tvshows"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'
patron += '<a href="([^"]+)">.*?<span>(\d{4})<'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -162,6 +163,7 @@ def list_all(item):
url=url,
thumbnail=thumbnail,
contentSerieName=contentSerieName,
type=item.type,
infoLabels={'year':year}))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
@@ -188,7 +190,7 @@ def seasons(item):
season = season.lower().replace('temporada','')
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons', type=item.type,
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -225,7 +227,8 @@ def episodesxseasons(item):
url = scrapedurl
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', type=item.type,
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -244,7 +247,7 @@ def findvideos(item):
lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
quality = ''
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option}
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type':'movie'}
post = urllib.urlencode(post)
test_url = 'https://pelisr.com/wp-admin/admin-ajax.php'
new_data = httptools.downloadpage(test_url, post=post).data
@@ -255,13 +258,15 @@ def findvideos(item):
title = '%s'
if 'drive' in scrapedurl:
enc_data = httptools.downloadpage(scrapedurl, headers = {'Referer':item.url}).data
dec_data = generictools.dejuice(enc_data)
url, quality = scrapertools.find_single_match(dec_data, '"file":"(.*?)","label":"(.*?)"')
try:
enc_data = httptools.downloadpage(scrapedurl, headers = {'Referer':item.url}).data
dec_data = generictools.dejuice(enc_data)
url, quality = scrapertools.find_single_match(dec_data, '"file":"(.*?)","label":"(.*?)"')
except:
pass
else:
url = scrapedurl
url = url +"|referer=%s" % item.url
itemlist.append(
Item(channel=item.channel, url=url, title=title, action='play', quality=quality, language=IDIOMAS[lang],
infoLabels=item.infoLabels))
@@ -348,7 +353,7 @@ def newest(categoria):
item.url = host + 'genre/animacion/'
elif categoria == 'terror':
item.url = host + 'genre/terror/'
item.type='movies'
item.type='movie'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()

View File

@@ -0,0 +1,55 @@
{
"id": "repelislive",
"name":"Repelis.live",
"thumbnail":"https://i.postimg.cc/j5ndjr3j/repelislive.png",
"banner":"",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"version": 1,
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
}
]
}

View File

@@ -0,0 +1,246 @@
# -*- coding: utf-8 -*-
# -*- Channel Repelis.live -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import urlparse
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
from channels import filtertools
from channels import autoplay
IDIOMAS = {'Latino': 'LAT', 'Castellano':'CAST', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'streamango', 'rapidvideo', 'netutv']
host = "http://repelis.live/"
def mainlist(item):
    """Build the channel's main menu.

    Returns the fixed list of top-level entries (latest, per-language
    listings, genre/year sections and search) and registers the
    autoplay option on it.
    """
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    # (title, action, url, thumb key) — one row per menu entry.
    menu = [
        ("Ultimas", "list_all", host, "last"),
        ("Castellano", "list_all", host + 'pelis-castellano/', "cast"),
        ("Latino", "list_all", host + 'pelis-latino/', "lat"),
        ("VOSE", "list_all", host + 'pelis-subtitulado/', "vose"),
        ("Generos", "categories", host, "genres"),
        ("Por Año", "categories", host, "year"),
        ("Buscar", "search", host + '?s=', "search"),
    ]
    itemlist = [Item(channel=item.channel,
                     title=entry_title,
                     action=entry_action,
                     url=entry_url,
                     thumbnail=get_thumb(thumb_key, auto=True))
                for entry_title, entry_action, entry_url, thumb_key in menu]
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def get_source(url):
    """Download *url* and return its HTML with newlines, tabs and
    repeated whitespace stripped, so regexes can match in one line."""
    logger.info()
    raw = httptools.downloadpage(url).data
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def categories(item):
    """List genre or year categories scraped from the page at item.url.

    The 'Generos' entry scrapes the sidebar category list; any other
    caller (years) scrapes the <option> dropdown instead.
    """
    logger.info()
    page = get_source(item.url)
    if item.title != 'Generos':
        patron = '<option value="([^"]+)">([^<]+)</option>'
    else:
        page = scrapertools.find_single_match(page, '</span>Categories</h3><ul>(.*?)</ul>')
        patron = '<a href="([^"]+)">([^<]+)</a>'
    found = re.compile(patron, re.DOTALL).findall(page)
    return [Item(channel=item.channel,
                 action="list_all",
                 title=cat_title,
                 url=cat_url)
            for cat_url, cat_title in found]
def list_all(item):
    """Scrape a listing or search-results page into movie Items.

    Bug fix: the search-results regex captures groups in the order
    (url, title, thumbnail, year) while the normal listing regex captures
    (url, title, year, thumbnail).  The original unpacked both as
    (url, title, year, thumb), so search results got the image URL as the
    year and the year as the thumbnail.  Unpack per branch instead.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    is_search = item.title == 'Buscar'
    if is_search:
        # groups: url, title, thumbnail, year
        pattern = '<div class="row"> <a href="([^"]+)" title="([^\(]+)\(.*?">.*?<img src="([^"]+)".*?'
        pattern += '<p class="main-info-list">Pelicula del (\d{4})'
    else:
        # groups: url, title, year, thumbnail
        pattern = '<div class="col-mt-5 postsh">.?<div class="poster-media-card"> <a href="([^"]+)" '
        pattern += 'title="([^\(]+)\(.*?">.*?"anio".*?>(\d{4}).*?src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, pattern)
    for groups in matches:
        if is_search:
            url, title, thumb, year = groups
        else:
            url, title, year, thumb = groups
        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        action='findvideos',
                        contentTitle=title,
                        thumbnail=thumb,
                        infoLabels={'year': year})
        itemlist.append(new_item)
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Pagination: the <link rel="next"> header points to the next page.
    next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)"')
    if next_page != '':
        itemlist.append(Item(channel=item.channel,
                             action="list_all",
                             title=">> Página siguiente",
                             url=next_page))
    return itemlist
def findvideos(item):
    """Resolve the playable links of a movie page.

    Each option tab holds a data-src URL that redirects (via the Location
    header) to the real hoster; YouTube redirects are kept aside as the
    trailer and appended last.
    """
    logger.info()
    itemlist = []
    trailer = ''  # stays '' when no YouTube link is found
    data = get_source(item.url)
    # groups: intermediate data-src url, language label of the tab
    patron = '<a href="#embed\d+".*?data-src="([^"]+)".*?"tab">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url, language in matches:
        # Follow manually: the hoster URL is in the redirect's Location header.
        # NOTE(review): assumes the server always answers with a redirect;
        # a direct 200 would raise KeyError on 'location' — TODO confirm.
        data = httptools.downloadpage(url, follow_redirects=False, headers={'Referer':item.url}, only_headers=True)
        url = data.headers['location']
        if config.get_setting('unify'):
            title = ''
        else:
            title = ' [%s]' % language
        if 'youtube' in url:
            trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')
        else:
            # '%s' placeholder is filled with the server name below.
            # NOTE(review): IDIOMAS[language] raises KeyError for labels
            # outside {Latino, Castellano, Subtitulado} — TODO confirm.
            itemlist.append(Item(channel=item.channel,
                                 title='%s'+title,
                                 url=url,
                                 action='play',
                                 language=IDIOMAS[language],
                                 infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    if trailer != '':
        itemlist.append(trailer)
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             contentTitle=item.contentTitle
                             ))
    return itemlist
def search(item, texto):
    """Run a site search for *texto*; returns [] on an empty query or on
    any scrape failure (errors are logged, never raised)."""
    logger.info()
    query = texto.replace(" ", "+")
    try:
        if query == '':
            return []
        item.url += query
        return list_all(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def newest(category):
    """Return the newest items for the global 'novedades' *category*,
    dropping the trailing pagination pseudo-item when present."""
    logger.info()
    item = Item()
    url_by_category = {
        'peliculas': host,
        'infantiles': host + 'category/animacion',
        'terror': host + 'category/terror',
    }
    try:
        if category in url_by_category:
            item.url = url_by_category[category]
        itemlist = list_all(item)
        if itemlist[-1].title == '>> Página siguiente':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return itemlist

View File

@@ -0,0 +1,70 @@
{
"id": "zonaworld",
"name": "Zona World",
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://i.postimg.cc/tR67dQzH/zona-world.png",
"banner": "",
"version": 1,
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - Terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,259 @@
# -*- coding: utf-8 -*-
# -*- Channel Zona World -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
host = 'https://peliculas.zona-world.com/'
IDIOMAS = {'Latino': 'LAT', 'Español': 'CAST', 'Subtitulado': 'VOSE', 'Ingles': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'vidoza', 'openload', 'gvideo', 'fex', 'okru']
def mainlist(item):
    """Build the channel's main menu (all movies, genres, search) and
    register the autoplay option on it."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    # Per-entry keyword arguments; 'Generos' has a section instead of a url.
    entries = [
        dict(title="Todas", action="list_all", url=host + 'pelicula/',
             thumbnail=get_thumb('all', auto=True)),
        dict(title="Generos", action="section", section='genre',
             thumbnail=get_thumb('genres', auto=True)),
        dict(title="Buscar", action="search", url=host + '?s=',
             thumbnail=get_thumb('search', auto=True)),
    ]
    itemlist = [Item(channel=item.channel, **entry) for entry in entries]
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def get_source(url):
    """Download *url* and return its HTML collapsed onto one line
    (newlines, tabs, &nbsp;, <br> and double spaces removed)."""
    logger.info()
    page = httptools.downloadpage(url).data
    page = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", page)
    logger.debug(page)  # kept: dumps the whole page for troubleshooting
    return page
def list_all(item):
    """Scrape a listing page into movie Items, plus a pagination entry.

    NOTE(review): the whole body sits inside a bare ``except: pass``, so
    any scrape/regex/network error is silently swallowed and whatever was
    collected so far is returned.
    """
    logger.info()
    itemlist = []
    try:
        # groups: url, language label, thumbnail, title, year
        patron = '<article id="post-.*?<a href="([^"]+)">.*?"Langu">([^<]+)<.*?src="([^"]+)".*?'
        patron += '<h3 class="Title">([^<]+)<\/h3>.*?date_range">(\d{4})<'
        data = get_source(item.url)
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl, language, scrapedthumbnail, scrapedtitle, year in matches:
            url = scrapedurl
            # Titles may carry an alternate name after '|'; keep the first part.
            if "|" in scrapedtitle:
                scrapedtitle = scrapedtitle.split("|")
                contentTitle = scrapedtitle[0].strip()
            else:
                contentTitle = scrapedtitle
            # Strip parenthesised qualifiers, e.g. "(2018)".
            contentTitle = re.sub('\(.*?\)', '', contentTitle)
            title = '%s [%s]' % (contentTitle, year)
            # assumes thumbnails are protocol-relative ("//...") — TODO confirm
            thumbnail = 'http:' + scrapedthumbnail
            itemlist.append(Item(channel=item.channel, action='findvideos',
                                 title=title,
                                 url=url,
                                 thumbnail=thumbnail,
                                 contentTitle=contentTitle,
                                 language=IDIOMAS[language],
                                 infoLabels={'year': year}
                                 ))
        tmdb.set_infoLabels_itemlist(itemlist, True)
        # Paginación
        url_next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
        if url_next_page:
            itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
                                 section=item.section))
    except:
        pass
    return itemlist
def search_results(item):
    """Scrape a search-results page into movie Items.

    Near-duplicate of :func:`list_all`, but the results page orders the
    capture groups differently (thumbnail before language) and has no
    pagination entry.  NOTE(review): like list_all, a bare
    ``except: pass`` swallows every error.
    """
    logger.info()
    itemlist = []
    try:
        # groups: url, thumbnail, language label, title, year
        patron = '<article id="post-.*?<a href="([^"]+)">.*?src="([^"]+)".*?"Langu">([^<]+)<.*?'
        patron += '<h3 class="Title">([^<]+)<\/h3>.*?date_range">(\d{4})<'
        data = get_source(item.url)
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl, scrapedthumbnail, language, scrapedtitle, year in matches:
            url = scrapedurl
            # Titles may carry an alternate name after '|'; keep the first part.
            if "|" in scrapedtitle:
                scrapedtitle = scrapedtitle.split("|")
                contentTitle = scrapedtitle[0].strip()
            else:
                contentTitle = scrapedtitle
            # Strip parenthesised qualifiers, e.g. "(2018)".
            contentTitle = re.sub('\(.*?\)', '', contentTitle)
            title = '%s [%s]' % (contentTitle, year)
            # assumes thumbnails are protocol-relative ("//...") — TODO confirm
            thumbnail = 'http:' + scrapedthumbnail
            itemlist.append(Item(channel=item.channel, action='findvideos',
                                 title=title,
                                 url=url,
                                 thumbnail=thumbnail,
                                 contentTitle=contentTitle,
                                 language=IDIOMAS[language],
                                 infoLabels={'year': year}
                                 ))
        tmdb.set_infoLabels_itemlist(itemlist, True)
    except:
        pass
    return itemlist
def section(item):
    """List section links (currently only the genre menu) scraped from
    the site's home page, skipping the 'Ver más' filler link."""
    logger.info()
    page = get_source(host)
    if item.section == 'genre':
        page = scrapertools.find_single_match(page, '>Género</div>(.*?)</ul>')
    link_re = re.compile('<a href="([^"]+)".*?>([^<]+)</a>', re.DOTALL)
    itemlist = []
    for link, label in link_re.findall(page):
        if label != 'Ver más':
            itemlist.append(Item(channel=item.channel, title=label, url=link,
                                 action='list_all', section=item.section))
    return itemlist
def findvideos(item):
    """Resolve the playable links of a movie page.

    Each "OptN" tab embeds an iframe whose page contains a tokenised
    redirect; the token is reversed and sent back to obtain the real
    hoster URL.  YouTube links are kept aside as the trailer.

    Fix: ``trailer`` was only bound inside the loop and appended through a
    bare ``try/except`` that relied on NameError — which also masked any
    real error in the append.  It is now initialised explicitly and
    appended conditionally.
    """
    logger.info()
    itemlist = []
    trailer = None
    data = get_source(item.url)
    data = scrapertools.unescape(data)
    data = scrapertools.decodeHtmlentities(data)
    # groups: option tab id, embedded iframe url
    patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?</iframe>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for option, scrapedurl in matches:
        scrapedurl = scrapedurl.replace('"', '').replace('&#038;', '&')
        data_video = get_source(scrapedurl)
        # "Language - Quality" label sits next to the tab id on the main page.
        opt_data = scrapertools.find_single_match(data, '"%s"><span>.*?</span>.*?<span>([^<]+)</span>' %
                                                  option).split('-')
        language = opt_data[0].strip()
        quality = opt_data[1].strip()
        quality = re.sub('Full|HD', '', quality).strip()
        if 'rip' in quality.lower():
            quality = '720P'
        if not config.get_setting('unify'):
            title = ' [%s] [%s]' % (quality, IDIOMAS[language])
        else:
            title = ''
        try:
            url, tid = scrapertools.find_single_match(data_video, '<div class="Video">.*?src="(.*?tid=)([^&]+)&"')
            referer = url + tid
            # The token is reversed and the parameter renamed before resolving.
            tid = tid[::-1]
            url = url.replace('&tid=', '&trhex=')
            new_data = httptools.downloadpage(url + tid, follow_redirects=False)
            if 'location' in new_data.headers:
                new_url = new_data.headers['location']
                if 'rapidvideo' in new_url:
                    id = scrapertools.find_single_match(new_url, 'id=([^&]+)&')
                    # NOTE(review): 'wwww' (4 w's) kept from the original — verify host
                    url = 'https://wwww.rapidvideo.com/e/%s' % id
                elif 'fex' in new_url:
                    new_data = get_source(new_url)
                    id = scrapertools.find_single_match(new_data, "id=([^']+)'")
                    url = 'https://fex.net/load/%s' % id
                else:
                    new_data = get_source(new_url)
                    url = scrapertools.find_single_match(new_data, 'iframe src="([^"]+)"')
        except:
            # Fallback: take the raw iframe src from the option page.
            url = scrapertools.find_single_match(data_video, 'src="([^"]+)" frameborder')
            if 'rapidvideo' in url:
                id = scrapertools.find_single_match(url, 'id=([^&]+)&')
                url = 'https://wwww.rapidvideo.com/e/%s' % id
        if 'youtube' in url:
            trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')
        elif url != '':
            # '%s' placeholder is filled with the server name below.
            itemlist.append(Item(channel=item.channel, title='%s' + title, action='play', url=url,
                                 language=IDIOMAS[language], quality=quality, infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s' % i.server.capitalize())
    if trailer:
        itemlist.append(trailer)
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             contentTitle=item.contentTitle))
    return itemlist
def search(item, texto):
    """Run a site search for *texto*; returns [] for an empty query."""
    logger.info()
    query = texto.replace(" ", "+")
    item.url = item.url + query
    if query == '':
        return []
    return search_results(item)
def newest(categoria):
    """Return the newest items for the global 'novedades' *categoria*.

    Fixes: ``host`` already ends with '/', yet the original prepended
    another one ('.../generos/...' became '...com//generos/...'); and
    ``itemlist[-1]`` was read without checking for an empty list (the
    IndexError was only caught by the broad except).
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'infantiles':
            item.url = host + 'generos/animacion'
        elif categoria == 'terror':
            item.url = host + 'generos/terror'
        itemlist = list_all(item)
        # Drop the pagination pseudo-item if present.
        if itemlist and itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -44,6 +44,9 @@ thumb_dict = {"movies": "https://s10.postimg.cc/fxtqzdog9/peliculas.png",
"recents": "https://s10.postimg.cc/649u24kp5/recents.png",
"updated" : "https://s10.postimg.cc/46m3h6h9l/updated.png",
"actors": "https://i.postimg.cc/tC2HMhVV/actors.png",
"cast": "https://i.postimg.cc/qvfP5Xvt/cast.png",
"lat": "https://i.postimg.cc/Gt8fMH0J/lat.png",
"vose": "https://i.postimg.cc/kgmnbd8h/vose.png",
"accion": "https://s14.postimg.cc/sqy3q2aht/action.png",
"adolescente" : "https://s10.postimg.cc/inq7u4p61/teens.png",
"adultos": "https://s10.postimg.cc/s8raxc51l/adultos.png",

View File

@@ -0,0 +1,43 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://fex.net/load/[A-z0-9]+/[A-z0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "fex",
"name": "Fex",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://i.postimg.cc/pdswzj8G/fex.png",
"version": 1
}

View File

@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
# -*- Server Fex -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
from core import httptools
from platformcode import logger
def test_video_exists(page_url):
    """Check that the Fex file behind *page_url* still exists.

    Returns (True, "") when reachable, or (False, message) on a 404.
    """
    logger.info("(page_url='%s')" % page_url)
    response = httptools.downloadpage(page_url, follow_redirects=False)
    if response.code == 404:
        return False, "[Fex] El archivo no existe o ha sido borrado"
    return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
    """Resolve the direct video URL from a Fex load link.

    The real file URL comes from the redirect's Location header.

    Fix: the original indexed ``data.headers['location']`` directly, so a
    non-redirect response raised KeyError; now an empty list is returned
    instead when no Location header is present.
    """
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    data = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True)
    logger.debug(data.headers)
    # assumes data.headers behaves like a dict — TODO confirm httptools API
    url = data.headers.get('location', '')
    if url:
        video_urls.append(['Fex', url])
    return video_urls