correcciones y novedades

This commit is contained in:
Unknown
2018-10-24 15:54:18 -03:00
parent 6b946c2de9
commit 5368826cbb
8 changed files with 423 additions and 19 deletions

View File

@@ -98,7 +98,7 @@ def episodios(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
patron = '<div class="pagina">(.+?)<\/div><div id="fade".+?>'
patron = '<div class="pagina">(.*?)</ul>'
data = scrapertools.find_single_match(data, patron)
patron_caps = "<li><a href='(.+?)'>Cap(?:i|í)tulo: (.+?) - (.+?)<\/a>"
matches = scrapertools.find_multiple_matches(data, patron_caps)

View File

@@ -231,6 +231,8 @@ def findvideos(item):
url = '%s/%s' % (server, id)
if server != '' and id != '':
language = IDIOMAS[language]
if quality.lower() == 'premium':
quality = '720p'
quality = CALIDADES[quality]
title = ' [%s] [%s]' % (language, quality)
itemlist.append(Item(channel=item.channel, title='%s' + title, url=url, action='play', language=language,

View File

@@ -76,7 +76,7 @@ def menu_movies(item):
def get_source(url, referer=None):
logger.info()
if referer is not None:
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
@@ -262,7 +262,7 @@ def findvideos(item):
post = urllib.urlencode(post)
test_url = '%swp-admin/admin-ajax.php' % 'https://peliculonhd.com/'
new_data = httptools.downloadpage(test_url, post=post).data
new_data = httptools.downloadpage(test_url, post=post, headers={'Referer':item.url}).data
test_url = scrapertools.find_single_match(new_data, "src='([^']+)'")
if 'xyz' in test_url:
new_data = get_source(test_url, item.url)
@@ -287,7 +287,6 @@ def findvideos(item):
language=IDIOMAS[lang], infoLabels=item.infoLabels))
else:
new_data = get_source(test_url, item.url)
patron = 'data-embed="([^"]+)" data-issuer="([^"]+)" data-signature="([^"]+)"'
matches = scrapertools.find_multiple_matches(new_data, patron)

View File

@@ -45,7 +45,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
thumbnail= get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title='Series', url=host+'tvshows', action='list_all', type='tvshows',
itemlist.append(Item(channel=item.channel, title='Series', url=host+'tvshows', action='list_all', type='tv',
thumbnail= get_thumb('tvshows', auto=True)))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + '?s=', thumbnail=get_thumb("search", auto=True),
@@ -61,11 +61,11 @@ def menu_movies(item):
itemlist=[]
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'movies', action='list_all',
thumbnail=get_thumb('all', auto=True), type='movies'))
thumbnail=get_thumb('all', auto=True), type='movie'))
itemlist.append(Item(channel=item.channel, title='Genero', action='section',
thumbnail=get_thumb('genres', auto=True), type='movies'))
thumbnail=get_thumb('genres', auto=True), type='movie'))
itemlist.append(Item(channel=item.channel, title='Por Año', action='section',
thumbnail=get_thumb('year', auto=True), type='movies'))
thumbnail=get_thumb('year', auto=True), type='movie'))
return itemlist
@@ -124,7 +124,7 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
if item.type == 'movies':
if item.type == 'movie':
patron = '<article id="post-\d+" class="item movies"><div class="poster">\s?<img src="([^"]+)" alt="([^"]+)">.*?'
patron += '"quality">([^<]+)</span><\/div>\s?<a href="([^"]+)">.*?</h3>.*?<span>([^<]+)</'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -144,9 +144,10 @@ def list_all(item):
thumbnail=thumbnail,
contentTitle=contentTitle,
quality=quality,
type=item.type,
infoLabels={'year':year}))
elif item.type == 'tvshows':
elif item.type == 'tv':
patron = '<article id="post-\d+" class="item tvshows"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'
patron += '<a href="([^"]+)">.*?<span>(\d{4})<'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -162,6 +163,7 @@ def list_all(item):
url=url,
thumbnail=thumbnail,
contentSerieName=contentSerieName,
type=item.type,
infoLabels={'year':year}))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
@@ -188,7 +190,7 @@ def seasons(item):
season = season.lower().replace('temporada','')
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons', type=item.type,
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -225,7 +227,8 @@ def episodesxseasons(item):
url = scrapedurl
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', type=item.type,
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -244,7 +247,7 @@ def findvideos(item):
lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
quality = ''
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option}
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type':'movie'}
post = urllib.urlencode(post)
test_url = 'https://pelisr.com/wp-admin/admin-ajax.php'
new_data = httptools.downloadpage(test_url, post=post).data
@@ -255,13 +258,15 @@ def findvideos(item):
title = '%s'
if 'drive' in scrapedurl:
enc_data = httptools.downloadpage(scrapedurl, headers = {'Referer':item.url}).data
dec_data = generictools.dejuice(enc_data)
url, quality = scrapertools.find_single_match(dec_data, '"file":"(.*?)","label":"(.*?)"')
try:
enc_data = httptools.downloadpage(scrapedurl, headers = {'Referer':item.url}).data
dec_data = generictools.dejuice(enc_data)
url, quality = scrapertools.find_single_match(dec_data, '"file":"(.*?)","label":"(.*?)"')
except:
pass
else:
url = scrapedurl
url = url +"|referer=%s" % item.url
itemlist.append(
Item(channel=item.channel, url=url, title=title, action='play', quality=quality, language=IDIOMAS[lang],
infoLabels=item.infoLabels))
@@ -348,7 +353,7 @@ def newest(categoria):
item.url = host + 'genre/animacion/'
elif categoria == 'terror':
item.url = host + 'genre/terror/'
item.type='movies'
item.type='movie'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()

View File

@@ -0,0 +1,70 @@
{
"id": "zonaworld",
"name": "Zona World",
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://i.postimg.cc/tR67dQzH/zona-world.png",
"banner": "",
"version": 1,
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en búsqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - Terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,259 @@
# -*- coding: utf-8 -*-
# -*- Channel Zona World -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
host = 'https://peliculas.zona-world.com/'
IDIOMAS = {'Latino': 'LAT', 'Español': 'CAST', 'Subtitulado': 'VOSE', 'Ingles': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'vidoza', 'openload', 'gvideo', 'fex', 'okru']
def mainlist(item):
    """Build the top-level menu of the Zona World channel.

    Offers the full movie list, a genre section and a search entry,
    then lets autoplay add its own option.
    """
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = [
        Item(channel=item.channel, title="Todas", action="list_all",
             url=host + 'pelicula/', thumbnail=get_thumb('all', auto=True)),
        Item(channel=item.channel, title="Generos", action="section",
             section='genre', thumbnail=get_thumb('genres', auto=True)),
        Item(channel=item.channel, title="Buscar", action="search",
             url=host + '?s=', thumbnail=get_thumb('search', auto=True)),
    ]
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def get_source(url):
    """Download *url* and return its HTML with layout whitespace removed.

    Newlines, tabs, non-breaking spaces, <br> tags and runs of two or
    more spaces are stripped so the scraping regexes can match in one line.
    """
    logger.info()
    response = httptools.downloadpage(url)
    cleaned = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", response.data)
    logger.debug(cleaned)
    return cleaned
def list_all(item):
    """Scrape a listing page and return one Item per movie.

    Each entry carries the cleaned title, year, language and thumbnail;
    a 'Siguiente >>' item is appended when the page links a next page.
    Fix: errors are now logged (same pattern as newest()) instead of being
    silently discarded by a bare ``except: pass``, and the 'http:' scheme
    is only prepended to thumbnails that are actually protocol-relative.
    """
    logger.info()
    itemlist = []
    try:
        patron = '<article id="post-.*?<a href="([^"]+)">.*?"Langu">([^<]+)<.*?src="([^"]+)".*?'
        patron += '<h3 class="Title">([^<]+)<\/h3>.*?date_range">(\d{4})<'
        data = get_source(item.url)
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl, language, scrapedthumbnail, scrapedtitle, year in matches:
            url = scrapedurl
            # Titles may come as "local title | original title": keep the first part.
            if "|" in scrapedtitle:
                scrapedtitle = scrapedtitle.split("|")
                contentTitle = scrapedtitle[0].strip()
            else:
                contentTitle = scrapedtitle
            # Drop any parenthesised suffix (e.g. "(2018)") from the title.
            contentTitle = re.sub('\(.*?\)', '', contentTitle)
            title = '%s [%s]' % (contentTitle, year)
            # Thumbnails are usually protocol-relative ("//..."); only add
            # the scheme when it is missing.
            if scrapedthumbnail.startswith('//'):
                thumbnail = 'http:' + scrapedthumbnail
            else:
                thumbnail = scrapedthumbnail
            itemlist.append(Item(channel=item.channel, action='findvideos',
                                 title=title,
                                 url=url,
                                 thumbnail=thumbnail,
                                 contentTitle=contentTitle,
                                 language=IDIOMAS[language],
                                 infoLabels={'year': year}
                                 ))
        tmdb.set_infoLabels_itemlist(itemlist, True)
        # Pagination
        url_next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
        if url_next_page:
            itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
                                 section=item.section))
    except:
        # Best-effort scraping: log the failure rather than hiding it.
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
    return itemlist
def search_results(item):
    """Scrape the search-results page and return one Item per hit.

    Same layout as list_all() but the thumbnail appears before the
    language flag in the markup, so the capture-group order differs.
    Fix: errors are now logged (same pattern as newest()) instead of
    being silently discarded by a bare ``except: pass``, and the 'http:'
    scheme is only prepended to protocol-relative thumbnails.
    """
    logger.info()
    itemlist = []
    try:
        patron = '<article id="post-.*?<a href="([^"]+)">.*?src="([^"]+)".*?"Langu">([^<]+)<.*?'
        patron += '<h3 class="Title">([^<]+)<\/h3>.*?date_range">(\d{4})<'
        data = get_source(item.url)
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl, scrapedthumbnail, language, scrapedtitle, year in matches:
            url = scrapedurl
            # Titles may come as "local title | original title": keep the first part.
            if "|" in scrapedtitle:
                scrapedtitle = scrapedtitle.split("|")
                contentTitle = scrapedtitle[0].strip()
            else:
                contentTitle = scrapedtitle
            # Drop any parenthesised suffix from the title.
            contentTitle = re.sub('\(.*?\)', '', contentTitle)
            title = '%s [%s]' % (contentTitle, year)
            # Only add the scheme when the URL is protocol-relative.
            if scrapedthumbnail.startswith('//'):
                thumbnail = 'http:' + scrapedthumbnail
            else:
                thumbnail = scrapedthumbnail
            itemlist.append(Item(channel=item.channel, action='findvideos',
                                 title=title,
                                 url=url,
                                 thumbnail=thumbnail,
                                 contentTitle=contentTitle,
                                 language=IDIOMAS[language],
                                 infoLabels={'year': year}
                                 ))
        tmdb.set_infoLabels_itemlist(itemlist, True)
    except:
        # Best-effort scraping: log the failure rather than hiding it.
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
    return itemlist
def section(item):
    """List the links of a site section (currently only 'genre').

    Scrapes the genre block from the home page and returns a list_all
    Item per genre, skipping the 'Ver más' shortcut link.
    """
    logger.info()
    itemlist = []
    data = get_source(host)
    if item.section == 'genre':
        data = scrapertools.find_single_match(data, '>Género</div>(.*?)</ul>')
    link_patron = '<a href="([^"]+)".*?>([^<]+)</a>'
    for link, label in re.compile(link_patron, re.DOTALL).findall(data):
        if label == 'Ver más':
            continue  # "see more" shortcut, not a real genre
        itemlist.append(Item(channel=item.channel, title=label, url=link,
                             action='list_all', section=item.section))
    return itemlist
def findvideos(item):
    """Collect playable links for a movie page.

    For each player option the intermediate page is fetched, the hoster
    redirect is followed manually (tid is reversed and sent back as trhex)
    and the final URL is mapped to its server. A youtube link is kept
    aside as the trailer. Fixes: the rapidvideo domain had a typo
    ('wwww' -> 'www'), and the trailer is tracked explicitly instead of
    relying on a NameError being swallowed by a bare except.
    """
    logger.info()
    itemlist = []
    trailer = None  # set if a youtube link shows up among the options
    data = get_source(item.url)
    data = scrapertools.unescape(data)
    data = scrapertools.decodeHtmlentities(data)
    patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?</iframe>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for option, scrapedurl in matches:
        scrapedurl = scrapedurl.replace('"', '').replace('&#038;', '&')
        data_video = get_source(scrapedurl)
        # The option tab label carries "language - quality".
        opt_data = scrapertools.find_single_match(data, '"%s"><span>.*?</span>.*?<span>([^<]+)</span>' %
                                                  option).split('-')
        language = opt_data[0].strip()
        quality = opt_data[1].strip()
        quality = re.sub('Full|HD', '', quality).strip()
        if 'rip' in quality.lower():
            quality = '720P'
        if not config.get_setting('unify'):
            title = ' [%s] [%s]' % (quality, IDIOMAS[language])
        else:
            title = ''
        try:
            url, tid = scrapertools.find_single_match(data_video, '<div class="Video">.*?src="(.*?tid=)([^&]+)&"')
            referer = url + tid
            # The site expects the reversed tid in a 'trhex' parameter.
            tid = tid[::-1]
            url = url.replace('&tid=', '&trhex=')
            new_data = httptools.downloadpage(url + tid, follow_redirects=False)
            if 'location' in new_data.headers:
                new_url = new_data.headers['location']
                if 'rapidvideo' in new_url:
                    id = scrapertools.find_single_match(new_url, 'id=([^&]+)&')
                    # fixed typo: 'wwww' -> 'www'
                    url = 'https://www.rapidvideo.com/e/%s' % id
                elif 'fex' in new_url:
                    new_data = get_source(new_url)
                    id = scrapertools.find_single_match(new_data, "id=([^']+)'")
                    url = 'https://fex.net/load/%s' % id
                else:
                    new_data = get_source(new_url)
                    url = scrapertools.find_single_match(new_data, 'iframe src="([^"]+)"')
        except:
            # Fallback: take the iframe source straight from the player page.
            url = scrapertools.find_single_match(data_video, 'src="([^"]+)" frameborder')
            if 'rapidvideo' in url:
                id = scrapertools.find_single_match(url, 'id=([^&]+)&')
                # fixed typo: 'wwww' -> 'www'
                url = 'https://www.rapidvideo.com/e/%s' % id
        if 'youtube' in url:
            trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')
        elif url != '':
            itemlist.append(Item(channel=item.channel, title='%s' + title, action='play', url=url,
                                 language=IDIOMAS[language], quality=quality, infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s' % i.server.capitalize())
    if trailer is not None:
        itemlist.append(trailer)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             contentTitle=item.contentTitle))
    return itemlist
def search(item, texto):
    """Run a site search for *texto*; an empty query yields an empty list.

    Spaces in the query are encoded as '+' and appended to the search URL.
    """
    logger.info()
    item.url = item.url + texto.replace(" ", "+")
    if not texto:
        return []
    return search_results(item)
def newest(categoria):
    """Return the newest items for a 'Novedades' category.

    Maps the global category name to the channel genre URL, delegates to
    list_all() and strips the trailing pagination item. Fix: ``host``
    already ends with '/', so the genre path must not start with one
    (the old code produced '...com//generos/...').
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'infantiles':
            item.url = host + 'generos/animacion'
        elif categoria == 'terror':
            item.url = host + 'generos/terror'
        itemlist = list_all(item)
        if itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -0,0 +1,43 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://fex.net/load/[A-z0-9]+/[A-z0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "fex",
"name": "Fex",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://i.postimg.cc/pdswzj8G/fex.png",
"version": 1
}

View File

@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
# -*- Server Fex -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
from core import httptools
from platformcode import logger
def test_video_exists(page_url):
    """Check whether the Fex file behind *page_url* is still available.

    Returns (True, "") when the server does not answer 404, otherwise
    (False, <error message>). Redirects are not followed, since the link
    itself is what is being probed.
    """
    logger.info("(page_url='%s')" % page_url)
    response = httptools.downloadpage(page_url, follow_redirects=False)
    if response.code != 404:
        return True, ""
    return False, "[Fex] El archivo no existe o ha sido borrado"
def get_video_url(page_url, user="", password="", video_password=""):
    """Resolve the final media URL for a Fex link.

    The load URL answers with a redirect whose Location header holds the
    real file URL. Fix: use ``headers.get`` so a missing Location header
    (dead/expired link) returns an empty list instead of raising KeyError.
    """
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    data = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True)
    logger.debug(data.headers)
    # No redirect means no playable file; don't crash on the missing header.
    url = data.headers.get('location', '')
    if url:
        video_urls.append(['Fex', url])
    return video_urls