Merge pull request #484 from Alfa-beto/fixes

Fixes and new features
This commit is contained in:
Alfa
2018-11-07 11:45:55 -05:00
committed by GitHub
7 changed files with 627 additions and 21 deletions

View File

@@ -0,0 +1,53 @@
{
"id": "animeboom",
"name": "AnimeBoom",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://animeboom.net/images/logo.png",
"banner": "",
"categories": [
"anime",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"VOSE"
]
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - Episodios de anime",
"default": true,
"enabled": true,
"visible": true
}
]
}
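A note on the list settings above: `comprueba_enlaces_num` stores the index of the selected `lvalues` entry, not the value itself, so the channel code has to map that index back to a link count. A minimal sketch of that mapping, assuming the usual Alfa convention for list settings (the `LINK_COUNTS` helper is hypothetical, not part of this PR):

# Sketch, not code from this PR: map the stored lvalues index to a count.
from platformcode import config

LINK_COUNTS = [5, 10, 15, 20]  # mirrors "lvalues" in the JSON above

idx = config.get_setting('comprueba_enlaces_num', 'animeboom')  # "default": 1
max_links = LINK_COUNTS[idx]  # index 1 -> verify up to 10 links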

View File

@@ -0,0 +1,281 @@
# -*- coding: utf-8 -*-
# -*- Channel AnimeBoom -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from core import httptools
from core import scrapertools
from core import servertools
from channelselector import get_thumb
from core import tmdb
from core.item import Item
from platformcode import logger, config
from channels import autoplay
from channels import filtertools
host = "https://animeboom.net/"
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'animeboom')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'animeboom')
IDIOMAS = {'Latino':'LAT', 'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['directo', 'openload', 'streamango']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Nuevos Episodios",
action="new_episodes",
thumbnail=get_thumb('new_episodes', auto=True),
url=host))
itemlist.append(Item(channel=item.channel, title="Ultimas",
action="list_all",
thumbnail=get_thumb('last', auto=True),
url=host + 'emision'))
itemlist.append(Item(channel=item.channel, title="Todas",
action="list_all",
thumbnail=get_thumb('all', auto=True),
url=host + 'series'))
itemlist.append(Item(channel=item.channel, title="Series",
action="list_all",
thumbnail=get_thumb('tvshows', auto=True),
url=host + 'tv'))
itemlist.append(Item(channel=item.channel, title="Películas",
action="list_all",
thumbnail=get_thumb('movies', auto=True),
url=host + 'peliculas'))
itemlist.append(Item(channel=item.channel, title="OVAs",
action="list_all",
thumbnail='',
url=host + 'ova'))
itemlist.append(Item(channel=item.channel, title="ONAs",
action="list_all",
thumbnail='',
url=host + 'ona'))
itemlist.append(Item(channel=item.channel, title="Especiales",
action="list_all",
thumbnail='',
url=host + 'specials'))
itemlist.append(Item(channel=item.channel, title="Buscar",
action="search",
url=host + 'search?s=',
thumbnail=get_thumb('search', auto=True),
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
))
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<article class="([^"]+)"><figure class="image"><a href="([^"]+)" title=".*?">'
patron += '<img src="([^"]+)" alt="([^"]+)">.*?class="year">(\d{4})<'
matches = re.compile(patron, re.DOTALL).findall(data)
for type, scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
if 'latino' in scrapedtitle.lower():
lang = 'Latino'
else:
lang = 'VOSE'
title = re.sub('Audio Latino', '', scrapedtitle)
itemlist.append(Item(channel=item.channel, action='episodios',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=title,
language = lang,
infoLabels={'year':year}
))
# Pagination
next_page = scrapertools.find_single_match(data,
'<a href="([^"]+)" rel="next">&raquo;</a>')
next_page_url = scrapertools.decodeHtmlentities(next_page)
if next_page_url != "":
itemlist.append(Item(channel=item.channel,
action="list_all",
title=">> Página siguiente",
url=next_page_url,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
return itemlist
def search_results(item):
logger.info()
itemlist=[]
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data, '<div class="search-results">(.*?)<h4')
patron = '<a href="([^"]+)".*?<img src="([^"]+)".*?alt="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
title = re.sub('online|Audio|Latino', '', scrapedtitle)
itemlist.append(Item(channel=item.channel,
action="episodios",
title=title,
url=url,
thumbnail=scrapedthumbnail))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
try:
if texto != '':
return search_results(item)
else:
return []
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def new_episodes(item):
logger.info()
itemlist = []
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data, '>Episodios Estreno</h2>(.*?)</section>')
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
if 'latino' in scrapedtitle.lower():
lang = 'Latino'
else:
lang = 'VOSE'
title = re.sub('sub|Sub|Español|español|Audio|Latino|audio|latino','', scrapedtitle)
itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=scrapedthumbnail,
action='findvideos', language=lang))
return itemlist
def episodios(item):
logger.info()
itemlist = []
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data, '<ul class="list-episodies scrolling">(.*?)</ul>')
patron = '<a href="([^"]+)".*?title="([^"]+)".*?Episodio (\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, title, episode in matches:
if 'latino' in title.lower():
lang='Latino'
else:
lang = 'VOSE'
title = "1x" + episode + " - Episodio %s" % episode
url = scrapedurl
infoLabels['season'] = '1'
infoLabels['episode'] = episode
itemlist.append(Item(channel=item.channel, title=title, contentSerieName=item.contentSerieName, url=url,
action='findvideos', language=lang, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist = itemlist[::-1]
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName,
extra1='library'))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'video\[\d+\] = \'<iframe src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl in matches:
if 'animeboom' in scrapedurl:
new_data = get_source(scrapedurl)
scrapedurl = scrapertools.find_single_match(new_data, 'src:"([^,]+)",')
if scrapedurl != '':
itemlist.append(Item(channel=item.channel, title='%s', url=scrapedurl, action='play', language = item.language,
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist
def newest(categoria):
itemlist = []
item = Item()
if categoria == 'anime':
item.url=host
itemlist = new_episodes(item)
return itemlist
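The `findvideos` pattern above expects the page to define its players as JavaScript assignments of the form `video[N] = '<iframe src="...">'`. A standalone check of that regex against an invented fragment (the sample HTML is hypothetical):

# Sketch: exercising the findvideos pattern on a made-up page fragment.
import re

sample = 'video[1] = \'<iframe src="https://animeboom.net/embed/abc"></iframe>\';'
patron = r'video\[\d+\] = \'<iframe src="([^"]+)"'
print(re.findall(patron, sample))  # ['https://animeboom.net/embed/abc']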

View File

@@ -239,7 +239,6 @@ def findvideos(item):
itemlist = []
data = get_source(item.url)
selector_url = scrapertools.find_multiple_matches(data, 'class=metaframe rptss src=(.*?) frameborder=0 ')
for lang in selector_url:
@@ -259,13 +258,12 @@ def findvideos(item):
if language == 'VOSE':
sub = scrapertools.find_single_match(url, 'sub=(.*?)&')
subs = 'https:%s' % sub
if 'index' in url:
try:
file_id = scrapertools.find_single_match(url, 'file=(.*?)&')
post = {'link': file_id}
post = urllib.urlencode(post)
-hidden_url = 'https://streamango.poseidonhd.net/repro/plugins/gkpluginsphp.php'
+hidden_url = 'https://streamango.poseidonhd.io/repro/plugins/gkpluginsphp.php'
data_url = httptools.downloadpage(hidden_url, post=post).data
dict_vip_url = jsontools.load(data_url)
url = dict_vip_url['link']
@@ -278,15 +276,16 @@ def findvideos(item):
file_id = scrapertools.find_single_match(url, 'h=(\w+)')
post = {'h': file_id}
post = urllib.urlencode(post)
-hidden_url = 'https://streamango.poseidonhd.net/repro/openload/api.php'
+hidden_url = 'https://streamango.poseidonhd.io/repro/openload/api.php'
data_url = httptools.downloadpage(hidden_url, post=post, follow_redirects=False).data
json_data = jsontools.load(data_url)
url = json_data['url']
else:
file_id = scrapertools.find_single_match(url, 'url=(.*?)&')
new_data = httptools.downloadpage('https:'+url).data
file_id = scrapertools.find_single_match(new_data, 'value="([^"]+)"')
post = {'url': file_id}
post = urllib.urlencode(post)
-hidden_url = 'https://streamango.poseidonhd.net/repro/r.php'
+hidden_url = 'https://streamango.poseidonhd.io/repro/r.php'
data_url = httptools.downloadpage(hidden_url, post=post, follow_redirects=False)
url = data_url.headers['location']
except:
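All three hunks in this file make the same change: the resolver host moves from `streamango.poseidonhd.net` to `streamango.poseidonhd.io`. Since the host appears in three endpoints, a follow-up could hoist it into a single constant so the next domain move is a one-line edit; a sketch of that refactor (a suggestion, not part of this PR, and the constant names are invented):

# Sketch: centralize the resolver host used by the three endpoints above.
RESOLVER_HOST = 'https://streamango.poseidonhd.io'

GKPLUGINS_URL = RESOLVER_HOST + '/repro/plugins/gkpluginsphp.php'
OPENLOAD_API_URL = RESOLVER_HOST + '/repro/openload/api.php'
REDIRECT_URL = RESOLVER_HOST + '/repro/r.php'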

View File

@@ -147,25 +147,24 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
_sa = scrapertools.find_single_match(data, 'var _sa = (true|false);')
_sl = scrapertools.find_single_match(data, 'var _sl = ([^;]+);')
sl = eval(_sl)
-#buttons = scrapertools.find_multiple_matches(data, '<button href="" class="selop" sl="([^"]+)">([^<]+)</button>')
+buttons = scrapertools.find_multiple_matches(data, '<button href="" class="selop" sl="([^"]+)">([^<]+)</button>')
+for id, title in buttons:
+    new_url = golink(int(id), _sa, sl)
+    data = httptools.downloadpage(new_url).data
+    _x0x = scrapertools.find_single_match(data, 'var x0x = ([^;]+);')
+    x0x = eval(_x0x)
-#for id, title in buttons:
-new_url = golink(0, _sa, sl)
-data = httptools.downloadpage(new_url).data
-_x0x = scrapertools.find_single_match(data, 'var x0x = ([^;]+);')
-x0x = eval(_x0x)
-url = resolve(x0x[4], base64.b64decode(x0x[1]))
-if 'download' in url:
-url = url.replace('download', 'preview')
-title = '%s'
+    url = resolve(x0x[4], base64.b64decode(x0x[1]))
+    if 'download' in url:
+        url = url.replace('download', 'preview')
+    title = '%s'
-itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language='latino',
-infoLabels=item.infoLabels))
+    itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language='latino',
+                    infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Required by FilterTools
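Both the old and new code in this hunk pass the scraped `var _sl` and `var x0x` arrays through `eval()`, which executes whatever the page returns. If those arrays are plain JSON, `json.loads` would parse them without executing anything; a sketch of that safer alternative (the sample value is invented, and the PR itself keeps `eval`):

# Sketch: parse the scraped JS array without eval(), assuming it is valid JSON.
import json

_sl = '["abc", "def", "ghi"]'  # hypothetical value scraped from 'var _sl = ...;'
sl = json.loads(_sl)           # parses data only; never executes page code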

View File

@@ -0,0 +1,43 @@
{
"id": "seriesmetro",
"name": "SeriesMetro",
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://i.postimg.cc/Kzh6DCqD/seriesmetro.png",
"banner": "",
"categories": [
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar"
]
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,224 @@
# -*- coding: utf-8 -*-
# -*- Channel SeriesMetro -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'https://seriesmetro.com/'
list_language = []
list_quality = []
list_servers = ['openload', 'dailymotion']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'seriesmetro')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'seriesmetro')
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist =[]
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host,
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="section",
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section",
thumbnail=get_thumb('alphabet', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<div class="post-thumbnail"><a href="([^"]+)" title="([^"]+)">.*?data-src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = scrapedurl
contentSerieName = scrapedtitle
action = 'seasons'
thumbnail = scrapedthumbnail
new_item = Item(channel=item.channel, title=scrapedtitle, url=url, thumbnail=thumbnail,
contentSerieName=contentSerieName, action=action)
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" >Página siguiente')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
url=next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
type=item.type))
return itemlist
def section(item):
itemlist = []
full_data = get_source(host)
if item.title == 'Generos':
data = scrapertools.find_single_match(full_data, '>Géneros</a>(.*?)</ul>')
elif item.title == 'Alfabetico':
data = scrapertools.find_single_match(full_data, '<ul id="menu-top"(.*?)</ul>')
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
if scrapedtitle != 'Series':
itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action='list_all'))
return itemlist
def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'Temporada (\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
if len(matches) > 0:
for scrapedseason in matches:
title = 'Temporada %s' % scrapedseason
infoLabels['season'] = scrapedseason
itemlist.append(Item(channel=item.channel, action='episodesxseason', url=item.url, title=title,
infoLabels=infoLabels))
else:
infoLabels['season'] = '1'
itemlist.append(Item(channel=item.channel, action='episodesxseason', url=item.url, title='Temporada 1',
infoLabels=infoLabels, single=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName,
extra1='library'))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
season = item.infoLabels['season']
full_data = get_source(item.url)
if item.single:
data = scrapertools.find_single_match(full_data, '<strong>Capítulos.*?</strong>(.*?)</ul>')
else:
data = scrapertools.find_single_match(full_data, 'Temporada %s.*?</strong>(.*?)</ul>' % season)
patron = '<a href="([^"]+)">.*?;.?([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
epi = 1
for scrapedurl, scrapedtitle in matches:
url = scrapedurl
contentEpisodeNumber = str(epi)
title = '%sx%s - %s ' % (season, contentEpisodeNumber, scrapedtitle)
infoLabels['episode'] = contentEpisodeNumber
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
contentSerieName=item.contentSerieName, contentEpisodeNumber=contentEpisodeNumber,
infoLabels=infoLabels))
epi += 1
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def search(item, text):
logger.info()
item.url = item.url + text
if text != '':
return list_all(item)
def findvideos(item):
itemlist = []
data = get_source(item.url)
patron = 'iframe src="([^&]+)&'
matches = re.compile(patron, re.DOTALL).findall(data)
for link in matches:
if 'id=' in link:
id_type = 'id'
ir_type = 'ir'
elif 'ud=' in link:
id_type = 'ud'
ir_type = 'ur'
else:
continue  # neither 'id=' nor 'ud=': skip so id_type/ir_type are never undefined
id = scrapertools.find_single_match(link, '%s=(.*)' % id_type)
base_link = scrapertools.find_single_match(link, '(.*?)%s=' % id_type)
ir = id[::-1]
referer = base_link+'%s=%s&/' % (id_type, ir)
video_data = httptools.downloadpage('%s%s=%s' % (base_link, ir_type, ir), headers={'Referer':referer},
follow_redirects=False)
url = video_data.headers['location']
title = '%s'
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play',
language='', infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist
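The de-obfuscation in `findvideos` above works by reversing the id taken from the iframe URL and requesting it back as `ir=`/`ur=` with a matching Referer; the real video URL then arrives in the redirect's `location` header. A toy trace of that handshake, with every value invented:

# Sketch: the reversed-id handshake from findvideos, on hypothetical values.
link = 'https://seriesmetro.com/go/?id=a1b2c3'  # invented iframe src
id = link.split('id=')[1]                       # 'a1b2c3'
ir = id[::-1]                                   # '3c2b1a'
base_link = link.split('id=')[0]                # 'https://seriesmetro.com/go/?'
referer = base_link + 'id=%s&/' % ir
request_url = '%sir=%s' % (base_link, ir)
# The video URL comes back in the 302 'location' header of request_url.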

View File

@@ -141,8 +141,15 @@ def findvideos(item):
hidden_url = get_source('%splayer/rep/%s' % (host, scraped_id), player)
url = scrapertools.find_single_match(hidden_url, 'iframe src=.?"([^"]+)"').replace('\\','')
lang = get_language(lang_data)
-itemlist.append(Item(channel=item.channel, title='%s', url=url, action='play', language=lang,
-infoLabels=item.infoLabels))
+if not config.get_setting('unify'):
+    title = ' %s' % lang
+else:
+    title = ''
+if url != '':
+    itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=lang,
+                    infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
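With `unify` disabled, the added code appends the language tag to the `'%s'` placeholder, and `get_servers_itemlist` later substitutes the server name, so a final title renders like `Openload LAT`. A quick illustration of that formatting chain (values invented):

# Sketch: how the placeholder plus language suffix becomes the final title.
lang = 'LAT'                            # hypothetical language tag
title = '%s' + ' %s' % lang             # -> '%s LAT' (unify disabled)
print(title % 'openload'.capitalize())  # -> 'Openload LAT'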