Merge pull request #309 from Intel11/master

Actualizados
This commit is contained in:
Alfa
2018-06-14 12:37:52 -05:00
committed by GitHub
13 changed files with 392 additions and 126 deletions

View File

@@ -0,0 +1,30 @@
{
"id": "animejl",
"name": "AnimeJL",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://www.animejl.net/img/Logo.png",
"banner": "",
"categories": [
"anime"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - Episodios de anime",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,198 @@
# -*- coding: utf-8 -*-
# -*- Channel AnimeJL -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from channelselector import get_thumb
host = 'https://www.animejl.net/'
def mainlist(item):
    """Build the channel's main menu entries."""
    logger.info()

    # (title, action, url, thumbnail id) for each menu entry.
    menu_entries = [
        ("Nuevos Episodios", "new_episodes", host, 'new_episodes'),
        ("Todas", "list_all", host + 'animes', 'all'),
        ("Series", "list_all", host + 'animes?type%5B%5D=1&order=default', 'tvshows'),
        ("Películas", "list_all", host + 'animes?type%5B%5D=2&order=default', 'movies'),
        ("Buscar", "search", host + 'animes?q=', 'search'),
    ]

    itemlist = []
    for entry_title, entry_action, entry_url, thumb_id in menu_entries:
        itemlist.append(Item(channel=item.channel, title=entry_title,
                             action=entry_action, url=entry_url,
                             thumbnail=get_thumb(thumb_id, auto=True)))
    return itemlist
def get_source(url):
    """Download *url* and strip whitespace/markup noise before scraping."""
    logger.info()
    raw = httptools.downloadpage(url).data
    # Collapse newlines/tabs/entities/quotes/parens so regexes stay simple.
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}|"|\(|\)', "", raw)
def new_episodes(item):
    """List the latest published episodes from the site's home page."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    data = scrapertools.find_single_match(data, "<h2>Últimos episodios</h2>.*?</ul>")
    patron = "<li><a href='(.*?)' class.*?<img src='(.*?)' alt='(.*?)'></span><span class='Capi'>(.*?)</span>"
    for rel_url, rel_thumb, show_name, episode_label in re.findall(patron, data, re.DOTALL):
        itemlist.append(Item(channel=item.channel,
                             action='findvideos',
                             title='%s %s' % (show_name, episode_label),
                             url=host + rel_url,
                             thumbnail=host + rel_thumb,
                             contentSerieName=show_name,
                             ))
    return itemlist
def list_all(item):
    """List every title on the current catalogue page (animes, series, movies).

    Series ("Anime" type) get contentSerieName/contentSeasonNumber set;
    anything else is treated as a movie and gets contentTitle, so the
    downstream tmdb/library code fills the right metadata.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = "<article class='Anime alt B'><a href='(.*?)'>.*?class=.*?<img src='(.*?)' alt='(.*?)'>"
    patron += "</figure><span class='Type .*?'>(.*?)</span>.*?star.*?<p>(.*?)</p>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    # FIX: the original named this loop variable 'type', shadowing the
    # builtin, and contained a dead 'type = type' statement.
    for scrapedurl, scrapedthumbnail, scrapedtitle, content_type, plot in matches:
        url = host + scrapedurl
        thumbnail = host + scrapedthumbnail
        title = scrapedtitle
        season = ''
        # Titles like "Foo season 2" carry the season inline; split it out
        # so contentSerieName holds only the show's name.
        if 'season' in scrapedtitle.lower():
            season = scrapertools.find_single_match(scrapedtitle, r'season (\d+)')
            scrapedtitle = scrapertools.find_single_match(scrapedtitle, '(.*?) season')
        new_item = Item(channel=item.channel, action='episodios',
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        contentSerieName=scrapedtitle,
                        plot=plot,
                        type=item.type,
                        infoLabels={}
                        )
        if content_type.lower() == 'anime':
            new_item.contentSerieName = scrapedtitle
            new_item.contentSeasonNumber = season
        else:
            new_item.contentTitle = scrapedtitle
        itemlist.append(new_item)

    # Pagination: the "&raquo;" anchor points to the next catalogue page.
    next_page = scrapertools.find_single_match(
        data, "<li><a href='([^']+)'><span>&raquo;</span></a></li></ul>")
    if next_page != '':
        itemlist.append(Item(channel=item.channel,
                             action="list_all",
                             title=">> Página siguiente",
                             url=host + next_page,
                             thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
                             ))
    return itemlist
def episodios(item):
    """Build the episode list for a title.

    Movie pages expose "Formatos disponibles" instead of an episode list;
    for a non-anime entry with a single link, resolve it straight to its
    video sources via findvideos().
    """
    logger.info()
    itemlist = []
    base_data = get_source(item.url)
    data = scrapertools.find_single_match(base_data, '<div class=Title>Lista de episodios</div>.*?</ul>')
    if data == '':
        data = scrapertools.find_single_match(base_data, '<div class=Title>Formatos disponibles</div>.*?</ul>')
    if 'listepisodes' in data.lower():
        patron = "<li><a href='(.*?)' class.*?>(.*?)<i class='fa-eye-slash'></i></a></li>"
    elif 'listcaps' in data.lower():
        patron = "<a href=(.*?)>.*?alt=(.*?)>"
    else:
        # FIX: with an unrecognised layout the original fell through with
        # 'patron' unbound and raised NameError; return an empty list instead.
        return itemlist
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.strip()
        # The episode number lives in the last whitespace-separated token.
        space_pos = title.rfind(' ')
        tail = title[space_pos:] if space_pos != -1 else title
        episode = scrapertools.find_single_match(tail, r' (\d+)')
        url = host + scrapedurl
        itemlist.append(Item(channel=item.channel, title='Episodio %s' % episode, thumbnail=item.thumbnail, url=url,
                             action='findvideos'))
    # FIX: the original wrote 'item.type.lower != ...' — comparing the bound
    # method object itself, which is never equal to a string, so the
    # condition was always True and single-episode animes were skipped too.
    if item.type.lower() != 'anime' and len(itemlist) == 1:
        return findvideos(itemlist[0])
    # Episodes are scraped newest-first; present them oldest-first.
    return itemlist[::-1]
def findvideos(item):
    """Resolve the playable server links embedded in an episode page."""
    logger.info()
    page = get_source(item.url)
    itemlist = servertools.find_video_items(data=page)
    # Re-title each link with its detected server name, e.g. "[Openload]".
    for video in itemlist:
        video.title = '[%s]' % video.server.capitalize()
    return itemlist
def search(item, texto):
    """Run a catalogue search for *texto* (spaces URL-encoded as '+')."""
    logger.info()
    item.url = item.url + texto.replace(" ", "+")
    try:
        if texto == '':
            return []
        return list_all(item)
    except:
        # Alfa convention: searches never raise; log and return nothing.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def newest(categoria):
    """Entry point for Alfa's global "Novedades" listings."""
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'anime':
            item.url = host
            itemlist = new_episodes(item)
            # Drop a trailing pagination entry if one slipped in.
            if itemlist[-1].title == '>> Página siguiente':
                itemlist.pop()
        return itemlist
    except:
        # Alfa convention: newest() never raises; log and return nothing.
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

View File

@@ -11,6 +11,7 @@ from core.item import Item
from platformcode import platformtools, config, logger
__modo_grafico__ = config.get_setting('modo_grafico', 'animemovil')
__perfil__ = ''
@@ -28,6 +29,7 @@ host = "http://animemovil.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="recientes", title="Episodios Recientes", thumbnail=item.thumbnail,
@@ -48,6 +50,8 @@ def mainlist(item):
itemlist.append(item.clone(title="Configurar canal", action="openconfig", text_color=color5, folder=False))
if renumbertools.context:
itemlist = renumbertools.show_option(item.channel, itemlist)
return itemlist
@@ -280,71 +284,42 @@ def findvideos(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\s{2,}', '', data)
strm_id = scrapertools.find_single_match(data, '"id": (.*?),')
streams = scrapertools.find_single_match(data, '"stream": (.*?)};')
dict_strm = jsontools.load(streams)
akiba_url = scrapertools.find_single_match(data, '<div class="x-link"><a href="(.*?)"')
url = httptools.downloadpage('http:'+akiba_url, follow_redirects=False).headers.get('location')
title = '%s (%s)' % (item.title, 'akiba')
itemlist.append(item.clone(title=title, url=url, action='play'))
base_url = 'http:%s%s/' % (dict_strm['accessPoint'], strm_id)
for server in dict_strm['servers']:
expire = dict_strm['expire']
signature = dict_strm['signature']
last_modify = dict_strm['last_modify']
callback = 'playerWeb'
info = scrapertools.find_single_match(data, 'episodio_info=(.*?);')
dict_info = jsontools.load(info)
servers = dict_info['stream']['servers']
id = dict_info['id']
access_point = dict_info['stream']['accessPoint']
expire = dict_info['stream']['expire']
callback = dict_info['stream']['callback']
signature = dict_info['stream']['signature']
last_modify = dict_info['stream']['last_modify']
for server in servers:
stream_info = 'http:%s/%s/%s?expire=%s&callback=%s&signature=%s&last_modify=%s' % \
(access_point, id, server, expire, callback, signature, last_modify)
try:
dict_stream = jsontools.load(httptools.downloadpage(stream_info).data)
if dict_stream['status']:
kind = dict_stream['result']['kind']
if kind == 'iframe':
url = dict_stream['result']['src']
title = '%s (%s)' % (item.title, server)
elif kind == 'jwplayer':
url_style = dict_stream['result']['setup']
if server != 'rin':
if 'playlist' in url_style:
part = 1
for media_list in url_style['playlist']:
url = media_list['file']
title = '%s (%s) - parte %s' % (item.title, server, part)
itemlist.append(item.clone(title=title, url=url, action='play'))
part += 1
else:
url = url_style['file']
title = '%s (%s)' % (item.title, server)
else:
src_list = url_style['sources']
for source in src_list:
url = source['file']
quality = source['label']
title = '%s [%s](%s)' % (item.title, quality, server)
itemlist.append(item.clone(title=title, url=url, action='play'))
elif kind == 'javascript':
if 'jsCode' in dict_stream['result']:
jscode = dict_stream['result']['jsCode']
url = scrapertools.find_single_match(jscode, 'xmlhttp.open\("GET", "(.*?)"')
title = '%s (%s)' % (item.title, server)
strm_url = base_url +'%s?expire=%s&callback=%s&signature=%s&last_modify=%s' % (server, expire, callback,
signature, last_modify)
strm_data = httptools.downloadpage(strm_url).data
strm_data = scrapertools.unescape(strm_data)
title = '%s'
language = ''
if server not in ['fire', 'meph']:
urls = scrapertools.find_multiple_matches(strm_data, '"(?:file|src)"*?:.*?"(.*?)"')
for url in urls:
if url != '':
itemlist.append(item.clone(title=title, url=url, action='play'))
except:
pass
itemlist = servertools.get_servers_itemlist(itemlist)
url = url.replace ('\\/','/')
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play'))
elif server in ['fire', 'mpeh']:
url = scrapertools.find_single_match(strm_data, 'xmlhttp.open(\"GET\", \"(.*?)\"')
if url != '':
url = url.replace('\\/', '/')
itemlist.append(Item(channel=item.channel, title=url, url=url, action='play'))
else:
continue
servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
return itemlist
def newest(categoria):
logger.info()
item = Item()

View File

@@ -1,7 +1,7 @@
{
"id": "bajui",
"name": "Bajui",
"active": true,
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "bajui.png",

View File

@@ -67,7 +67,7 @@ def mainlist(item):
itemlist.append(
item.clone(title="CineCalidad Portugues",
action="submenu",
host="http://cinemaqualidade.to/",
host="http://www.cinemaqualidade.to/",
thumbnail=thumbbr,
extra="filmes",
))
@@ -81,7 +81,7 @@ def submenu(item):
idioma = 'peliculas'
idioma2 = "destacada"
host = item.host
if item.host == "http://cinemaqualidade.to/":
if item.host == "http://www.cinemaqualidade.to/":
idioma = "filmes"
idioma2 = "destacado"
logger.info()

View File

@@ -1,14 +1,13 @@
{
"id": "gmobi",
"name": "gmobi",
"name": "GNULA.mobi",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://gnula.mobi/wp-content/uploads/2016/08/Untitled-6.png",
"thumbnail": "http://www.gnula.mobi/wp-content/uploads/2018/05/Captura-1.png?%3E",
"banner": "",
"categories": [
"movie",
"adult"
"movie"
],
"settings": [
{

View File

@@ -12,7 +12,7 @@ from core import tmdb
from core.item import Item
from platformcode import logger
host = 'http://gnula.mobi/'
host = 'http://www.gnula.mobi/'
def mainlist(item):
logger.info()
itemlist = list()
@@ -68,17 +68,24 @@ def peliculas(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="col-mt-5 postsh">.*?href="(.*?)" title="(.*?)".*?under-title">(.*?)<.*?src="(.*?)"'
patron = '<div class="col-mt-5 postsh">.*?href="(.*?)" title="(.*?)".*?<.*?src="(.*?)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedyear, scrapedtitle, scrapedthumbnail in matches:
year = scrapertools.find_single_match(scrapedyear, r'.*?\((\d{4})\)')
itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, fulltitle = scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, infoLabels={'year': year}))
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
filter_thumb = scrapedthumbnail.replace("http://image.tmdb.org/t/p/w300", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
itemlist.append(Item(channel=item.channel,
action="findvideos",
title=scrapedtitle,
fulltitle = scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
infoLabels={'filtro': filter_list}))
tmdb.set_infoLabels(itemlist, True)
next_page_url = scrapertools.find_single_match(data, '<link rel="next" href="(.*?)"')
if next_page_url != "":
next_page_url = item.url + next_page_url
next_page_url = next_page_url
itemlist.append(item.clone(action="peliculas", title="Siguiente >>", text_color="yellow",
url=next_page_url))

View File

@@ -4,7 +4,7 @@
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/I7MxHZI.png",
"thumbnail": "https://www.inkapelis.com/wp-content/uploads/2016/07/logitoinkapelis-min.png",
"banner": "inkapelis.png",
"categories": [
"movie",
@@ -35,6 +35,22 @@
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
@@ -56,6 +72,20 @@
"Perfil 1"
]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
},
{
"id": "filtro_defecto_peliculas",
"type": "label",

View File

@@ -9,6 +9,10 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
from channels import filtertools
from channels import autoplay
__modo_grafico__ = config.get_setting("modo_grafico", "inkapelis")
__perfil__ = config.get_setting("perfil", "inkapelis")
@@ -19,14 +23,34 @@ perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E'],
color1, color2, color3, color4 = perfil[__perfil__]
IDIOMAS = {'Latino': 'LAT', 'Español':'CAST', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = ['Cam', 'TSHQ', 'Dvdrip', 'Blurayrip', 'HD Rip 320p', 'hd rip 320p', 'HD Real 720p', 'Full HD 1080p']
list_servers = ['openload', 'gamovideo', 'streamplay', 'streamango', 'vidoza']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(item.clone(title="Novedades", action="entradas", url="http://www.inkapelis.com/",
extra="Novedades", text_color=color1, thumbnail=get_thumb('newest', auto=True)))
itemlist.append(item.clone(title="Estrenos", action="entradas", url="http://www.inkapelis.com/genero/estrenos/",
text_color=color1, thumbnail=get_thumb('premieres', auto=True)))
#itemlist.append(item.clone(title="Estrenos", action="entradas", url="http://www.inkapelis.com/genero/estrenos/",
# text_color=color1, thumbnail=get_thumb('premieres', auto=True)))
itemlist.append(item.clone(title="Castellano", action="entradas",
url="https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Castellano&s=",
extra="Buscar", text_color=color1, thumbnail=get_thumb('espanolas', auto=True)))
itemlist.append(item.clone(title="Latino", action="entradas",
url="https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Latino&s=",
extra="Buscar", text_color=color1, thumbnail=get_thumb('latino', auto=True)))
itemlist.append(item.clone(title="VOSE", action="entradas",
url="https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Subtitulada&s=",
extra="Buscar", text_color=color1, thumbnail=get_thumb('newest', auto=True)))
itemlist.append(item.clone(title="Géneros", action="generos", url="http://www.inkapelis.com/", text_color=color1,
thumbnail=get_thumb('genres', auto=True),))
itemlist.append(item.clone(title="Buscar...", action="", text_color=color1))
@@ -43,6 +67,9 @@ def mainlist(item):
itemlist.append(
new_item.clone(action="filtro", title=title, url="http://www.inkapelis.com/?s=", text_color=color2))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -66,6 +93,16 @@ def newest(categoria):
if categoria == "terror":
item.url = "https://www.inkapelis.com/genero/terror/"
item.action = "entradas"
if categoria == "castellano":
item.url = "https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Castellano&s="
item.extra = "Buscar"
item.action = "entradas"
if categoria == "latino":
item.url = "https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Latino&s="
item.extra = "Buscar"
item.action = "entradas"
itemlist = entradas(item)
if itemlist[-1].action == "entradas":
@@ -246,8 +283,6 @@ def entradas(item):
thumbnail = scrapedthumbnail.replace("w185", "original")
title = scrapedtitle
calidad = calidad.strip()
if calidad:
title += " [" + calidad + "]"
itemlist.append(item.clone(action="findvideos", title=title, url=scrapedurl, thumbnail=thumbnail,
contentTitle=scrapedtitle, fulltitle=scrapedtitle,
@@ -272,6 +307,10 @@ def entradas(item):
if category == "Eroticas +18":
continue
idioma = idioma.strip()
if idioma in IDIOMAS:
idioma = IDIOMAS[idioma]
else:
idioma = IDIOMAS['Subtitulado']
calidad = calidad.strip()
scrapedtitle = scrapedtitle.replace("Ver Pelicula ", "")
title = scrapedtitle
@@ -358,13 +397,17 @@ def findvideos(item):
patron = '<td><a href="([^"]+)".*?title="([^"]+)".*?<td>([^"]+)<\/td><td>([^"]+)<\/td>'
matches = scrapertools.find_multiple_matches(data, patron)
for url, server, idioma, calidad in matches:
if idioma in IDIOMAS:
idioma= IDIOMAS[idioma]
else:
idioma = IDIOMAS['Subtitulado']
if server == "Embed":
server = "Nowvideo"
if server == "Ul":
server = "Uploaded"
title = "%s [%s][%s]" % (server, idioma, calidad)
itemlist.append(item.clone(action="play", title=title, url=url, language = idioma, quality = calidad,
server = server))
itemlist.append(item.clone(action="play", title=title, url=url, language=idioma, quality=calidad,
server=server, infoLabels=item.infoLabels))
patron = 'id="(embed[0-9]*)".*?<div class="calishow">(.*?)<.*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
@@ -374,8 +417,15 @@ def findvideos(item):
title = "Directo"
idioma = scrapertools.find_single_match(data, 'href="#%s".*?>([^<]+)<' % id_embed)
title = "%s [%s][%s]" % (title.capitalize(), idioma, calidad)
itemlist.append(item.clone(action="play", title=title, url=url, language = idioma, quality = calidad,
server = server))
itemlist.append(item.clone(action="play", title=title, url=url, language=idioma, quality=calidad,
server=server))
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if itemlist:
if not config.get_setting('menu_trailer', item.channel):
@@ -402,4 +452,7 @@ def play(item):
else:
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.infoLabels=item.infoLabels
return itemlist

View File

@@ -2,7 +2,7 @@
"id": "lacajita",
"name": "LaCajita",
"language": ["cast", "lat"],
"active": true,
"active": false,
"adult": false,
"thumbnail": "http://i.imgur.com/LVdupxc.png",
"categories": [
@@ -66,4 +66,4 @@
"visible": true
}
]
}
}

View File

@@ -10,26 +10,8 @@ from core import tmdb
from core import jsontools
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
tgenero = {"Comedia": "https://s7.postimg.cc/ne9g9zgwb/comedia.png",
"Suspense": "https://s13.postimg.cc/wmw6vl1cn/suspenso.png",
"Drama": "https://s16.postimg.cc/94sia332d/drama.png",
"Acción": "https://s3.postimg.cc/y6o9puflv/accion.png",
"Aventura": "https://s10.postimg.cc/6su40czih/aventura.png",
"Romance": "https://s15.postimg.cc/fb5j8cl63/romance.png",
"Animación": "https://s13.postimg.cc/5on877l87/animacion.png",
"Ciencia ficción": "https://s9.postimg.cc/diu70s7j3/cienciaficcion.png",
"Terror": "https://s7.postimg.cc/yi0gij3gb/terror.png",
"Documental": "https://s16.postimg.cc/7xjj4bmol/documental.png",
"Música": "https://s29.postimg.cc/bbxmdh9c7/musical.png",
"Fantasía": "https://s13.postimg.cc/65ylohgvb/fantasia.png",
"Misterio": "https://s1.postimg.cc/w7fdgf2vj/misterio.png",
"Crimen": "https://s4.postimg.cc/6z27zhirx/crimen.png",
"Familia": "https://s7.postimg.cc/6s7vdhqrf/familiar.png",
"Guerra": "https://s4.postimg.cc/n1h2jp2jh/guerra.png",
"Western": "https://s23.postimg.cc/lzyfbjzhn/western.png",
"Historia": "https://s15.postimg.cc/fmc050h1n/historia.png"
}
thumbletras = {'#': 'https://s32.postimg.cc/drojt686d/image.png',
'a': 'https://s32.postimg.cc/llp5ekfz9/image.png',
@@ -73,53 +55,46 @@ def mainlist(item):
itemlist.append(item.clone(title="Estrenos",
action="lista",
thumbnail='https://s21.postimg.cc/fy69wzm93/estrenos.png',
fanart='https://s21.postimg.cc/fy69wzm93/estrenos.png',
thumbnail=get_thumb('premieres', auto=True),
url=host + 'estrenos'
))
itemlist.append(item.clone(title="Todas",
action="lista",
thumbnail='https://s18.postimg.cc/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png',
thumbnail=get_thumb('all', auto=True),
url=host
))
itemlist.append(item.clone(title="Generos",
action="seccion",
url=host,
thumbnail='https://s3.postimg.cc/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png',
thumbnail=get_thumb('genres', auto=True),
extra='generos'
))
itemlist.append(item.clone(title="Alfabetico",
action="seccion",
url=host,
thumbnail='https://s17.postimg.cc/fwi1y99en/a-z.png',
fanart='https://s17.postimg.cc/fwi1y99en/a-z.png',
thumbnail=get_thumb('alphabet', auto=True),
extra='a-z'
))
itemlist.append(item.clone(title="Mas Vistas",
action="lista",
thumbnail='https://s9.postimg.cc/wmhzu9d7z/vistas.png',
fanart='https://s9.postimg.cc/wmhzu9d7z/vistas.png',
thumbnail=get_thumb('more watched', auto=True),
url=host + 'peliculas-mas-vistas'
))
itemlist.append(item.clone(title="Mas Votadas",
action="lista",
thumbnail='https://s7.postimg.cc/9kg1nthzf/votadas.png',
fanart='https://s7.postimg.cc/9kg1nthzf/votadas.png',
thumbnail=get_thumb('more voted', auto=True),
url=host + 'peliculas-mas-votadas'
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + '?s=',
thumbnail='https://s30.postimg.cc/pei7txpa9/buscar.png',
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
thumbnail=get_thumb('search', auto=True)
))
return itemlist
@@ -178,7 +153,7 @@ def seccion(item):
itemlist = []
data = get_source(item.url)
if item.extra == 'generos':
patron = '<li class=cat-item cat-item-.*?><a href=(.*?)>(.*?)<\/li>'
patron = '<li class=cat-item cat-item-.*?><a href=(.*?)>(.*?)</a><\/li>'
elif item.extra == 'a-z':
patron = '<li><a href=(.*?)>(\w|#)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -187,12 +162,8 @@ def seccion(item):
url = scrapedurl
thumbnail = ''
if item.extra == 'generos':
title = re.sub(r'<\/a> \(\d+\)', '', scrapedtitle)
cantidad = re.findall(r'.*?<\/a> \((\d+)\)', scrapedtitle)
th_title = title
title = title + ' (' + cantidad[0] + ')'
if th_title in tgenero:
thumbnail = tgenero[th_title]
#cantidad = re.findall(r'.*?<\/a> \((\d+)\)', scrapedtitle)
title = scrapedtitle
else:
title = scrapedtitle
if title.lower() in thumbletras:

View File

@@ -434,6 +434,7 @@ def nuevos_cap(item):
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
logger.debug(data)
capitulos = []
if "Nuevas" in item.title:
for child in data["b"]:
@@ -455,13 +456,15 @@ def nuevos_cap(item):
infoLabels['season'] = int(season)
except:
infoLabels['season'] = 0
if "Nuevos" in item.title:
if not child['episode']:
episode = scrapertools.find_single_match(child['name'], '\d+x(\d+)')
if not episode:
episode = "0"
infoLabels['episode'] = int(episode)
elif "al" in child['episode']:
episode = "0"
infoLabels['episode'] = int(episode)
else:
infoLabels['episode'] = int(child['episode'])
infoLabels['mediatype'] = "episode"

View File

@@ -162,14 +162,14 @@ def seasons(item):
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="all_episodes",
extra="episodios",
contentSerieName=item.contentSerieName,
))
return itemlist
def epidodios(item):
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)