Repaired channels and adjustments

Author: Unknown
Date: 2017-09-12 14:25:17 -03:00
parent e2b047daef
commit 174165fbf4
5 changed files with 158 additions and 198 deletions


@@ -355,8 +355,6 @@ def findvideos(item):
except:
return itemlist # Devolvemos lista vacia
lista_servers = servertools.get_servers_list()
for link in data_dict["link"]:
if item.contentType == 'episode' \
and (item.contentSeason != link['season'] or item.contentEpisodeNumber != link['episode']):
@@ -367,17 +365,17 @@ def findvideos(item):
flag = scrapertools.find_single_match(link["label"], '(\s*\<img src=.*\>)')
idioma = link["label"].replace(flag, "")
if link["quality"] != "?":
calidad = (' [' + link["quality"] + ']')
calidad = (link["quality"])
else:
calidad = ""
video = find_videos(link["url"], lista_servers)
itemlist.extend(servertools.find_video_items(data=url))
if video["servidor"] != "":
servidor = video["servidor"]
url = video["url"]
title = "Ver en " + servidor.capitalize() + calidad + ' (' + idioma + ')'
itemlist.append(item.clone(action="play", viewmode="list", server=servidor, title=title,
text_color="0xFF994D00", url=url, folder=False))
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.quality = calidad
videoitem.language = idioma
videoitem.contentTitle = item.title
itemlist = servertools.get_servers_itemlist(itemlist)
if config.get_videolibrary_support() and itemlist and item.contentType == "movie":
infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
@@ -390,40 +388,6 @@ def findvideos(item):
return itemlist
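
In the findvideos hunk above, each JSON link carries a label made of the language text plus a flag <img> tag; the channel pulls the tag out with find_single_match and strips it with replace() so only the language remains. A rough equivalent with plain re, run on a made-up label:

import re

label = 'Latino <img src="/flags/mx.png">'   # hypothetical link["label"]
flag = re.search(r'(\s*<img src=.*>)', label)
idioma = label.replace(flag.group(1), '') if flag else label
print(idioma)  # Latino
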
def find_videos(url, lista_servers):
# logger.info()
ret = {'titulo': "",
'url': "",
'servidor': ""}
# Ejecuta el find_videos en cada servidor hasta que encuentra una coicidencia
for serverid in lista_servers:
try:
servers_module = __import__("servers." + serverid)
server_module = getattr(servers_module, serverid)
devuelve = server_module.find_videos(url)
if devuelve:
ret["titulo"] = devuelve[0][0]
ret["url"] = devuelve[0][1]
ret["servidor"] = devuelve[0][2]
# reordenar el listado, es probable q el proximo enlace sea del mismo servidor
lista_servers.remove(serverid)
lista_servers.insert(0, serverid)
break
except ImportError:
logger.error("No existe conector para #" + serverid + "#")
# import traceback
# logger.info(traceback.format_exc())
except:
logger.error("Error en el conector #" + serverid + "#")
import traceback
logger.error(traceback.format_exc())
return ret
def episodios(item):
# Necesario para las actualizaciones automaticas
return get_temporadas(Item(channel=__chanel__, url=item.url, show=item.show, extra="serie_add"))
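
The find_videos helper above (which this commit appears to drop in favour of servertools.get_servers_itemlist) walks the list of server connectors until one recognises the URL, then moves that server to the front of lista_servers because the next link on the same page is likely hosted on the same server. A minimal, self-contained sketch of that move-to-front lookup, with hypothetical matcher callables standing in for the servers.<id> modules:

def find_first_match(url, server_ids, matchers):
    # Try each known server in order; on a hit, move it to the front so the
    # next lookup starts with the most likely server.
    for server_id in list(server_ids):
        match = matchers[server_id](url)
        if match:
            server_ids.remove(server_id)
            server_ids.insert(0, server_id)
            return {"servidor": server_id, "url": match}
    return {"servidor": "", "url": ""}

# Toy matchers keyed by server id (the real ones are servers.<id>.find_videos).
matchers = {
    "okru": lambda u: u if "ok.ru" in u else None,
    "vimpleru": lambda u: u if "vimple" in u else None,
}
servers = ["vimpleru", "okru"]
print(find_first_match("https://ok.ru/video/123", servers, matchers))
print(servers)  # ['okru', 'vimpleru'] after the move-to-front
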


@@ -60,18 +60,15 @@ def todas(item):
idioma = scrapertools.decodeHtmlentities(idioma)
url = urlparse.urljoin(item.url, scrapedurl)
year = scrapedyear
if idioma in audio:
idioma = audio[idioma]
else:
idioma = audio['Sub Español']
title = scrapertools.decodeHtmlentities(scrapedtitle) + ' (' + idioma + ')'
title = scrapertools.decodeHtmlentities(scrapedtitle)
thumbnail = scrapedthumbnail
plot = scrapedplot
fanart = 'https://s31.postimg.org/dousrbu9n/qserie.png'
itemlist.append(
Item(channel=item.channel, action="temporadas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fanart=fanart, extra=idioma, contentSerieName=scrapedtitle, infoLabels={'year': year}))
fanart=fanart, extra=idioma, contentSerieName=scrapedtitle, infoLabels={'year': year},
language=idioma))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion
siguiente = ''
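
todas() maps the scraped language label through the channel's audio dict, falling back to 'Sub Español' for unknown labels, and now passes the result to the Item as language= instead of appending it to the title. The same lookup as a dict.get one-liner (the dict values here are made up; the real audio dict is defined elsewhere in the channel):

audio = {'Español': 'Esp', 'Latino': 'Lat', 'Sub Español': 'VOSE'}

def normalise_language(idioma):
    return audio.get(idioma, audio['Sub Español'])

print(normalise_language('Latino'))   # Lat
print(normalise_language('Japonés'))  # VOSE (fallback)
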

plugin.video.alfa/channels/repelis.py Executable file → Normal file

@@ -5,6 +5,8 @@ import urlparse
from core import scrapertools
from core import servertools
from core import httptools
from core import tmdb
from core.item import Item
from platformcode import config, logger
@@ -19,130 +21,107 @@ def mainlist(item):
mifan = "http://www.psicocine.com/wp-content/uploads/2013/08/Bad_Robot_Logo.jpg"
itemlist.append(Item(channel=item.channel, action="menupelis", title="Peliculas", url="http://www.repelis.tv/pag/1",
thumbnail="http://www.gaceta.es/sites/default/files/styles/668x300/public/metro_goldwyn_mayer_1926-web.png?itok=-lRSR9ZC",
thumbnail="http://www.gaceta.es/sites/default/files/styles/668x300/public"
"/metro_goldwyn_mayer_1926-web.png?itok=-lRSR9ZC",
fanart=mifan))
itemlist.append(Item(channel=item.channel, action="menuestre", title="Estrenos",
url="http://www.repelis.tv/archivos/estrenos/pag/1",
thumbnail="http://t0.gstatic.com/images?q=tbn:ANd9GcS4g68rmeLQFuX7iCrPwd00FI_OlINZXCYXEFrJHTZ0VSHefIIbaw",
thumbnail="http://t0.gstatic.com/images?q=tbn"
":ANd9GcS4g68rmeLQFuX7iCrPwd00FI_OlINZXCYXEFrJHTZ0VSHefIIbaw",
fanart=mifan))
itemlist.append(
Item(channel=item.channel, action="menudesta", title="Destacadas", url="http://www.repelis.tv/pag/1",
thumbnail="http://img.irtve.es/v/1074982/", fanart=mifan))
itemlist.append(Item(channel=item.channel, action="todaspelis", title="Proximos estrenos",
Item(channel=item.channel, action="menudesta", title="Destacadas", url="http://www.repelis.tv/pag/1",
thumbnail="http://img.irtve.es/v/1074982/", fanart=mifan))
itemlist.append(Item(channel=item.channel, action="menupelis", title="Proximos estrenos",
url="http://www.repelis.tv/archivos/proximos-estrenos/pag/1",
thumbnail="https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcTpsRC-GTYzCqhor2gIDfAB61XeymwgXWSVBHoRAKs2c5HAn29f&reload=on",
thumbnail="https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcTpsRC"
"-GTYzCqhor2gIDfAB61XeymwgXWSVBHoRAKs2c5HAn29f&reload=on",
fanart=mifan))
itemlist.append(
Item(channel=item.channel, action="todaspelis", title="Todas las Peliculas", url="http://www.repelis.tv/pag/1",
thumbnail="https://freaksociety.files.wordpress.com/2012/02/logos-cine.jpg", fanart=mifan))
itemlist.append(Item(channel=item.channel, action="menupelis", title="Todas las Peliculas",
url="http://www.repelis.tv/pag/1",
thumbnail="https://freaksociety.files.wordpress.com/2012/02/logos-cine.jpg", fanart=mifan))
if config.get_setting("adult_mode") != 0:
itemlist.append(Item(channel=item.channel, action="todaspelis", title="Eroticas +18",
itemlist.append(Item(channel=item.channel, action="menupelis", title="Eroticas +18",
url="http://www.repelis.tv/genero/eroticas/pag/1",
thumbnail="http://www.topkamisetas.com/catalogo/images/TB0005.gif",
fanart="http://www.topkamisetas.com/catalogo/images/TB0005.gif"))
# Quito la busqueda por año si no esta enabled el adultmode, porque no hay manera de filtrar los enlaces eroticos72
fanart="http://www.topkamisetas.com/catalogo/images/TB0005.gif", extra='adult'))
# Quito la busqueda por año si no esta enabled el adultmode, porque no hay manera de filtrar los enlaces
# eroticos72
itemlist.append(
Item(channel=item.channel, action="poranyo", title="Por Año", url="http://www.repelis.tv/anio/2016",
thumbnail="http://t3.gstatic.com/images?q=tbn:ANd9GcSkxiYXdBcI0cvBLsb_nNlz_dWXHRl2Q-ER9dPnP1gNUudhrqlR",
fanart=mifan))
Item(channel=item.channel, action="poranyo", title="Por Año", url="http://www.repelis.tv/anio/2016",
thumbnail="http://t3.gstatic.com/images?q=tbn:ANd9GcSkxiYXdBcI0cvBLsb_nNlz_dWXHRl2Q"
"-ER9dPnP1gNUudhrqlR",
fanart=mifan))
# Por categoria si que filtra la categoria de eroticos
itemlist.append(Item(channel=item.channel, action="porcateg", title="Por Categoria",
url="http://www.repelis.tv/genero/accion/pag/1",
thumbnail="http://www.logopro.it/blog/wp-content/uploads/2013/07/categoria-sigaretta-elettronica.png",
thumbnail="http://www.logopro.it/blog/wp-content/uploads/2013/07/categoria-sigaretta"
"-elettronica.png",
fanart=mifan))
itemlist.append(
Item(channel=item.channel, action="search", title="Buscar...", url="http://www.repelis.tv/search/?s=",
thumbnail="http://thumbs.dreamstime.com/x/buscar-pistas-13159747.jpg", fanart=mifan))
Item(channel=item.channel, action="search", title="Buscar...", url="http://www.repelis.tv/search/?s=",
thumbnail="http://thumbs.dreamstime.com/x/buscar-pistas-13159747.jpg", fanart=mifan))
return itemlist
# Peliculas recien agregadas ( quitamos las de estreno del slide-bar en el top
def menupelis(item):
logger.info(item.url)
itemlist = []
data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
if item.extra == '':
section = 'Recién Agregadas'
elif item.extra == 'year':
section = 'del Año \d{4}'
elif item.extra == 'adult':
section = 'de Eróticas \+18'
else:
section = 'de %s'%item.extra
patronenlaces = '<h.>Películas %s<\/h.>.*?>(.*?)<\/section>'%section
patronenlaces = '<h3>Películas Recién Agregadas</h3>.*?>(.*?)</section>'
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
logger.info("begin ----------")
scrapertools.printMatches(matchesenlaces)
logger.info("end ----------")
for bloque_enlaces in matchesenlaces:
patron = '<div class="poster-media-card">.*?'
patron += '<a href="(.*?)".*?title="(.*?)".*?'
patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
patron += '<img src="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
logger.info("He encontrado el segundo bloque")
logger.info("extra_info: %s" % extra_info)
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "");
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, fanart=thumbnail))
quality = scrapertools.find_single_match(extra_info, 'calidad.*?>Calidad (.*?)<')
year = scrapertools.find_single_match(extra_info, '"anio">(\d{4})<')
language = scrapertools.find_multiple_matches(extra_info, 'class="(latino|espanol|subtitulado)"')
# if language = 'ingles':
# language='vo'
new_item=Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, fanart=thumbnail, language=language, quality=quality,
infoLabels={'year': year})
if year:
tmdb.set_infoLabels_item(new_item)
## Paginación
# <span class="current">2</span><a href="http://www.repelis.tv/page/3"
itemlist.append(new_item)
# Si falla no muestra ">> Página siguiente"
try:
next_page = scrapertools.get_match(data, '<span class="current">\d+</span><a href="([^"]+)"')
title = "[COLOR red][B]Pagina siguiente »[/B][/COLOR]"
itemlist.append(
Item(channel=item.channel, title=title, url=next_page, action="menupelis", thumbnail=item.thumbnail,
fanart=item.fanart, folder=True))
except:
pass
return itemlist
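
menupelis() now derives the heading it scrapes from item.extra, so a single parser covers the 'Recién Agregadas' listing, a year, the adult section and arbitrary categories instead of only the fixed 'Películas Recién Agregadas' block. A small sketch of that dispatch against an invented HTML snippet (the real pages are fetched with httptools.downloadpage):

import re

def build_section_pattern(extra):
    # Mirrors the if/elif chain in menupelis().
    if extra == '':
        section = 'Recién Agregadas'
    elif extra == 'year':
        section = r'del Año \d{4}'
    elif extra == 'adult':
        section = r'de Eróticas \+18'
    else:
        section = 'de %s' % extra
    return r'<h.>Películas %s</h.>.*?>(.*?)</section>' % section

html = '<h3>Películas del Año 2016</h3><div>...cards...</section>'
print(re.findall(build_section_pattern('year'), html, re.DOTALL))  # ['...cards...']
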
# Todas las peliculas
def todaspelis(item):
logger.info(item.url)
itemlist = []
data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
print data
patronenlaces = '<h1>.*?</h1>.*?>(.*?)</section>'
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
for bloque_enlaces in matchesenlaces:
# patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"'
patron = '<div class="poster-media-card">.*?'
patron += '<a href="(.*?)".*?title="(.*?)".*?'
patron += '<img src="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "");
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, fanart=thumbnail))
## Paginación
# <span class="current">2</span><a href="http://www.repelis.tv/page/3"
# Si falla no muestra ">> Página siguiente"
try:
next_page = scrapertools.get_match(data, '<span class="current">\d+</span><a href="([^"]+)"')
title = "[COLOR red][B]Pagina siguiente »[/B][/COLOR]"
itemlist.append(Item(channel=item.channel, title=title, url=next_page, action="todaspelis", folder=True))
Item(channel=item.channel, title=title, url=next_page, action="menupelis", thumbnail=item.thumbnail,
fanart=item.fanart, folder=True, extra=item.extra))
except:
pass
return itemlist
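
The cache_page → httptools.downloadpage swap repeats throughout the file; in every case the response body is still decoded from ISO-8859-1 and re-encoded as UTF-8 so the accented patterns ('Películas', 'Año', …) match the page bytes. The round-trip in isolation, shown on literal bytes (the channel itself runs on Python 2, where .data is a byte string):

raw = b'Pel\xedculas Reci\xe9n Agregadas'        # bytes as the site serves them (ISO-8859-1)
data = raw.decode('iso-8859-1').encode('utf-8')  # UTF-8 bytes, which the regexes expect
print(data.decode('utf-8'))                      # Películas Recién Agregadas
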
@@ -154,7 +133,7 @@ def menudesta(item):
itemlist = []
data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patronenlaces = '<h3>.*?Destacadas.*?>(.*?)<h3>'
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
@@ -168,7 +147,6 @@ def menudesta(item):
patron += '<img src="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "");
@@ -186,7 +164,7 @@ def menuestre(item):
itemlist = []
data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patronenlaces = '<h1>Estrenos</h1>(.*?)</section>'
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
@@ -195,18 +173,22 @@ def menuestre(item):
# patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"'
patron = '<div class="poster-media-card">.*?'
patron += '<a href="(.*?)".*?title="(.*?)".*?'
patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
patron += '<img src="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "");
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
quality = scrapertools.find_single_match(extra_info, 'calidad.*?>Calidad (.*?)<')
year = scrapertools.find_single_match(extra_info, '"anio">(\d{4})<')
language = scrapertools.find_single_match(extra_info, 'class="(latino|espanol|subtitulado)"')
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, fanart=thumbnail))
thumbnail=thumbnail, fanart=thumbnail, language=language, quality=quality,
infoLabels={'year': year}))
## Paginación
# <span class="current">2</span><a href="http://www.repelis.tv/page/3"
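
menupelis() and menuestre() now capture a third group (extra_info, the markup between the title attribute and the poster image) and mine it for quality, year and language. scrapertools.find_single_match behaves roughly like "first captured group, or empty string if nothing matches"; a stand-in plus the three extractions, run against an invented card fragment:

import re

def find_single_match(data, pattern):
    # Rough stand-in for scrapertools.find_single_match.
    match = re.search(pattern, data, re.DOTALL)
    return match.group(1) if match else ''

extra_info = ('<span class="calidad2">Calidad HD</span>'
              '<span class="anio">2016</span><i class="latino"></i>')
print(find_single_match(extra_info, r'calidad.*?>Calidad (.*?)<'))             # HD
print(find_single_match(extra_info, r'"anio">(\d{4})<'))                       # 2016
print(find_single_match(extra_info, r'class="(latino|espanol|subtitulado)"'))  # latino
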
@@ -226,34 +208,15 @@ def findvideos(item):
itemlist = []
data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
'''<h2>Sinopsis</2><p>(.*?)</p>
<div id="informacion" class="tab-pane">
<h2>Titulo en Español</h2>
<p>Abzurdah</p>
<h2>Titulo Original</h2>
<p>Abzurdah</p>
<h2>Año de Lanzamiento</h2>
<p>2015</p>
<h2>Generos</h2>
<p>Romance</p>
<h2>Idioma</h2>
<p>Latino</p>
<h2>Calidad</h2>
<p>DVD-Rip</p>
'''
# estos son los datos para plot
patron = '<h2>Sinopsis</h2>.*?<p>(.*?)</p>.*?<div id="informacion".*?</h2>.*?<p>(.*?)</p>' # titulo
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for sinopsis, title in matches:
title = "[COLOR white][B]" + title + "[/B][/COLOR]"
patron = '<div id="informacion".*?>(.*?)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedplot in matches:
splot = title + "\n\n"
plot = scrapedplot
@@ -273,37 +236,45 @@ def findvideos(item):
patron = '<tbody>(.*?)</tbody>'
matchesx = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matchesx)
for bloq in matchesx:
patron = 'href="(.*?)".*?0 0">(.*?)</.*?<td>(.*?)</.*?<td>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(bloq)
# scrapertools.printMatches(matches)
for scrapedurl, scrapedserver, scrapedlang, scrapedquality in matches:
url = urlparse.urljoin(item.url, scrapedurl)
logger.info("Lang:[" + scrapedlang + "] Quality[" + scrapedquality + "] URL[" + url + "]")
patronenlaces = '.*?://(.*?)/'
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(scrapedurl)
scrapertools.printMatches(matchesenlaces)
scrapedtitle = ""
if scrapedserver == 'Vimple':
scrapedserver = 'vimpleru'
elif scrapedserver == 'Ok.ru':
scrapedserver = 'okru'
server = servertools.get_server_name(scrapedserver)
for scrapedenlace in matchesenlaces:
scrapedtitle = title + " [COLOR white][ [/COLOR]" + "[COLOR green]" + scrapedquality + "[/COLOR]" + "[COLOR white] ][/COLOR]" + " [COLOR red] [" + scrapedlang + "][/COLOR] » " + scrapedserver
itemlist.append(
Item(channel=item.channel, action="play", title=scrapedtitle, extra=title, url=url, fanart=item.thumbnail,
thumbnail=item.thumbnail, plot=splot, folder=False))
itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, extra=title, url=url,
fanart=item.thumbnail, thumbnail=item.thumbnail, plot=splot, language=scrapedlang,
quality=scrapedquality, server=server))
return itemlist
def play(item):
logger.info("url=" + item.url)
logger.info()
itemlist =[]
# itemlist = servertools.find_video_items(data=item.url)
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(scrapertools.cache_page(item.url), '<iframe src="([^"]+)"')
itemlist = servertools.find_video_items(data=url)
enc = scrapertools.find_multiple_matches(data, "Player\.decode\('(.*?)'\)")
dec=''
for cod in enc:
dec+=decode(cod)
url = scrapertools.find_single_match(dec,'src="(.*?)"')
itemlist.append(item.clone(url=url))
return itemlist
@@ -314,17 +285,10 @@ def search(item, texto):
item.url = 'http://www.repelis.tv/buscar/?s=%s' % (texto)
logger.info(item.url)
data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
logger.info("data: " + data)
'''
<div class="col-xs-2">
<div class="row">
<a href="http://www.repelis.tv/8973/pelicula/contracted-phase-ii.html" title="Ver Película Contracted: Phase II Online">
<img src="http://1.bp.blogspot.com/-YWmw6voBipE/VcB91p-EcnI/AAAAAAAAQZs/EhUzWlInmA8/s175/contracted-phase-2.jpg" border="0">
'''
patron = '<div class="col-xs-2">.*?'
patron += '<div class="row">.*?'
patron += '<a href="(.*?)" title="(.*?)">.*?'
@@ -334,8 +298,6 @@ def search(item, texto):
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
print "repelis ..................................."
itemlist = []
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
@@ -344,43 +306,38 @@ def search(item, texto):
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
logger.info(url)
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
fanart=thumbnail))
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, fanart=thumbnail))
return itemlist
# Por año, aquí está difícil filtrar las "eroticas" así que quito la opcion si no esta el adultmode enabled
def poranyo(item):
logger.info(item.url)
itemlist = []
data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patron = '<option value="([^"]+)">(.*?)</option>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "")
url = urlparse.urljoin(item.url, scrapedurl)
itemlist.append(
Item(channel=item.channel, action="todaspelis", title=title, fulltitle=title, url=url, fanart=item.fanart))
itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
fanart=item.fanart, extra='year'))
return itemlist
# Aqui si que se filtran las eroticas
def porcateg(item):
logger.info(item.url)
itemlist = []
data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
itemlist = []
for scrapedurl, scrapedtitle in matches:
@@ -389,17 +346,49 @@ def porcateg(item):
url = urlparse.urljoin(item.url, scrapedurl)
logger.info(url)
# si no esta permitidas categoria adultos, la filtramos
erotica = ""
if config.get_setting("adult_mode") == 0:
patron = '.*?/erotic.*?'
try:
erotica = scrapertools.get_match(scrapedurl, patron)
except:
itemlist.append(
Item(channel=item.channel, action="todaspelis", fanart=item.fanart, title=title, fulltitle=title,
url=url))
extra = title
adult_mode = config.get_setting("adult_mode")
if adult_mode != 0:
if 'erotic' in scrapedurl:
extra = 'adult'
else:
itemlist.append(Item(channel=item.channel, action="todaspelis", title=title, fulltitle=title, url=url,
fanart=item.fanart))
extra=title
if (extra=='adult' and adult_mode != 0) or extra != 'adult':
itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
fanart=item.fanart, extra = extra))
return itemlist
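
porcateg() used to hide erotic categories by matching the URL inside a try/except; the reworked version tags them extra='adult' and only lists them when the adult_mode setting is enabled. The decision reduced to a predicate (a sketch, not the channel's exact control flow):

def should_list_category(scraped_url, adult_mode):
    # Erotic categories are listed only when adult_mode is enabled;
    # everything else is always listed.
    if 'erotic' in scraped_url:
        return adult_mode != 0
    return True

print(should_list_category('http://www.repelis.tv/genero/eroticas/pag/1', 0))  # False
print(should_list_category('http://www.repelis.tv/genero/accion/pag/1', 0))    # True
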
def decode(string):
keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="
output = ''
input = string.encode('utf8')
i = 0
while i < len(input):
enc1 = keyStr.index(input[i])
i += 1
enc2 = keyStr.index(input[i])
i += 1
enc3 = keyStr.index(input[i])
i += 1
enc4 = keyStr.index(input[i])
i += 1
chr1 = (enc1 << 2) | (enc2 >> 4)
chr2 = ((enc2 & 15) << 4) | (enc3 >> 2)
chr3 = ((enc3 & 3) << 6) | enc4
output = output + unichr(chr1)
if enc3 != 64:
output = output + unichr(chr2)
if enc4 != 64:
output = output + unichr(chr3)
output = output.decode('utf8')
return output
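
decode() above is a hand-rolled Base64 decoder over the standard alphabet, so for well-formed, padded input it yields the same text as base64.b64decode. play() concatenates the decoded Player.decode blobs and pulls the final src URL out of the result. A compact sketch of that pipeline with an invented blob:

import base64
import re

def decode_blob(blob):
    # Equivalent to the hand-rolled decode() for valid Base64 input.
    return base64.b64decode(blob).decode('utf-8')

# Invented example: encode a snippet shaped like the decoded player markup.
blob = base64.b64encode(b'<iframe src="https://example.com/embed/abc123">').decode('ascii')
dec = decode_blob(blob)
print(re.search(r'src="(.*?)"', dec).group(1))  # https://example.com/embed/abc123
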


@@ -28,7 +28,12 @@
]
},
"free": true,
"id": "okru",
"id": {
"value": [
"okru",
"ok.ru"
]
},
"name": "okru",
"settings": [
{


@@ -20,7 +20,12 @@
]
},
"free": true,
"id": "vimpleru",
"id": {
"value": [
"vimpleru",
"vimple"
]
},
"name": "vimpleru",
"settings": [
{
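
Both server definitions change "id" from a single string to an object whose "value" list holds several aliases ("okru"/"ok.ru", "vimpleru"/"vimple"), matching the label normalisation added in repelis.py before servertools.get_server_name is called. A hypothetical lookup over such alias lists (not the servertools implementation):

SERVER_IDS = {
    "okru": ["okru", "ok.ru"],
    "vimpleru": ["vimpleru", "vimple"],
}

def resolve_server(label):
    # Return the canonical server whose alias appears in the scraped label.
    label = label.lower()
    for server, aliases in SERVER_IDS.items():
        if any(alias in label for alias in aliases):
            return server
    return ""

print(resolve_server("Ok.ru"))   # okru
print(resolve_server("Vimple"))  # vimpleru
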