SeriesLan: Added gktools function and first version of peliculas (not yet functional)
@@ -12,6 +12,7 @@ from core.item import Item
 from platformcode import config, logger
 from channels import filtertools
 from channels import autoplay
+from lib import gktools
 
 IDIOMAS = {'latino': 'Latino'}
 list_language = IDIOMAS.values()
@@ -31,9 +32,11 @@ def mainlist(item):
     itemlist = list()
 
     itemlist.append(
-        Item(channel=item.channel, action="lista", title="Series", url=host, thumbnail=thumb_series, page=0))
+        Item(channel=item.channel, action="lista", title="Series", contentSerieName="Series", url=host, thumbnail=thumb_series, page=0))
     itemlist.append(
-        Item(channel=item.channel, action="lista", title="Live Action", url=host+"/liveaction", thumbnail=thumb_series, page=0))
+        Item(channel=item.channel, action="lista", title="Live Action", contentSerieName="Live Action", url=host+"/liveaction", thumbnail=thumb_series, page=0))
+    #itemlist.append(
+    #    Item(channel=item.channel, action="peliculas", title="Películas", contentSerieName="Películas", url=host+"/peliculas", thumbnail=thumb_series, page=0))
     itemlist = renumbertools.show_option(item.channel, itemlist)
     autoplay.show_option(item.channel, itemlist)
     return itemlist
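The new contentSerieName argument is stored on the Item like any other keyword and is carried along by item.clone(), so once mainlist() sets it, lista() and the pager items below can branch on it even though the visible title changes. A minimal sketch of that propagation, using a hypothetical stand-in class rather than the real core.item.Item:

    # FakeItem is a made-up stand-in; it only mimics how keyword arguments become
    # attributes and survive clone(), which is the property the commit relies on.
    class FakeItem(object):
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

        def clone(self, **kwargs):
            merged = dict(self.__dict__)
            merged.update(kwargs)
            return FakeItem(**merged)

    menu = FakeItem(channel="serieslan", action="lista", title="Series", contentSerieName="Series", page=0)
    pager = menu.clone(title="[COLOR cyan]Página Siguiente >>[/COLOR]", page=1)
    print(pager.contentSerieName)  # "Series" -- survives cloning, unlike the decorated title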
@@ -47,7 +50,7 @@ def lista(item):
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
     patron = '<a href="([^"]+)" '
-    if item.title == "Series":
+    if item.contentSerieName == "Series":
         patron += 'class="link">.+?<img src="([^"]+)".*?'
     else:
         patron += 'class="link-la">.+?<img src="([^"]+)".*?'
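The branch now keys off item.contentSerieName instead of the display title; the two branches differ only in the anchor's CSS class ("link" for series, "link-la" for Live Action). For reference, what the assembled pattern captures on made-up markup, assuming scrapertools.find_multiple_matches behaves like re.findall:

    import re

    # Hypothetical HTML in the shape the pattern expects; not taken from serieslan.com.
    sample = '<a href="/serie/ejemplo" class="link"><div class="c"><img src="/img/ejemplo.jpg"></div></a>'
    patron = '<a href="([^"]+)" ' + 'class="link">.+?<img src="([^"]+)".*?'
    print(re.findall(patron, sample))  # [('/serie/ejemplo', '/img/ejemplo.jpg')]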
@@ -84,14 +87,46 @@ def lista(item):
     url=host+"/pag-"+str(a)
     if b>10:
         itemlist.append(
-            Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=url, action="lista", page=0))
+            Item(channel=item.channel, contentSerieName=item.contentSerieName, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=url, action="lista", page=0))
     else:
         itemlist.append(
-            Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url, action="lista", page=item.page + 1))
+            Item(channel=item.channel, contentSerieName=item.contentSerieName, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url, action="lista", page=item.page + 1))
 
     tmdb.set_infoLabels(itemlist)
     return itemlist
 
+def peliculas(item):
+    logger.info()
+
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron = '<div class="pel play" dt="(.+?)" .+?><img src="(.+?)" .+? title="(.*?)"><span class=".+?">(.+?)<\/span><a href="(.+?)" class.+?>'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    # Paginacion
+    num_items_x_pagina = 30
+    min = item.page * num_items_x_pagina
+    min=min-item.page
+    max = min + num_items_x_pagina - 1
+    b=0
+    for scrapedplot,scrapedthumbnail, scrapedtitle, scrapedyear, scrapedurl in matches[min:max]:
+        b=b+1
+        url = host + scrapedurl
+        thumbnail = host +scrapedthumbnail
+        context = renumbertools.context(item)
+        context2 = autoplay.context
+        context.extend(context2)
+        itemlist.append(item.clone(title=scrapedtitle+"-"+scrapedyear, url=url, action="findvideos", thumbnail=thumbnail, plot=scrapedplot,
+                                   show=scrapedtitle,contentSerieName=scrapedtitle,context=context))
+    if b<29:
+        pass
+    else:
+        itemlist.append(
+            Item(channel=item.channel, contentSerieName=item.contentSerieName, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url, action="peliculas", page=item.page + 1))
+
+    tmdb.set_infoLabels(itemlist)
+    return itemlist
+
 def episodios(item):
     logger.info()
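Pagination in the new peliculas(): every call re-downloads the full listing and slices one block out of it. With min = page*30 - page = 29*page and max = min + 29, the slice matches[min:max] excludes max, so each page holds at most 29 entries and consecutive pages neither overlap nor skip items; the "Página Siguiente" entry is appended only when the counter b reaches 29, i.e. when the page came back full. A standalone worked example of that arithmetic, with made-up match tuples:

    # Illustrative data only: 100 fake (plot, thumb, title, year, url) tuples.
    num_items_x_pagina = 30
    matches = [("plot", "thumb", "title %d" % n, "2020", "/peli-%d" % n) for n in range(100)]

    for page in range(3):
        lo = page * num_items_x_pagina
        lo = lo - page                    # as in the commit: effectively 29 * page
        hi = lo + num_items_x_pagina - 1  # lo + 29
        chunk = matches[lo:hi]            # slice excludes hi, so at most 29 tuples
        print("page %d -> indices %d..%d, %d items" % (page, lo, hi - 1, len(chunk)))
    # page 0 -> indices 0..28, 29 items
    # page 1 -> indices 29..57, 29 items
    # page 2 -> indices 58..86, 29 items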
@@ -163,7 +198,7 @@ def findvideos(item):
     _x0x = scrapertools.find_single_match(data_new, 'var x0x = ([^;]+);')
     try:
         x0x = eval(_x0x)
-        url = resolve(x0x[4], base64.b64decode(x0x[1]))
+        url = base64.b64decode(gktools.transforma_gsv(x0x[4], base64.b64decode(x0x[1])))
         if 'download' in url:
             url = url.replace('download', 'preview')
         title = '%s'
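x0x is the JavaScript array scraped a few lines above: x0x[1] holds a base64 blob and x0x[4] the key string. The old code decoded that pair locally with resolve() (deleted in the next hunk); the new code hands it to gktools.transforma_gsv and base64-decodes the return value, so the helper is assumed to give back a base64 string rather than the final URL. A small sketch of the extraction step with a made-up payload, assuming scrapertools.find_single_match behaves like re.search(...).group(1):

    import re

    # Fabricated player snippet, only to show the shape of the scrape + eval step.
    data_new = 'player.setup(); var x0x = ["0", "aG9sYQ==", "7", "true", "k3y"]; player.play();'
    _x0x = re.search('var x0x = ([^;]+);', data_new).group(1)
    x0x = eval(_x0x)       # the channel eval()s the scraped array literal as-is
    print(x0x[4], x0x[1])  # key and base64 payload handed to the decoder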
@@ -191,27 +226,4 @@ def golink (num, sa, sl):
     SVR = "https://viteca.stream" if sa == 'true' else "http://serieslan.com"
     TT = "/" + urllib.quote_plus(sl[3].replace("/", "><")) if num == 0 else ""
 
-    return SVR + "/el/" + sl[0] + "/" + sl[1] + "/" + str(num) + "/" + sl[2] + d + TT
-
-
-def resolve(value1, value2):
-    reto = ''
-    lista = range(256)
-    j = 0
-    for i in range(256):
-        j = (j + lista[i] + ord(value1[i % len(value1)])) % 256
-        k = lista[i]
-        lista[i] = lista[j]
-        lista[j] = k
-
-    m = 0;
-    j = 0;
-    for i in range(len(value2)):
-        m = (m + 1) % 256
-        j = (j + lista[m]) % 256
-        k = lista[m]
-        lista[m] = lista[j]
-        lista[j] = k
-        reto += chr(ord(value2[i]) ^ lista[(lista[m] + lista[j]) % 256])
-
-    return reto
+    return SVR + "/el/" + sl[0] + "/" + sl[1] + "/" + str(num) + "/" + sl[2] + d + TT
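The deleted resolve() helper is a plain RC4 cipher: the first loop is the key-scheduling algorithm, the second generates the keystream and XORs it into the data, so the call it used to serve was effectively RC4(key=x0x[4], data=base64.b64decode(x0x[1])). For reference, a compact standalone equivalent (Python 2, byte strings), not part of the commit:

    def rc4(key, data):
        # Key scheduling: permute S under the key (first loop of the removed resolve()).
        S = range(256)
        j = 0
        for i in range(256):
            j = (j + S[i] + ord(key[i % len(key)])) % 256
            S[i], S[j] = S[j], S[i]
        # Keystream generation + XOR (second loop of the removed resolve()).
        out = []
        i = j = 0
        for ch in data:
            i = (i + 1) % 256
            j = (j + S[i]) % 256
            S[i], S[j] = S[j], S[i]
            out.append(chr(ord(ch) ^ S[(S[i] + S[j]) % 256]))
        return ''.join(out)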