Updated

danimados: fix for multiple seasons
diskokosmiko: removed
pelisgratis: fix
rexpelis: tmdb fix
seriesblancoxyz: removed
sipeliculas: domain change
yape: new quality
upvid: fix
vidtome: fix
xdrive: fix
Intel1
2018-11-07 10:14:32 -05:00
parent 17f57aedf0
commit 193efdda6d
14 changed files with 78 additions and 946 deletions

View File

@@ -113,7 +113,6 @@ def lista(item):
    data_lista = scrapertools.find_single_match(data,
                                                '<div class="items">(.+?)<\/div><\/div><div class=.+?>')
    patron = '<img src="([^"]+)" alt="([^"]+)">.+?<a href="([^"]+)">.+?<div class="texto">(.+?)<\/div>'
    # scrapedthumbnail, scrapedtitle, scrapedurl, scrapedplot
    matches = scrapertools.find_multiple_matches(data_lista, patron)
    for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedplot in matches:
        if item.title == "Peliculas Animadas":
@@ -134,13 +133,13 @@ def episodios(item):
    itemlist = []
    infoLabels = {}
    data = httptools.downloadpage(item.url).data
    patron = '(?s)<ul class="episodios">(.+?)<\/ul>'
    patron = '(?s)<ul class="episodios">(.+?)<span>Compartido'
    data_lista = scrapertools.find_single_match(data, patron)
    contentSerieName = item.title
    patron_caps = 'href="([^"]+)".*?'
    patron_caps += 'src="([^"]+)".*?'
    patron_caps += 'numerando">([^<]+).*?'
    patron_caps += 'link_go">.*?>([^<]+)'
    patron_caps += 'episodiotitle">.*?>([^<]+)'
    matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
    for scrapedurl, scrapedthumbnail, scrapedtempepi, scrapedtitle in matches:
        tempepi = scrapedtempepi.split(" - ")
@@ -161,25 +160,26 @@ def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div id="playex" .+?>(.+?)<\/nav>'
    data1 = scrapertools.find_single_match(data, patron)
    patron = "changeLink\('([^']+)'\)"
    matches = scrapertools.find_multiple_matches(data1, patron)
    for url64 in matches:
        url1 = base64.b64decode(url64)
        if 'danimados' in url1:
            url = 'https:' + url1.replace('stream/', 'stream_iframe/')
            id = scrapertools.find_single_match(url, 'iframe/(.*)')
            url = url.replace(id, base64.b64encode(id))
            new_data = httptools.downloadpage(url).data
            new_data = new_data.replace('"', "'")
            url = scrapertools.find_single_match(new_data, "sources:\s*\[\{file:\s*'([^']+)")
            if "zkstream" in url or "cloudup" in url:
                url1 = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
            else:
                url1 = url
        if url1:
            itemlist.append(item.clone(title='%s', url=url1, action="play"))
    if "onclick=\"changeLink('" in data:
        patron = "onclick=.changeLink\('([^']+)'"
        matches = scrapertools.find_multiple_matches(data, patron)
        for id in matches:
            url = devuelve_enlace(base64.b64decode(id))
            itemlist.append(item.clone(title="Ver en %s", url=url, action="play"))
    else:
        patron = 'data-type="([^"]+).*?'
        patron += 'data-post="([^"]+).*?'
        patron += 'data-nume="([^"]+).*?'
        patron += 'server">([^<]+).*?'
        matches = scrapertools.find_multiple_matches(data, patron)
        headers = {"X-Requested-With": "XMLHttpRequest"}
        for scrapedtype, scrapedpost, scrapednume, scrapedserver in matches:
            post = "action=doo_player_ajax&type=%s&post=%s&nume=%s" % (scrapedtype, scrapedpost, scrapednume)
            data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", headers=headers, post=post).data
            url1 = scrapertools.find_single_match(data1, "src='([^']+)")
            url1 = devuelve_enlace(url1)
            if url1:
                itemlist.append(item.clone(title="Ver en %s", url=url1, action="play"))
    tmdb.set_infoLabels(itemlist)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType == "movie" and item.contentChannel != 'videolibrary':
@@ -193,3 +193,18 @@ def findvideos(item):
def play(item):
    item.thumbnail = item.contentThumbnail
    return [item]


def devuelve_enlace(url1):
    if 'danimados' in url1:
        url = 'https:' + url1.replace('stream/', 'stream_iframe/')
        id = scrapertools.find_single_match(url, 'iframe/(.*)')
        url = url.replace(id, base64.b64encode(id))
        new_data = httptools.downloadpage(url).data
        new_data = new_data.replace('"', "'")
        url = scrapertools.find_single_match(new_data, "sources:\s*\[\{file:\s*'([^']+)")
        if "zkstream" in url or "cloudup" in url:
            url1 = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
        else:
            url1 = url
    return url1
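For reference, the danimados branch of the new devuelve_enlace helper boils down to rewriting /stream/<id> into /stream_iframe/<base64(id)> before scraping the jwplayer sources. A minimal standalone sketch of just that rewrite (Python 2 string handling, like the channel code; the sample URL is invented):

import base64

def to_iframe_url(url1):
    # '//danimados.example/stream/abc123' -> 'https://danimados.example/stream_iframe/YWJjMTIz'
    url = 'https:' + url1.replace('stream/', 'stream_iframe/')
    stream_id = url.rsplit('iframe/', 1)[1]
    return url.replace(stream_id, base64.b64encode(stream_id))

When no changeLink options are present, findvideos falls back to what looks like the stock DooPlay theme endpoint: one XHR POST per player option against wp-admin/admin-ajax.php. A standalone sketch of that exchange, using requests in place of the addon's httptools (field names come from the pattern above; the iframe response shape is an assumption):

import requests

def resolve_doo_player(host, opt_type, post_id, nume):
    # DooPlay-style themes resolve each player option through admin-ajax.php
    payload = {"action": "doo_player_ajax", "type": opt_type,
               "post": post_id, "nume": nume}
    headers = {"X-Requested-With": "XMLHttpRequest"}
    r = requests.post(host + "wp-admin/admin-ajax.php", data=payload, headers=headers)
    return r.text  # expected to contain an <iframe src='...'> with the embed URL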

View File

@@ -1,64 +0,0 @@
{
    "id": "diskokosmiko",
    "name": "Diskokosmiko",
    "language": ["cast", "lat"],
    "active": true,
    "adult": false,
    "version": 1,
    "thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
    "banner": "copiapop.png",
    "categories": [
        "movie",
        "tvshow"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "diskokosmikouser",
            "type": "text",
            "color": "0xFFC52020",
            "label": "Usuario Diskokosmiko",
            "enabled": true,
            "visible": true
        },
        {
            "id": "diskokosmikopassword",
            "type": "text",
            "color": "0xFFC52020",
            "hidden": true,
            "label": "Password Diskokosmiko",
            "enabled": "!eq(-1,'')",
            "visible": true
        },
        {
            "id": "adult_content",
            "type": "bool",
            "color": "0xFFd50b0b",
            "label": "Mostrar contenido adulto en las búsquedas",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "perfil",
            "type": "list",
            "label": "Perfil de color",
            "default": 3,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "Sin color",
                "Perfil 3",
                "Perfil 2",
                "Perfil 1"
            ]
        }
    ]
}

View File

@@ -1,366 +0,0 @@
# -*- coding: utf-8 -*-

import re
import threading
import urllib

import xbmc

from core import downloadtools
from core import filetools
from core import httptools
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools

__perfil__ = config.get_setting('perfil', "diskokosmiko")

# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']]
if __perfil__ - 1 >= 0:
    color1, color2, color3, color4, color5 = perfil[__perfil__ - 1]
else:
    color1 = color2 = color3 = color4 = color5 = ""

adult_content = config.get_setting("adult_content", "diskokosmiko")
def login(pagina):
    logger.info()
    try:
        dom = pagina.split(".")[0]
        user = config.get_setting("%suser" % dom, "diskokosmiko")
        password = config.get_setting("%spassword" % dom, "diskokosmiko")
        if not user:
            return False, "Para ver los enlaces de %s es necesario registrarse en %s" % (dom, pagina)
        data = httptools.downloadpage("http://%s" % pagina).data
        if re.search(r'(?i)%s' % user, data):
            return True, ""
        token = scrapertools.find_single_match(data, 'name="__RequestVerificationToken".*?value="([^"]+)"')
        post = "__RequestVerificationToken=%s&UserName=%s&Password=%s" % (token, user, password)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        url_log = "http://%s/action/Account/Login" % pagina
        data = httptools.downloadpage(url_log, post, headers).data
        if "redirectUrl" in data:
            logger.info("Login correcto")
            return True, ""
        else:
            logger.error("Error en el login")
            return False, "Nombre de usuario no válido. Comprueba tus credenciales"
    except:
        import traceback
        logger.error(traceback.format_exc())
        return False, "Error durante el login. Comprueba tus credenciales"


def mainlist(item):
    logger.info()
    itemlist = []
    item.text_color = color1
    logueado, error_message = login("diskokosmiko.mx")
    if not logueado:
        itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
    else:
        item.extra = "http://diskokosmiko.mx/"
        itemlist.append(item.clone(title="DiskoKosmiko", action="", text_color=color2))
        itemlist.append(item.clone(title=" Búsqueda", action="search", url="http://diskokosmiko.mx/action/SearchFiles"))
        itemlist.append(item.clone(title=" Colecciones", action="colecciones",
                                   url="http://diskokosmiko.mx/action/home/MoreNewestCollections?pageNumber=1"))
        itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro",
                                   url="http://diskokosmiko.mx/action/SearchFiles"))
        itemlist.append(item.clone(title=" Mi cuenta", action="cuenta"))
        itemlist.append(item.clone(action="", title=""))
    folder_thumb = filetools.join(config.get_data_path(), 'thumbs_disko')
    files = filetools.listdir(folder_thumb)
    if files:
        itemlist.append(
            item.clone(title="Eliminar caché de imágenes (%s)" % len(files), action="delete_cache", text_color="red"))
    itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold"))
    return itemlist


def search(item, texto):
    logger.info()
    item.post = "Mode=List&Type=Video&Phrase=%s&SizeFrom=0&SizeTo=0&Extension=&ref=pager&pageNumber=1" % texto.replace(
        " ", "+")
    try:
        return listado(item)
    except:
        import sys, traceback
        for line in sys.exc_info():
            logger.error("%s" % line)
        logger.error(traceback.format_exc())
        return []


def configuracion(item):
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret
def listado(item):
    logger.info()
    itemlist = []
    data_thumb = httptools.downloadpage(item.url, item.post.replace("Mode=List", "Mode=Gallery")).data
    if not item.post:
        data_thumb = ""
        item.url = item.url.replace("/gallery,", "/list,")
    data = httptools.downloadpage(item.url, item.post).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
    folder = filetools.join(config.get_data_path(), 'thumbs_disko')
    patron = 'data-file-id(.*?</p>)</div></div>'
    bloques = scrapertools.find_multiple_matches(data, patron)
    for block in bloques:
        if "adult_info" in block and not adult_content:
            continue
        size = scrapertools.find_single_match(block, '<p.*?>([^<]+)</p>')
        patron = 'class="name"><a href="([^"]+)".*?>([^<]+)<'
        scrapedurl, scrapedtitle = scrapertools.find_single_match(block, patron)
        scrapedthumbnail = scrapertools.find_single_match(block, "background-image:url\('([^']+)'")
        if scrapedthumbnail:
            try:
                thumb = scrapedthumbnail.split("-", 1)[0].replace("?", "\?")
                if data_thumb:
                    url_thumb = scrapertools.find_single_match(data_thumb, "(%s[^']+)'" % thumb)
                else:
                    url_thumb = scrapedthumbnail
                scrapedthumbnail = filetools.join(folder, "%s.jpg" % url_thumb.split("e=", 1)[1][-20:])
            except:
                scrapedthumbnail = ""
        if scrapedthumbnail:
            t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, url_thumb])
            t.setDaemon(True)
            t.start()
        else:
            scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png"
        scrapedurl = item.extra + scrapedurl
        title = "%s (%s)" % (scrapedtitle, size)
        if "adult_info" in block:
            title += " [COLOR %s][+18][/COLOR]" % color4
        plot = scrapertools.find_single_match(block, '<div class="desc">(.*?)</div>')
        if plot:
            plot = scrapertools.decodeHtmlentities(plot)
        new_item = Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
                        thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, text_color=color2,
                        extra=item.extra, infoLabels={'plot': plot}, post=item.post)
        if item.post:
            try:
                new_item.folderurl, new_item.foldername = scrapertools.find_single_match(block,
                        '<p class="folder"><a href="([^"]+)".*?>([^<]+)<')
            except:
                pass
        else:
            new_item.folderurl = item.url.rsplit("/", 1)[0]
            new_item.foldername = item.foldername
        new_item.fanart = item.thumbnail
        itemlist.append(new_item)
    next_page = scrapertools.find_single_match(data, 'class="pageSplitter.*?" data-nextpage-number="([^"]+)"')
    if next_page:
        if item.post:
            post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page, item.post)
            url = item.url
        else:
            url = re.sub(r',\d+\?ref=pager', ",%s?ref=pager" % next_page, item.url)
            post = ""
        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página Siguiente (%s)" % next_page,
                             url=url, post=post, extra=item.extra))
    return itemlist
def findvideos(item):
    logger.info()
    itemlist = []
    itemlist.append(item.clone(action="play", title="Reproducir/Descargar", server="diskokosmiko"))
    usuario = scrapertools.find_single_match(item.url, '%s/([^/]+)/' % item.extra)
    url_usuario = item.extra + "/" + usuario
    if item.folderurl and not item.folderurl.startswith(item.extra):
        item.folderurl = item.extra + item.folderurl
    if item.post:
        itemlist.append(item.clone(action="listado", title="Ver colección: %s" % item.foldername,
                                   url=item.folderurl + "/gallery,1,1?ref=pager", post=""))
    data = httptools.downloadpage(item.folderurl).data
    token = scrapertools.find_single_match(data,
            'data-action="followChanged.*?name="__RequestVerificationToken".*?value="([^"]+)"')
    collection_id = item.folderurl.rsplit("-", 1)[1]
    post = "__RequestVerificationToken=%s&collectionId=%s" % (token, collection_id)
    url = "%s/action/Follow/Follow" % item.extra
    title = "Seguir Colección: %s" % item.foldername
    if "dejar de seguir" in data:
        title = "Dejar de seguir la colección: %s" % item.foldername
        url = "%s/action/Follow/UnFollow" % item.extra
    itemlist.append(item.clone(action="seguir", title=title, url=url, post=post, text_color=color5, folder=False))
    itemlist.append(
        item.clone(action="colecciones", title="Ver colecciones del usuario: %s" % usuario, url=url_usuario))
    return itemlist


def colecciones(item):
    logger.info()
    itemlist = []
    usuario = False
    data = httptools.downloadpage(item.url).data
    if "Ver colecciones del usuario" not in item.title and not item.index:
        data = jsontools.load(data)["Data"]
        content = data["Content"]
        content = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", content)
    else:
        usuario = True
        if item.follow:
            content = scrapertools.find_single_match(data,
                    'id="followed_collections"(.*?)<div id="recommended_collections"')
        else:
            content = scrapertools.find_single_match(data,
                    '<div id="collections".*?<div class="collections_list(.*?)<div class="collections_list')
        content = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", content)
    patron = '<a class="name" href="([^"]+)".*?>([^<]+)<.*?src="([^"]+)".*?<p class="info">(.*?)</p>'
    matches = scrapertools.find_multiple_matches(content, patron)
    index = ""
    if item.index and item.index != "0":
        matches = matches[item.index:item.index + 20]
        if len(matches) > item.index + 20:
            index = item.index + 20
    elif len(matches) > 20:
        matches = matches[:20]
        index = 20
    folder = filetools.join(config.get_data_path(), 'thumbs_disko')
    for url, scrapedtitle, thumb, info in matches:
        url = item.extra + url + "/gallery,1,1?ref=pager"
        title = "%s (%s)" % (scrapedtitle, scrapertools.htmlclean(info))
        try:
            scrapedthumbnail = filetools.join(folder, "%s.jpg" % thumb.split("e=", 1)[1][-20:])
        except:
            try:
                scrapedthumbnail = filetools.join(folder, "%s.jpg" % thumb.split("/thumbnail/", 1)[1][-20:])
                thumb = thumb.replace("/thumbnail/", "/")
            except:
                scrapedthumbnail = ""
        if scrapedthumbnail:
            t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, thumb])
            t.setDaemon(True)
            t.start()
        else:
            scrapedthumbnail = thumb
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url,
                             thumbnail=scrapedthumbnail, text_color=color2, extra=item.extra,
                             foldername=scrapedtitle))
    if not usuario and data.get("NextPageUrl"):
        url = item.extra + data["NextPageUrl"]
        itemlist.append(item.clone(title=">> Página Siguiente", url=url, text_color=""))
    elif index:
        itemlist.append(item.clone(title=">> Página Siguiente", url=item.url, index=index, text_color=""))
    return itemlist
def seguir(item):
    logger.info()
    data = httptools.downloadpage(item.url, item.post)
    message = "Colección seguida"
    if "Dejar" in item.title:
        message = "La colección ya no se sigue"
    if data.sucess and config.get_platform() != "plex":
        platformtools.dialog_notification("Acción correcta", message)


def cuenta(item):
    logger.info()
    itemlist = []
    if "diskokosmiko" in item.extra:
        web = "diskokosmiko"
        logueado, error_message = login("diskokosmiko.mx")
        if not logueado:
            itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
            return itemlist
    user = config.get_setting("%suser" % web, "diskokosmiko")
    user = unicode(user, "utf8").lower().encode("utf8")
    url = item.extra + "/" + urllib.quote(user)
    data = httptools.downloadpage(url).data
    num_col = scrapertools.find_single_match(data, 'name="Has_collections" value="([^"]+)"')
    if num_col != "0":
        itemlist.append(item.clone(action="colecciones", url=url, index="0", title="Ver mis colecciones",
                                   text_color=color5))
    else:
        itemlist.append(item.clone(action="", title="No tienes ninguna colección", text_color=color4))
    num_follow = scrapertools.find_single_match(data, 'name="Follows_collections" value="([^"]+)"')
    if num_follow != "0":
        itemlist.append(item.clone(action="colecciones", url=url, index="0", title="Colecciones que sigo",
                                   text_color=color5, follow=True))
    else:
        itemlist.append(item.clone(action="", title="No sigues ninguna colección", text_color=color4))
    return itemlist
def filtro(item):
    logger.info()
    list_controls = []
    valores = {}
    dict_values = None
    list_controls.append({'id': 'search', 'label': 'Texto a buscar', 'enabled': True, 'color': '0xFFC52020',
                          'type': 'text', 'default': '', 'visible': True})
    list_controls.append({'id': 'tipo', 'label': 'Tipo de búsqueda', 'enabled': True, 'color': '0xFFFF8000',
                          'type': 'list', 'default': -1, 'visible': True})
    list_controls[1]['lvalues'] = ['Aplicación', 'Archivo', 'Documento', 'Imagen', 'Música', 'Vídeo', 'Todos']
    valores['tipo'] = ['Application', 'Archive', 'Document', 'Image', 'Music', 'Video', '']
    list_controls.append({'id': 'ext', 'label': 'Extensión', 'enabled': True, 'color': '0xFFF4FA58',
                          'type': 'text', 'default': '', 'visible': True})
    list_controls.append({'id': 'tmin', 'label': 'Tamaño mínimo (MB)', 'enabled': True, 'color': '0xFFCC2EFA',
                          'type': 'text', 'default': '0', 'visible': True})
    list_controls.append({'id': 'tmax', 'label': 'Tamaño máximo (MB)', 'enabled': True, 'color': '0xFF2ECCFA',
                          'type': 'text', 'default': '0', 'visible': True})
    # Use the saved values, or the defaults
    if "diskokosmiko" in item.extra:
        web = "diskokosmiko"
    valores_guardados = config.get_setting("filtro_defecto_" + web, item.channel)
    if valores_guardados:
        dict_values = valores_guardados
    item.valores = valores
    return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values,
                                               caption="Filtra la búsqueda", item=item, callback='filtrado')


def filtrado(item, values):
    values_copy = values.copy()
    if "diskokosmiko" in item.extra:
        web = "diskokosmiko"
    # Save the filter so it is loaded as the default next time
    config.set_setting("filtro_defecto_" + web, values_copy, item.channel)
    tipo = item.valores["tipo"][values["tipo"]]
    search = values["search"]
    ext = values["ext"]
    tmin = values["tmin"]
    tmax = values["tmax"]
    if not tmin.isdigit():
        tmin = "0"
    if not tmax.isdigit():
        tmax = "0"
    item.valores = ""
    item.post = "Mode=List&Type=%s&Phrase=%s&SizeFrom=%s&SizeTo=%s&Extension=%s&ref=pager&pageNumber=1" \
                % (tipo, search, tmin, tmax, ext)
    item.action = "listado"
    return listado(item)
def download_thumb(filename, url):
    lock = threading.Lock()
    lock.acquire()
    folder = filetools.join(config.get_data_path(), 'thumbs_disko')
    if not filetools.exists(folder):
        filetools.mkdir(folder)
    lock.release()
    if not filetools.exists(filename):
        downloadtools.downloadfile(url, filename, silent=True)
    return filename


def delete_cache(url):
    folder = filetools.join(config.get_data_path(), 'thumbs_disko')
    filetools.rmdirtree(folder)
    if config.is_xbmc():
        xbmc.executebuiltin("Container.Refresh")

View File

@@ -45,7 +45,7 @@ thumbletras = {'#': 'https://s32.postimg.cc/drojt686d/image.png',
audio = {'Latino': '[COLOR limegreen]LATINO[/COLOR]', 'Español': '[COLOR yellow]ESPAÑOL[/COLOR]',
         'Sub Español': '[COLOR red]SUB ESPAÑOL[/COLOR]'}

host = 'http://pelisgratis.tv/'
host = 'http://pelisgratis.me/'
def mainlist(item):
@@ -187,9 +187,9 @@ def findvideos(item):
    itemlist = []
    data = get_source(item.url)
    patron = '<div class=TPlayerTb.Current id=(.*?)>.*?src=(.*?) frameborder'
    data = data.replace("&lt;", "<").replace("&quot;", '"').replace("&gt;", ">").replace("&amp;", "&").replace('\"', "")
    patron = '<div class=TPlayerTb.*?id=(.*?)>.*?src=(.*?) frameborder'
    matches = re.compile(patron, re.DOTALL).findall(data)
    base_link = 'https://repros.live/player/ajaxdata'
    for opt, urls_page in matches:
        language = scrapertools.find_single_match(data, 'TPlayerNv>.*?tplayernv=%s><span>Opción.*?<span>(.*?)</span>' % opt)
        headers = {'referer': item.url}
@@ -197,18 +197,11 @@ def findvideos(item):
        urls_page = scrapertools.decodeHtmlentities(urls_page)
        sub_data = httptools.downloadpage(urls_page).data
        urls_page = scrapertools.find_single_match(sub_data, 'src="(.*?)" ')
        video_data = httptools.downloadpage(urls_page, headers=headers).data
        servers = scrapertools.find_multiple_matches(video_data, 'data-player="(.*?)" data-embed="(.*?)">')
        for server, code in servers:
            post = {'codigo': code}
            post = urllib.urlencode(post)
            video_json = jsontools.load(httptools.downloadpage('https://repros.live/player/ajaxdata', post=post).data)
            url = video_json['url']
            itemlist.append(item.clone(title='[%s][%s]',
                                       url=url,
                                       action='play',
                                       language=language,
                                       ))
        itemlist.append(item.clone(title='[%s][%s]',
                                   url=urls_page,
                                   action='play',
                                   language=language,
                                   ))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
    return itemlist

View File

@@ -83,6 +83,7 @@ def sub_search(item):
    data = httptools.downloadpage(item.url + "&_token=" + token, headers=headers).data
    data_js = jsontools.load(data)["data"]["m"]
    for js in data_js:
        js["title"] = quitano(js["title"])
        itemlist.append(Item(channel = item.channel,
                             action = "findvideos",
                             contentTitle = js["title"],
@@ -113,6 +114,7 @@ def peliculas_gen(item):
    patron += '<p>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedyear, scrapedtitle in matches:
        scrapedtitle = quitano(scrapedtitle)
        itemlist.append(Item(channel = item.channel,
                             action = "findvideos",
                             contentTitle = scrapedtitle,
@@ -136,6 +138,7 @@ def estrenos(item):
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedyear, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.replace("Película ", "")
        scrapedtitle = quitano(scrapedtitle)
        itemlist.append(Item(channel = item.channel,
                             action = "findvideos",
                             contentTitle = scrapedtitle,
@@ -166,6 +169,7 @@ def peliculas(item):
    patron += '<p>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedyear, scrapedtitle in matches:
        scrapedtitle = quitano(scrapedtitle)
        itemlist.append(Item(channel = item.channel,
                             action = "findvideos",
                             contentTitle = scrapedtitle,
@@ -301,3 +305,9 @@ def play(item):
    itemlist = servertools.get_servers_itemlist(itemlist)
    item.thumbnail = item.contentThumbnail
    return itemlist


def quitano(title):
    # Strip the year the site shows in the title, so the tmdb lookup works properly
    t = title.replace(scrapertools.find_single_match(title, '\(\s*\d{4}\)'), "")
    return t.strip()
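An equivalent standalone version of the helper, using plain re instead of scrapertools, can be handy for quick testing (the sample title is invented):

import re

def quitano(title):
    # remove a '(1999)'-style year tag so the clean title matches tmdb search
    return re.sub(r'\(\s*\d{4}\)', '', title).strip()

print(quitano('El laberinto del fauno (2006)'))  # -> 'El laberinto del fauno'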

View File

@@ -1,38 +0,0 @@
{
    "id": "seriesblancoxyz",
    "name": "SeriesBlanco.xyz",
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "thumbnail": "https://s22.postimg.cc/nucz720sx/image.png",
    "banner": "",
    "categories": [
        "tvshow",
        "vos"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "Cast",
                "Lat",
                "VOSE",
                "VO"
            ]
        }
    ]
}

View File

@@ -1,323 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel SeriesBlanco.xyz -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re

from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb

host = 'http://seriesblanco.xyz/'

IDIOMAS = {'Esp': 'Cast', 'es': 'Cast', 'la': 'Lat', 'Latino': 'Lat', 'vos': 'VOSE', 'vo': 'VO'}
list_language = IDIOMAS.values()
list_quality = ['SD', 'Micro-HD-720p', '720p', 'HDitunes', 'Micro-HD-1080p']
list_servers = ['powvideo', 'yourupload', 'openload', 'gamovideo', 'flashx', 'clipwatching', 'streamango', 'streamcloud']
def mainlist(item):
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []
    itemlist.append(Item(channel=item.channel,
                         title="Nuevos Capitulos",
                         action="new_episodes",
                         thumbnail=get_thumb('new_episodes', auto=True),
                         url=host))
    itemlist.append(Item(channel=item.channel,
                         title="Todas",
                         action="list_all",
                         thumbnail=get_thumb('all', auto=True),
                         url=host + 'listado/',
                         ))
    itemlist.append(Item(channel=item.channel,
                         title="Generos",
                         action="section",
                         thumbnail=get_thumb('genres', auto=True),
                         url=host,
                         ))
    # itemlist.append(Item(channel=item.channel,
    #                      title="A - Z",
    #                      action="section",
    #                      thumbnail=get_thumb('alphabet', auto=True),
    #                      url=host + 'listado/', ))
    itemlist.append(Item(channel=item.channel,
                         title="Buscar",
                         action="search",
                         thumbnail=get_thumb('search', auto=True)))
    itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


def list_all(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    data = data.replace("'", '"')
    patron = '<li><div style=.*?><a href="([^"]+)"><img.*?src="([^"]+)" title="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.strip()
        url = host + scrapedurl
        thumbnail = scrapedthumbnail
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(Item(channel=item.channel,
                             action='seasons',
                             title=title,
                             url=url,
                             thumbnail=thumbnail,
                             contentSerieName=scrapedtitle,
                             context=filtertools.context(item, list_language, list_quality),
                             ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination
    if itemlist != []:
        base_page = scrapertools.find_single_match(item.url, '(.*?)?')
        next_page = scrapertools.find_single_match(data, '</span><a href=?pagina=2>>></a>')
        if next_page != '':
            itemlist.append(Item(channel=item.channel,
                                 action="lista",
                                 title='Siguiente >>>',
                                 url=base_page + next_page,
                                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
                                 ))
    return itemlist
def section(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    if item.title == 'Generos':
        patron = '<li><a href="([^"]+)"><i class="fa fa-bookmark-o"></i> (.*?)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        if item.title == 'Generos':
            url = host + scrapedurl
        title = scrapedtitle
        itemlist.append(Item(channel=item.channel,
                             action='list_all',
                             title=title,
                             url=url
                             ))
    return itemlist
def seasons(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = "<p class='panel-primary btn-primary'> Temporada (\d+)</p>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    id = scrapertools.find_single_match(data, "onclick='loadSeason\((\d+),\d+\);")
    for scrapedseason in matches:
        url = item.url
        title = 'Temporada %s' % scrapedseason
        contentSeasonNumber = scrapedseason
        infoLabels['season'] = contentSeasonNumber
        thumbnail = item.thumbnail
        itemlist.append(Item(channel=item.channel,
                             action="episodesxseason",
                             title=title,
                             url=url,
                             thumbnail=thumbnail,
                             id=id,
                             contentSeasonNumber=contentSeasonNumber,
                             infoLabels=infoLabels
                             ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                             url=item.url,
                             action="add_serie_to_library",
                             extra="episodios",
                             contentSerieName=item.contentSerieName,
                             ))
    return itemlist
def episodios(item):
    logger.info()
    itemlist = []
    templist = seasons(item)
    for tempitem in templist:
        itemlist += episodesxseason(tempitem)
    return itemlist
def episodesxseason(item):
    logger.info()
    itemlist = []
    season = item.contentSeasonNumber
    season_url = '%sajax/visto3.php?season_id=%s&season_number=%s' % (host, item.id, season)
    data = get_source(season_url)
    patron = "<a href='([^ ]+)'.*?>.*?\d+x(\d+).*?-([^<]+)<.*?(/banderas.*?)</td>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for scrapedurl, scraped_episode, scrapedtitle, lang_data in matches:
        url = host + scrapedurl
        title = '%sx%s - %s' % (season, scraped_episode, scrapedtitle.strip())
        infoLabels['episode'] = scraped_episode
        thumbnail = item.thumbnail
        title, language = add_language(title, lang_data)
        itemlist.append(Item(channel=item.channel,
                             action="findvideos",
                             title=title,
                             url=url,
                             thumbnail=thumbnail,
                             language=language,
                             infoLabels=infoLabels
                             ))
    itemlist = filtertools.get_links(itemlist, item, list_language)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def new_episodes(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    data = data.replace("'", '"')
    data = scrapertools.find_single_match(data,
            '<center>Series Online : Capítulos estrenados recientemente</center>.*?</ul>')
    patron = '<li><h6.*?src="([^"]+)".*?aalt="([^"]+)".*?href="([^"]+)">.*?src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for lang_data, scrapedtitle, scrapedurl, scrapedthumbnail in matches:
        url = host + scrapedurl
        thumbnail = scrapedthumbnail
        season_episode = scrapertools.find_single_match(scrapedtitle, '.*? (\d+x\d+) ')
        scrapedtitle = scrapertools.find_single_match(scrapedtitle, '(.*?) \d+x')
        title = '%s - %s' % (scrapedtitle, season_episode)
        title, language = add_language(title, lang_data)
        itemlist.append(Item(channel=item.channel,
                             action='findvideos',
                             title=title,
                             url=url,
                             thumbnail=thumbnail,
                             language=language,
                             ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def add_language(title, string):
    logger.info()
    languages = scrapertools.find_multiple_matches(string, '/banderas/(.*?).png')
    language = []
    for lang in languages:
        if 'jap' in lang or lang not in IDIOMAS:
            lang = 'vos'
        if len(languages) == 1:
            language = IDIOMAS[lang]
            title = '%s [%s]' % (title, language)
        else:
            language.append(IDIOMAS[lang])
            title = '%s [%s]' % (title, IDIOMAS[lang])
    return title, language
def findvideos(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    data = data.replace("'", '"')
    patron = '<a href=([^ ]+) target="_blank"><img src="/servidores/(.*?).(?:png|jpg)".*?sno.*?'
    patron += '<span>(.*?)<.*?(/banderas.*?)td'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, server, quality, lang_data in matches:
        title = server.capitalize()
        if quality == '':
            quality = 'SD'
        title = '%s [%s]' % (title, quality)
        title, language = add_language(title, lang_data)
        thumbnail = item.thumbnail
        enlace_id, serie_id, se, ep = scrapertools.find_single_match(scrapedurl, 'enlace(\d+)/(\d+)/(\d+)/(\d+)/')
        url = host + 'ajax/load_enlace.php?serie=%s&temp=%s&cap=%s&id=%s' % (serie_id, se, ep, enlace_id)
        itemlist.append(Item(channel=item.channel,
                             title=title,
                             url=url,
                             action="play",
                             thumbnail=thumbnail,
                             server=server,
                             quality=quality,
                             language=language,
                             infoLabels=item.infoLabels
                             ))
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    return sorted(itemlist, key=lambda it: it.language)
def play(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.infoLabels = item.infoLabels
    return itemlist


def search(item, texto):
    logger.info()
    if texto != '':
        item.url = host + 'search.php?q1=%s' % texto
        return list_all(item)

View File

@@ -7,7 +7,7 @@ from core import tmdb
from core.item import Item
from platformcode import logger

host = 'http://www.sipeliculas.com'
host = 'http://www.siestrenos.com'


def mainlist(item):
    logger.info()

View File

@@ -15,7 +15,7 @@ from platformcode import config, logger, platformtools
idio = {'https://cdn.yape.nu//languajes/la.png': 'LAT','https://cdn.yape.nu//languajes/es.png': 'ESP','https://cdn.yape.nu//languajes/en_es.png': 'VOSE'}
cali = {'TS Screnner': 'TS Screnner', 'HD 1080p': 'HD 1080p','TS Screener HQ':'TS Screener HQ', 'BR Screnner':'BR Screnner','HD Rip':'HD Rip','DVD Screnner':'DVD Screnner'}
cali = {'TS Screnner': 'TS Screnner', 'HD 1080p': 'HD 1080p','TS Screener HQ':'TS Screener HQ', 'BR Screnner':'BR Screnner','HD Rip':'HD Rip','DVD Screnner':'DVD Screnner', 'DVD Rip':'DVD Rip'}
list_language = idio.values()
list_quality = cali.values()

View File

@@ -1,43 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "((?:diskokosmiko.mx)/[^\\s'\"]+)",
                "url": "http://\\1"
            }
        ]
    },
    "free": true,
    "id": "diskokosmoko",
    "name": "diskokosmiko",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
    "version": 1
}

View File

@@ -1,50 +0,0 @@
# -*- coding: utf-8 -*-

from channels import kbagi
from core import httptools
from core import jsontools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    domain = "diskokosmiko.mx"
    logueado, error_message = diskokosmiko.login(domain)
    if not logueado:
        return False, error_message
    data = httptools.downloadpage(page_url).data
    if ("File was deleted" or "Not Found" or "File was locked by administrator") in data:
        return False, "[%s] El archivo no existe o ha sido borrado" % domain
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    host = "http://diskokosmiko.mx"
    host_string = "diskokosmiko"
    url = scrapertools.find_single_match(data, '<form action="([^"]+)" class="download_form"')
    if url:
        url = host + url
        fileid = url.rsplit("f=", 1)[1]
        token = scrapertools.find_single_match(data,
                '<div class="download_container">.*?name="__RequestVerificationToken".*?value="([^"]+)"')
        post = "fileId=%s&__RequestVerificationToken=%s" % (fileid, token)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        data = httptools.downloadpage(url, post, headers).data
        data = jsontools.load(data)
        mediaurl = data.get("DownloadUrl")
        extension = data.get("Extension")
        video_urls.append([".%s [%s]" % (extension, host_string), mediaurl])
    for video_url in video_urls:
        logger.info(" %s - %s" % (video_url[0], video_url[1]))
    return video_urls

View File

@@ -4,7 +4,6 @@
# --------------------------------------------------------
import re
import urllib
from core import httptools
from core import scrapertools
from platformcode import logger
@@ -29,12 +28,14 @@ def get_video_url(page_url, premium = False, user = "", password = "", video_pas
    headers = {'referer': page_url}
    for i in range(0, 3):
        data = httptools.downloadpage(page_url, headers=headers).data
        data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
        if '<input type=hidden' in data:
        if '゚ω゚ノ' in data:
            break
        else:
            page_url = scrapertools.find_single_match(data, "iframe src=(.*?) scrolling")
    code = re.findall('<script>\s*゚ω゚(.*?)</script>', data, flags=re.DOTALL)[0]
            page_url = scrapertools.find_single_match(data, '"iframe" src="([^"]+)')
            if not page_url:
                page_url = scrapertools.find_single_match(data, '<input type="hidden" id="link" value="([^"]+)')
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    code = scrapertools.find_single_match(data, '(?s)<script>\s*゚ω゚(.*?)</script>').strip()
    text_decode = aadecode(code)
    funcion, clave = re.findall("func\.innerHTML = (\w*)\('([^']*)', ", text_decode, flags=re.DOTALL)[0]
    # decode the javascript stored in hidden html fields

View File

@@ -7,29 +7,25 @@ from platformcode import logger
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "Not Found" in data or "File Does not Exist" in data:
        return False, "[Vidto.me] El fichero no existe o ha sido borrado"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    data = httptools.downloadpage(page_url).data
    # Extract the URL
    # {file:"http://188.240.220.186/drjhpzy4lqqwws4phv3twywfxej5nwmi4nhxlriivuopt2pul3o4bkge5hxa/video.mp4",label:"240p"}
    code = scrapertools.find_single_match(data, 'name="code" value="([^"]+)')
    hash = scrapertools.find_single_match(data, 'name="hash" value="([^"]+)')
    post = "op=download1&code=%s&hash=%s&imhuman=Proceed+to+video" % (code, hash)
    data1 = httptools.downloadpage("http://m.vidtome.stream/playvideo/%s" % code, post=post).data
    video_urls = []
    media_urls = scrapertools.find_multiple_matches(data, '\{file\s*:\s*"([^"]+)",label\s*:\s*"([^"]+)"\}')
    for media_url, label in media_urls:
    media_urls = scrapertools.find_multiple_matches(data1, 'file: "([^"]+)')
    for media_url in media_urls:
        ext = scrapertools.get_filename_from_url(media_url)[-4:]
        video_urls.append(["%s (%s) [vidto.me]" % (ext, label), media_url])
        video_urls.append(["%s [vidto.me]" % (ext), media_url])
    video_urls.reverse()
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
        logger.info("%s" % (video_url[0]))
    return video_urls
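The vidtome fix replays the page's human-check form against the mobile player endpoint and only then scrapes the file: entries. A rough standalone equivalent with requests instead of the addon's httptools (form field names come from the diff above; the response format is an assumption):

import re
import requests

def vidtome_links(page_url):
    data = requests.get(page_url).text
    code = re.search(r'name="code" value="([^"]+)', data).group(1)
    hsh = re.search(r'name="hash" value="([^"]+)', data).group(1)
    # Replay the 'imhuman' download form against the mobile player endpoint
    form = {"op": "download1", "code": code, "hash": hsh,
            "imhuman": "Proceed to video"}
    player = requests.post("http://m.vidtome.stream/playvideo/%s" % code,
                           data=form).text
    return re.findall(r'file: "([^"]+)', player)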

View File

@@ -22,11 +22,12 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    data1 = httptools.downloadpage("https://xdrive.cc/geo_ip").data
    _ip = scrapertools.find_single_match(data1, 'ip":"([^"]+)')
    data = httptools.downloadpage(page_url).data
    videourl = scrapertools.find_multiple_matches(data, "src: '([^']+).*?label: '([^']+)")
    scrapertools.printMatches(videourl)
    for scrapedurl, scrapedquality in videourl:
        scrapedquality = scrapedquality.replace("&uarr;", "")
        video_urls.append([scrapedquality + " [xdrive]", scrapedurl])
    video_urls.sort(key=lambda it: int(it[0].split("P ", 1)[0]))
    video_id = scrapertools.find_single_match(data, '&video_id=(\d+)')
    data = httptools.downloadpage("https://xdrive.cc/secure_link?ip=%s&video_id=%s" % (_ip, video_id)).data.replace("\\", "")
    videourl = scrapertools.find_multiple_matches(data, '"([^"]+)"')
    for scrapedurl in videourl:
        video_urls.append(["[xdrive]", scrapedurl])
    return video_urls
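The new xdrive flow is two extra requests: resolve the caller's IP via geo_ip, then ask secure_link for the final media URLs. A rough equivalent with requests (endpoint paths are from the diff above; the JSON shape of geo_ip and the quoting of the secure_link response are assumptions):

import re
import requests

def xdrive_links(page_url):
    # geo_ip is assumed to return JSON with an "ip" field, per the 'ip":"' regex
    ip = requests.get("https://xdrive.cc/geo_ip").json()["ip"]
    page = requests.get(page_url).text
    video_id = re.search(r'&video_id=(\d+)', page).group(1)
    # secure_link is assumed to answer with escaped, quoted media URLs
    raw = requests.get("https://xdrive.cc/secure_link",
                       params={"ip": ip, "video_id": video_id}).text.replace("\\", "")
    return re.findall(r'"([^"]+)"', raw)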