Commit by alfa_addon_10, 2017-08-17 01:58:04 +02:00
15 changed files with 260 additions and 617 deletions

View File

@@ -113,6 +113,7 @@ class platform(Platformtools):
JsonData["data"]["viewmode"] = parent_item.viewmode
JsonData["data"]["category"] = parent_item.category.capitalize()
JsonData["data"]["host"] = self.controller.host
if parent_item.url: JsonData["data"]["url"] = parent_item.url
# Iterate over the itemlist
for item in itemlist:
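With this change the backend also forwards the parent item's URL to the web interface. A hypothetical example of the JSON message the client receives (field names come from this diff; the sample values are made up):

```python
# Hypothetical message from the platform class to the web client; only the
# "url" key is new, and it is set only when parent_item.url is truthy.
JsonData = {
    "data": {
        "viewmode": "list",               # parent_item.viewmode
        "category": "Peliculas",          # parent_item.category.capitalize()
        "host": "localhost:8080",         # self.controller.host
        "url": "http://allcalidad.com/",  # parent_item.url, when present
    }
}
print(JsonData["data"].get("url"))
```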

View File

@@ -77,7 +77,9 @@ function get_response(data) {
document.getElementById("itemlist").scrollTop = 0;
show_images();
nav_history.newResponse(item_list, data.category);
nav_history.newResponse(item_list, data.category, data.url);
set_original_url(data.url)
//console.debug(nav_history)
send_data({

View File

@@ -94,7 +94,7 @@ function focus_element(element) {
function image_error(thumbnail) {
var src = thumbnail.src;
if (thumbnail.src.indexOf(domain) == 0) {
thumbnail.src = "http://media.tvalacarta.info/pelisalacarta/thumb_folder2.png";
thumbnail.src = "https://github.com/alfa-addon/addon/raw/master/plugin.video.alfa/resources/media/general/default/thumb_folder.png";
}
else {
thumbnail.src = domain + "/proxy/" + encodeURIComponent(btoa(thumbnail.src));
@@ -104,6 +104,19 @@ function image_error(thumbnail) {
};
};
function set_original_url(url){
var currentWebLink = document.getElementById("current_web_link");
if (currentWebLink) {
if (url) {
currentWebLink.style.display = "block";
currentWebLink.href = url;
}
else {
currentWebLink.style.display = "none";
}
}
}
function show_images(){
var container = document.getElementById("itemlist");
var images = container.getElementsByTagName("img");
@@ -222,4 +235,4 @@ function auto_scroll(element) {
function center_window(el) {
el.style.top = document.getElementById("window").offsetHeight / 2 - el.offsetHeight / 2 + "px";
};
};
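Aside from the new default-thumbnail URL, `image_error` keeps its fallback of routing broken images through the server's `/proxy/` endpoint with the original URL base64-encoded. A rough Python equivalent of that URL construction (illustrative only; `btoa` and `encodeURIComponent` are approximated with the standard library):

```python
import base64
from urllib.parse import quote

def proxy_url(domain, src):
    # Mirrors the JS: domain + "/proxy/" + encodeURIComponent(btoa(src))
    encoded = base64.b64encode(src.encode("utf-8")).decode("ascii")
    return domain + "/proxy/" + quote(encoded, safe="")

print(proxy_url("http://localhost:8080", "http://example.com/thumb.jpg"))
```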

View File

@@ -59,7 +59,7 @@ var nav_history = {
}
}
},
"newResponse": function (data, category) {
"newResponse": function (data, category, url) {
if (!this.confirmed) {
if (this.states[this.current].focus >= 0) {
document.getElementById("itemlist").children[this.states[this.current].focus].children[0].focus();
@@ -68,6 +68,7 @@ var nav_history = {
this.states[this.current].end = new Date().getTime();
this.states[this.current].data = data;
this.states[this.current].category = category;
this.states[this.current].source_url = url;
this.confirmed = true;
if (settings.builtin_history && !this.from_nav) {
if (this.current > 0) {
@@ -86,6 +87,7 @@ var nav_history = {
this.states[this.current].end = new Date().getTime();
this.states[this.current].data = data;
this.states[this.current].category = category;
this.states[this.current].source_url = url;
this.states = this.states.slice(0, this.current + 1);
}
this.from_nav = false;
@@ -116,6 +118,7 @@ var nav_history = {
if (this.states[this.current].end - this.states[this.current].start > this.cache) {
document.getElementById("itemlist").innerHTML = this.states[this.current].data.join("");
set_category(this.states[this.current].category)
set_original_url(this.states[this.current].source_url)
if (this.states[this.current].focus >= 0) {
document.getElementById("itemlist").children[this.states[this.current].focus].children[0].focus();
}
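Each cached navigation state now records `source_url` next to `data` and `category`, so going back or forward can also restore the footer link via `set_original_url`. A condensed Python model of that bookkeeping (structure inferred from this diff, not the actual implementation):

```python
# Minimal stand-in for nav_history: every state keeps the source URL so a
# history jump can re-enable the "open original web" link.
states = []

def confirm_state(data, category, url):
    states.append({"data": data, "category": category, "source_url": url})

def restore(index):
    state = states[index]
    # The JS also calls set_category(...) and set_original_url(...) here.
    return state["category"], state["source_url"]

confirm_state(["<li>item</li>"], "peliculas", "http://allcalidad.com/")
print(restore(0))  # ('peliculas', 'http://allcalidad.com/')
```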

View File

@@ -129,7 +129,7 @@
<a href="javascript:void(0)" onmouseover="this.focus()" onmouseout="this.blur()" onclick="send_data({'id':this.parentNode.parentNode.RequestID, 'result': 6})"></a>
<a href="javascript:void(0)" onmouseover="this.focus()" onmouseout="this.blur()" onclick="send_data({'id':this.parentNode.parentNode.RequestID, 'result': 7})"></a>
<a href="javascript:void(0)" onmouseover="this.focus()" onmouseout="this.blur()" onclick="send_data({'id':this.parentNode.parentNode.RequestID, 'result': 8})"></a>
</div>
</div>
<div class="window_footer" id="window_footer">
<a href="javascript:void(0)" class="control_button button_ok" onmouseover="this.focus()" onclick="send_data({'id':this.parentNode.parentNode.RequestID, 'result':true});dialog.closeall();loading.show();">Aceptar</a>
<a href="javascript:void(0)" class="control_button button_close" onmouseover="this.focus()" onclick="send_data({'id':this.parentNode.parentNode.RequestID, 'result':null });dialog.closeall();">Cancelar</a>
@@ -214,7 +214,7 @@
<div class="left">
</div>
<div class="links">
<a href="#">Saber más sobre Alfa</a> | <a href="#">Foro</a>
<a href="#" id="current_web_link" target="_blank" hidden>Abrir web original</a>
</div>
<div class="status" id="status">Desconectado</div>
</div>

View File

@@ -8,19 +8,17 @@ from platformcode import config, logger
host = "http://allcalidad.com/"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Novedades", action="peliculas", url=host))
itemlist.append(Item(channel=item.channel, title="Por género", action="generos_years", url=host, extra="Genero"))
itemlist.append(Item(channel=item.channel, title="Por año", action="generos_years", url=host, extra=">Año<"))
itemlist.append(Item(channel=item.channel, title="Favoritas", action="peliculas", url=host + "/favorites"))
itemlist.append(Item(channel=item.channel, title=""))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + "?s="))
itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host))
itemlist.append(Item(channel = item.channel, title = "Por género", action = "generos_years", url = host, extra = "Genero" ))
itemlist.append(Item(channel = item.channel, title = "Por año", action = "generos_years", url = host, extra = ">Año<"))
itemlist.append(Item(channel = item.channel, title = "Favoritas", action = "peliculas", url = host + "/favorites" ))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "?s="))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
@@ -57,16 +55,16 @@ def generos_years(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)%s(.*?)</ul></div>' % item.extra
patron = '(?s)%s(.*?)</ul></div>' %item.extra
bloque = scrapertools.find_single_match(data, patron)
patron = 'href="([^"]+)'
patron = 'href="([^"]+)'
patron += '">([^<]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, titulo in matches:
itemlist.append(Item(channel=item.channel,
action="peliculas",
title=titulo,
url=url
itemlist.append(Item(channel = item.channel,
action = "peliculas",
title = titulo,
url = url
))
return itemlist
@@ -86,22 +84,22 @@ def peliculas(item):
year = scrapertools.find_single_match(varios, 'Año.*?kinopoisk">([^<]+)')
year = scrapertools.find_single_match(year, '[0-9]{4}')
mtitulo = titulo + " (" + idioma + ") (" + year + ")"
new_item = Item(channel=item.channel,
action="findvideos",
title=mtitulo,
fulltitle=titulo,
thumbnail=thumbnail,
url=url,
contentTitle=titulo,
contentType="movie"
)
new_item = Item(channel = item.channel,
action = "findvideos",
title = mtitulo,
fulltitle = titulo,
thumbnail = thumbnail,
url = url,
contentTitle = titulo,
contentType="movie"
)
if year:
new_item.infoLabels['year'] = int(year)
itemlist.append(new_item)
url_pagina = scrapertools.find_single_match(data, 'next" href="([^"]+)')
if url_pagina != "":
pagina = "Pagina: " + scrapertools.find_single_match(url_pagina, "page/([0-9]+)")
itemlist.append(Item(channel=item.channel, action="peliculas", title=pagina, url=url_pagina))
itemlist.append(Item(channel = item.channel, action = "peliculas", title = pagina, url = url_pagina))
return itemlist
@@ -120,25 +118,24 @@ def findvideos(item):
elif "vimeo" in url:
url += "|" + "http://www.allcalidad.com"
itemlist.append(
Item(channel=item.channel,
action="play",
title=titulo,
fulltitle=item.fulltitle,
thumbnail=item.thumbnail,
server=server,
url=url
Item(channel = item.channel,
action = "play",
title = titulo,
fulltitle = item.fulltitle,
thumbnail = item.thumbnail,
server = "",
url = url
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if itemlist:
itemlist.append(Item(channel=item.channel))
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
# "Add this movie to the KODI library" option
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
filtro=True, action="add_pelicula_to_library", url=item.url,
thumbnail=item.thumbnail,
filtro=True, action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
extra="library"))
return itemlist
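Note the functional change hidden in the reformatting: `findvideos` now passes `server = ""` and defers detection to `servertools.get_servers_itemlist`, whose lambda fills the `%s` placeholder in each title with the detected server, capitalized. A simplified sketch of that pattern (stand-in objects, assuming titles carry a `%s` slot as the lambda requires; the real helper infers the server from `item.url`):

```python
# Simplified stand-in: the real servertools.get_servers_itemlist detects each
# item's server from its URL, then applies the title template below.
class FakeItem(object):
    def __init__(self, title, server):
        self.title, self.server = title, server

def get_servers_itemlist(itemlist, title_fn):
    for item in itemlist:
        item.title = title_fn(item)
    return itemlist

items = [FakeItem("Enlace en %s", "openload")]   # title carries the %s slot
get_servers_itemlist(items, lambda i: i.title % i.server.capitalize())
print(items[0].title)  # Enlace en Openload
```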

View File

@@ -1,40 +0,0 @@
{
"id": "mocosoftx",
"name": "MocosoftX",
"active": true,
"adult": true,
"language": "es",
"thumbnail": "mocosoftx.png",
"banner": "mocosoftx.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "05/08/2016",
"description": "Eliminado de sección películas."
}
],
"categories": [
"adult"
],
"settings": [
{
"id": "mocosoftxuser",
"type": "text",
"label": "@30014",
"enabled": true,
"visible": true
},
{
"id": "mocosoftxpassword",
"type": "text",
"label": "@30015",
"enabled": true,
"visible": true,
"hidden": true
}
]
}

View File

@@ -1,207 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
import urlparse
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
MAIN_HEADERS = []
MAIN_HEADERS.append(["Host", "mocosoftx.com"])
MAIN_HEADERS.append(["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:8.0) Gecko/20100101 Firefox/8.0"])
MAIN_HEADERS.append(["Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"])
MAIN_HEADERS.append(["Accept-Language", "es-es,es;q=0.8,en-us;q=0.5,en;q=0.3"])
MAIN_HEADERS.append(["Accept-Charset", "ISO-8859-1,utf-8;q=0.7,*;q=0.7"])
MAIN_HEADERS.append(["Connection", "keep-alive"])
# Login:
# <form action="http://mocosoftx.com/foro/login2/" method="post" accept-charset="ISO-8859-1" onsubmit="hashLoginPassword(this, '3e468fdsab5d9');" >
# post: user=blablabla&passwrd=&cookielength=-1&hash_passwrd=78e88DSe408508d22f
# doForm.hash_passwrd.value = hex_sha1(hex_sha1(doForm.user.value.php_to8bit().php_strtolower() + doForm.passwrd.value.php_to8bit()) + cur_session_id);
def login():
# Find out the session id
data = scrapertools.cache_page("http://mocosoftx.com/foro/login/")
cur_session_id = scrapertools.get_match(data,
'form action="[^"]+" name="frmLogin" id="frmLogin" method="post" accept-charset="ISO-8859-1" onsubmit="hashLoginPassword\(this, \'([a-z0-9]+)\'')
cur_session_id = "c95633073dc6afaa813d33b2bfeda520"
logger.info("cur_session_id=" + cur_session_id)
# Compute the password hash
email = config.get_setting("mocosoftxuser", "mocosoftx")
password = config.get_setting("mocosoftxpassword", "mocosoftx")
logger.info("email=" + email)
logger.info("password=" + password)
# doForm.hash_passwrd.value = hex_sha1(hex_sha1(doForm.user.value.php_to8bit().php_strtolower() + doForm.passwrd.value.php_to8bit()) + cur_session_id);
hash_passwrd = scrapertools.get_sha1(scrapertools.get_sha1(email.lower() + password.lower()) + cur_session_id)
logger.info("hash_passwrd=" + hash_passwrd)
# Submit the email
# post = "user="+email+"&passwrd=&cookieneverexp=on&hash_passwrd="+hash_passwrd
post = urllib.urlencode({'user': email, "passwrd": password}) + "&cookieneverexp=on&hash_passwrd="
logger.info("post=" + post)
headers = []
headers.append(["Host", "mocosoftx.com"])
headers.append(["User-Agent",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36"])
headers.append(["Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"])
headers.append(["Accept-Language", "es-ES,es;q=0.8,en;q=0.6,gl;q=0.4"])
headers.append(["Accept-Encoding", "gzip, deflate"])
headers.append(["Connection", "keep-alive"])
headers.append(["Referer", "http://mocosoftx.com/foro/login/"])
headers.append(["Origin", "http://mocosoftx.com"])
headers.append(["Content-Type", "application/x-www-form-urlencoded"])
headers.append(["Content-Length", str(len(post))])
headers.append(["Cache-Control", "max-age=0"])
headers.append(["Upgrade-Insecure-Requests", "1"])
data = scrapertools.cache_page("http://mocosoftx.com/foro/login2/", post=post, headers=headers)
logger.info("data=" + data)
return True
def mainlist(item):
logger.info()
itemlist = []
if config.get_setting("mocosoftxuser", "mocosoftx") == "":
itemlist.append(
Item(channel=item.channel, title="Habilita tu cuenta en la configuración...", action="settingCanal",
url=""))
else:
if login():
item.url = "http://mocosoftx.com/foro/forum/"
itemlist = foro(item)
itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url=""))
else:
itemlist.append(
Item(channel=item.channel, title="Cuenta incorrecta, revisa la configuración...", action="", url="",
folder=False))
return itemlist
def settingCanal(item):
return platformtools.show_channel_settings()
def foro(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url, headers=MAIN_HEADERS)
# Extract the forums and subforums
patron = '<h4><a href="([^"]+)"[^>]+>([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = unicode(scrapedtitle, "iso-8859-1", errors="replace").encode("utf-8")
title = ">> Foro " + scrapedtitle
url = urlparse.urljoin(item.url, scrapedurl)
# http://mocosoftx.com/foro/fotos-hentai/?PHPSESSID=nflddqf9nvbm2dd92
if "PHPSESSID" in url:
url = scrapertools.get_match(url, "(.*?)\?PHPSESSID=")
thumbnail = ""
plot = ""
itemlist.append(Item(channel=item.channel, title=title, action="foro", url=url, plot=plot, thumbnail=thumbnail,
folder=True))
# Extract the individual threads
patron = '<td class="icon2 windowbgb">[^<]+'
patron += '<img src="([^"]+)"[^<]+'
patron += '</td>[^<]+'
patron += '<td class="subject windowbgb2">[^<]+'
patron += '<div >[^<]+'
patron += '<span id="msg_\d+"><a href="([^"]+)">([^>]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
title = scrapedtitle
url = urlparse.urljoin(item.url, scrapedurl)
if "PHPSESSID" in url:
url = scrapertools.get_match(url, "(.*?)\?PHPSESSID=")
thumbnail = scrapedthumbnail
plot = ""
itemlist.append(
Item(channel=item.channel, title=title, action="findvideos", url=url, plot=plot, thumbnail=thumbnail,
folder=True))
# Extract the next-page marker
# <a class="navPages" href="http://mocosoftx.com/foro/peliculas-xxx-online-(completas)/20/?PHPSESSID=rpejdrj1trngh0sjdp08ds0ef7">2</a>
patronvideos = '<strong>\d+</strong[^<]+<a class="navPages" href="([^"]+)">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches) > 0:
scrapedtitle = ">> Página siguiente"
scrapedurl = urlparse.urljoin(item.url, matches[0])
if "PHPSESSID" in scrapedurl:
scrapedurl = scrapertools.get_match(scrapedurl, "(.*?)\?PHPSESSID=")
scrapedthumbnail = ""
scrapedplot = ""
itemlist.append(Item(channel=item.channel, title=scrapedtitle, action="foro", url=scrapedurl, plot=scrapedplot,
thumbnail=scrapedthumbnail, folder=True))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
# Find the thumbnail and the plot
data = scrapertools.cache_page(item.url)
logger.info("data=" + data)
try:
thumbnail = scrapertools.get_match(data, '<div class="post">.*?<img src="([^"]+)"')
except:
thumbnail = ""
plot = ""
# Now find the videos
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.plot = plot
videoitem.thumbnail = thumbnail
videoitem.fulltitle = item.title
parsed_url = urlparse.urlparse(videoitem.url)
fichero = parsed_url.path
partes = fichero.split("/")
titulo = partes[len(partes) - 1]
videoitem.title = titulo + " - [" + videoitem.server + "]"
if not itemlist:
patron = '<a href="([^"]+)" class="bbc_link" target="_blank"><span style="color: orange;" class="bbc_color">'
matches = re.compile(patron, re.DOTALL).findall(data)
if matches:
data = scrapertools.cache_page(matches[0])
logger.info(data)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.plot = plot
videoitem.thumbnail = thumbnail
videoitem.fulltitle = item.title
parsed_url = urlparse.urlparse(videoitem.url)
fichero = parsed_url.path
partes = fichero.split("/")
titulo = partes[len(partes) - 1]
videoitem.title = titulo + " - [" + videoitem.server + "]"
return itemlist

View File

@@ -345,13 +345,13 @@ def temporadas(item):
itemlist = []
templist = []
data = httptools.downloadpage(item.url).data
data = data.replace ('"',"'")
realplot = ''
patron = "<button class='classnamer' onclick='javascript: mostrarcapitulos.*?blank'>([^<]+)</button>"
patron = "<button class='classnamer' onclick='javascript: mostrarcapitulos.*?blank'>([^<]+)<\/button>"
matches = re.compile(patron, re.DOTALL).findall(data)
serieid = scrapertools.find_single_match(data, 'data-nonce="(.*?)"')
serieid = scrapertools.find_single_match(data, "data-nonce='(.*?)'")
item.thumbnail = item.thumbvid
infoLabels = item.infoLabels
for scrapedtitle in matches:
@@ -408,6 +408,7 @@ def episodiosxtemp(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = data.replace('"', "'")
patron = "<button class='classnamer' onclick='javascript: mostrarenlaces\(([^\)]+)\).*?<"
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -640,7 +641,7 @@ def play(item):
logger.info()
data = httptools.downloadpage(item.url).data
if 'streamplay' not in item.server or 'streame' not in item.server:
if item.server not in ['streamplay','streame']:
url = scrapertools.find_single_match(data, '<(?:IFRAME|iframe).*?(?:SRC|src)=*([^ ]+) (?!style|STYLE)')
else:
url = scrapertools.find_single_match(data, '<meta http-equiv="refresh" content="0; url=([^"]+)">')
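The rewritten condition in `play` fixes a boolean slip: joined with `or`, the old test was true for every server name (no single name contains both substrings), so the first branch always ran. A quick demonstration:

```python
# The old test is True for all three names; the new one only for 'openload'.
for s in ['streamplay', 'streame', 'openload']:
    old = 'streamplay' not in s or 'streame' not in s
    new = s not in ['streamplay', 'streame']
    print(s, old, new)
# streamplay True False
# streame True False
# openload True True
```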

View File

@@ -1,182 +1,185 @@
# -*- coding: utf-8 -*-
# -*- Channel TVSeriesdk -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
host = 'http://www.tvseriesdk.com/'
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(item.clone(title="Ultimos", action="last_episodes", url=host))
itemlist.append(item.clone(title="Todas", action="list_all", url=host))
itemlist.append(item.clone(title="Buscar", action="search", url='http://www.tvseriesdk.com/index.php?s='))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
global i
templist = []
data = get_source(item.url)
patron = '<li class=cat-item cat-item-\d+><a href=(.*?) title=(.*?)>(.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) > 10:
if item.next_page != 10:
url_next_page = item.url
matches = matches[:10]
next_page = 10
item.i = 0
else:
patron = matches[item.i:][:10]
next_page = 10
url_next_page = item.url
for scrapedurl, scrapedplot, scrapedtitle in matches:
url = scrapedurl
plot = scrapedplot
contentSerieName = scrapedtitle
title = contentSerieName
templist.append(item.clone(action='episodios',
title=title,
url=url,
thumbnail='',
plot=plot,
contentSerieName=contentSerieName
))
itemlist = get_thumb(templist)
# Pagination
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, next_page=next_page, i=item.i))
return itemlist
def last_episodes(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<div class=pelis>.*?<a href=(.*?) title=(.*?)><img src=(.*?) alt='
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = scrapedurl
title = scrapedtitle
thumbnail = scrapedthumbnail
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<a href=(.*?) class=lcc>(.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
n_ep = 1
for scrapedurl, scrapedtitle in matches[::-1]:
url = scrapedurl
scrapedtitle = re.sub(r'Capítulo \d+', '', scrapedtitle)
title = '1x%s - %s' % (n_ep, scrapedtitle)
itemlist.append(
item.clone(action='findvideos', title=title, url=url, contentEpisodeNumber=n_ep, contentSeasonNumber='1'))
n_ep += 1
return itemlist
def get_thumb(itemlist):
logger.info()
for item in itemlist:
data = get_source(item.url)
item.thumbnail = scrapertools.find_single_match(data, '<div class=sinope><img src=(.*?) alt=')
return itemlist
def search_list(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'img title.*?src=(.*?) width=.*?class=tisearch><a href=(.*?)>(.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumb, scrapedurl, scrapedtitle in matches:
title = scrapedtitle
url = scrapedurl
thumbnail = scrapedthumb
itemlist.append(item.clone(title=title, url=url, thumbnail=thumbnail, action='findvideos'))
# Pagination via the <link rel=next> tag
next_page = scrapertools.find_single_match(data, '<link rel=next href=(.*?) />')
if next_page:
itemlist.append(Item(channel=item.channel, action="search_list", title='>> Pagina Siguiente', url=next_page,
thumbnail=config.get_thumb("thumb_next.png")))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return search_list(item)
else:
return []
def findvideos(item):
logger.info()
itemlist = []
servers = {'netu': 'http://hqq.tv/player/embed_player.php?vid=',
'open': 'https://openload.co/embed/',
'netv': 'http://goo.gl/',
'gamo': 'http://gamovideo.com/embed-',
'powvideo': 'http://powvideo.net/embed-',
'play': 'http://streamplay.to/embed-',
'vido': 'http://vidoza.net/embed-'}
data = get_source(item.url)
patron = 'id=tab\d+.*?class=tab_content><script>(.*?)\((.*?)\)<\/script>'
matches = re.compile(patron, re.DOTALL).findall(data)
for server, video_id in matches:
if server not in ['gamo', 'powvideo', 'play', 'vido', 'netv']:
url = servers[server] + video_id
elif server == 'netv':
url = get_source(servers[server] + video_id)
else:
url = servers[server] + video_id + '.html'
itemlist.extend(servertools.find_video_items(data=url))
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.title = item.title + ' (%s)' % videoitem.server
videoitem.action = 'play'
return itemlist
# -*- coding: utf-8 -*-
# -*- Channel TVSeriesdk -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
host = 'http://www.tvseriesdk.com/'
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(item.clone(title="Ultimos", action="last_episodes", url=host))
itemlist.append(item.clone(title="Todas", action="list_all", url=host))
itemlist.append(item.clone(title="Buscar", action="search", url='http://www.tvseriesdk.com/index.php?s='))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
global i
templist = []
data = get_source(item.url)
patron = '<li class=cat-item cat-item-\d+><a href=(.*?) title=(.*?)>(.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) > 10:
if item.next_page != 10:
url_next_page = item.url
matches = matches[:10]
next_page = 10
item.i = 0
else:
patron = matches[item.i:][:10]
next_page = 10
url_next_page = item.url
for scrapedurl, scrapedplot, scrapedtitle in matches:
url = scrapedurl
plot = scrapedplot
contentSerieName = scrapedtitle
title = contentSerieName
templist.append(item.clone(action='episodios',
title=title,
url=url,
thumbnail='',
plot=plot,
contentSerieName=contentSerieName
))
itemlist = get_thumb(templist)
# Pagination
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, next_page=next_page, i=item.i))
return itemlist
def last_episodes(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<div class=pelis>.*?<a href=(.*?) title=(.*?)><img src=(.*?) alt='
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = scrapedurl
title = scrapedtitle
thumbnail = scrapedthumbnail
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<a href=(.*?) class=lcc>(.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
n_ep = 1
for scrapedurl, scrapedtitle in matches[::-1]:
url = scrapedurl
scrapedtitle = re.sub(r'Capítulo \d+', '', scrapedtitle)
title = '1x%s - %s' % (n_ep, scrapedtitle)
itemlist.append(
item.clone(action='findvideos', title=title, url=url, contentEpisodeNumber=n_ep, contentSeasonNumber='1'))
n_ep += 1
return itemlist
def get_thumb(itemlist):
logger.info()
for item in itemlist:
data = get_source(item.url)
item.thumbnail = scrapertools.find_single_match(data, '<div class=sinope><img src=(.*?) alt=')
return itemlist
def search_list(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'img title.*?src=(.*?) width=.*?class=tisearch><a href=(.*?)>(.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumb, scrapedurl, scrapedtitle in matches:
title = scrapedtitle
url = scrapedurl
thumbnail = scrapedthumb
itemlist.append(item.clone(title=title, url=url, thumbnail=thumbnail, action='findvideos'))
# Pagination via the <link rel=next> tag
next_page = scrapertools.find_single_match(data, '<link rel=next href=(.*?) />')
if next_page:
itemlist.append(Item(channel=item.channel, action="search_list", title='>> Pagina Siguiente', url=next_page,
thumbnail=config.get_thumb("thumb_next.png")))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return search_list(item)
else:
return []
def findvideos(item):
logger.info()
itemlist = []
servers = {'netu': 'http://hqq.tv/player/embed_player.php?vid=',
'open': 'https://openload.co/embed/',
'netv': 'http://goo.gl/',
'gamo': 'http://gamovideo.com/embed-',
'powvideo': 'http://powvideo.net/embed-',
'play': 'http://streamplay.to/embed-',
'vido': 'http://vidoza.net/embed-'}
data = get_source(item.url)
noemitido = scrapertools.find_single_match(data, '<p><img src=(http://darkiller.com/images/subiendo.png) border=0\/><\/p>')
patron = 'id=tab\d+.*?class=tab_content><script>(.*?)\((.*?)\)<\/script>'
matches = re.compile(patron, re.DOTALL).findall(data)
if not noemitido:
for server, video_id in matches:
if server not in ['gamo', 'powvideo', 'play', 'vido', 'netv']:
url = servers[server] + video_id
elif server == 'netv':
url = get_source(servers[server] + video_id)
else:
url = servers[server] + video_id + '.html'
itemlist.extend(servertools.find_video_items(data=url))
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.title = item.title + ' (%s)' % videoitem.server
videoitem.action = 'play'
else:
itemlist.append(item.clone(title = 'Este capitulo aun no esta disponible', action='', url=''))
return itemlist
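The only behavioral change in this otherwise identical rewrite is the `noemitido` guard: when the page still carries the darkiller.com `subiendo.png` placeholder, the episode has no mirrors yet, so a single informational item is returned instead of parsing the player tabs. The detection boils down to a substring check (a sketch; the real code keeps the match via `find_single_match`):

```python
# Sketch of the guard: the site shows subiendo.png ("uploading") while an
# episode has no links yet. get_source() strips quotes, hence the bare attrs.
import re

def is_pending(page_html):
    return bool(re.search(r'darkiller\.com/images/subiendo\.png', page_html))

print(is_pending('<p><img src=http://darkiller.com/images/subiendo.png border=0/></p>'))  # True
```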

View File

@@ -9,7 +9,6 @@
"version": 1,
"categories": [
"movie",
"latino",
"direct"
"latino"
]
}

View File

@@ -15,7 +15,7 @@ from core import servertools
from core.item import Item
from platformcode import config, logger
host = "http://ver-peliculas.io/"
host = "http://ver-peliculas.org/"
def mainlist(item):
@@ -156,18 +156,18 @@ def findvideos(item):
data = get_source(item.url)
video_info = scrapertools.find_single_match(data, "load_player\('(.*?)','(.*?)'\);")
movie_info = scrapertools.find_single_match(item.url, 'http:\/\/ver-peliculas\.io\/peliculas\/(\d+)-(.*?)-\d{'
movie_info = scrapertools.find_single_match(item.url, 'http:\/\/ver-peliculas\.org\/peliculas\/(\d+)-(.*?)-\d{'
'4}-online\.')
movie_id = movie_info[0]
movie_name = movie_info[1]
sub = video_info[1]
url_base = 'http://ver-peliculas.io/core/api.php?id=%s&slug=%s' % (movie_id, movie_name)
url_base = 'http://ver-peliculas.org/core/api.php?id=%s&slug=%s' % (movie_id, movie_name)
data = httptools.downloadpage(url_base).data
json_data = jsontools.load(data)
video_list = json_data['lista']
itemlist = []
for videoitem in video_list:
video_base_url = 'http://ver-peliculas.io/core/videofinal.php'
video_base_url = 'http://ver-peliculas.org/core/videofinal.php'
if video_list[videoitem] != None:
video_lang = video_list[videoitem]
languages = ['latino', 'spanish', 'subtitulos']
@@ -184,12 +184,12 @@ def findvideos(item):
for video_link in sources:
url = video_link['sources']
if 'onevideo' in url:
data = get_source(url)
g_urls = servertools.findvideos(data=data)
url = g_urls[0][1]
server = g_urls[0][0]
if url not in duplicated:
# if 'onevideo' in url:
# data = get_source(url)
# g_urls = servertools.findvideos(data=data)
# url = g_urls[0][1]
# server = g_urls[0][0]
if url not in duplicated and server!='drive':
lang = lang.capitalize()
if lang == 'Spanish':
lang = 'Español'

View File

@@ -26,12 +26,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
referer = re.sub(r"embed-|player-", "", page_url)[:-5]
data = httptools.downloadpage(page_url, headers={'Referer': referer}).data
for list in scrapertools.find_multiple_matches(data, '_[^=]+=(\[[^\]]+\]);'):
if len(list) == 703 or len(list) == 711:
key = "".join(eval(list)[7:9])
break
if key.startswith("embed"):
key = key[6:] + key[:6]
matches = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
data = jsunpack.unpack(matches).replace("\\", "")
@@ -40,7 +35,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls = []
for video_url in matches:
_hash = scrapertools.find_single_match(video_url, '[A-z0-9\_\-]{40,}')
hash = decrypt(_hash, key)
hash = _hash[::-1]
hash = hash.replace(hash[2:3],"",1)
video_url = video_url.replace(_hash, hash)
filename = scrapertools.get_filename_from_url(video_url)[-4:]
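The connector replaces the old key-based `decrypt` with a purely positional transform: reverse the token, then delete the first occurrence of its third character. A worked example on a made-up token:

```python
_hash = "abcdefgh"             # illustrative token, not real site data
h = _hash[::-1]                # "hgfedcba"
h = h.replace(h[2:3], "", 1)   # h[2:3] == "f"; first "f" removed -> "hgedcba"
print(h)                       # hgedcba
```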

View File

@@ -1,64 +0,0 @@
{
"active": true,
"changes": [
{
"date": "25/03/2016",
"description": "Versión incial"
}
],
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "userporn.com\\/f\\/([A-Z0-9a-z]{12}).swf",
"url": "http://www.userporn.com/video/\\1"
},
{
"pattern": "userporn.com\\/video\\/([A-Z0-9a-z]{12})",
"url": "http://www.userporn.com/video/\\1"
},
{
"pattern": "userporn.com\\/e\\/([A-Z0-9a-z]{12})",
"url": "http://www.userporn.com/video/\\1"
},
{
"pattern": "http\\:\\/\\/(?:www\\.)?userporn.com\\/(?:(?:e/|flash/)|(?:(?:video/|f/)))?([a-zA-Z0-9]{0,12})",
"url": "http://www.userporn.com/video/\\1"
}
]
},
"free": true,
"id": "userporn",
"name": "userporn",
"premium": [
"realdebrid",
"alldebrid"
],
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"version": 1
}

View File

@@ -1,61 +0,0 @@
# -*- coding: utf-8 -*-
import base64
import re
from core import scrapertools
from platformcode import logger
HOSTER_KEY = "NTI2NzI5Cgo="
# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
# Wait a little, like the flash player does
logger.info("waiting 3 secs")
import time
time.sleep(3)
# Get the id
code = Extract_id(page_url)
# Download the json with the video details
# http://www.userporn.com/player_control/settings.php?v=dvthddkC7l4J&em=TRUE&fv=v1.1.45
controluri = "http://userporn.com/player_control/settings.php?v=" + code + "&em=TRUE&fv=v1.1.45"
datajson = scrapertools.cachePage(controluri)
# logger.info("response="+datajson);
# Convert the json into a dictionary
datajson = datajson.replace("false", "False").replace("true", "True")
datajson = datajson.replace("null", "None")
datadict = eval("(" + datajson + ")")
# Formats
formatos = datadict["settings"]["res"]
for formato in formatos:
uri = base64.decodestring(formato["u"])
resolucion = formato["l"]
import videobb
video_url = videobb.build_url(uri, HOSTER_KEY, datajson)
video_urls.append(["%s [userporn]" % resolucion, video_url.replace(":80", "")])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
def Extract_id(url):
_VALID_URL = r'^((?:http://)?(?:\w+\.)?userporn\.com/(?:(?:(?:e/)|(?:video/))|(?:(?:flash/)|(?:f/)))?)?([0-9A-Za-z_-]+)(?(1).+)?$'
# Extract video id from URL
mobj = re.match(_VALID_URL, url)
if mobj is None:
logger.info('ERROR: URL invalida: %s' % url)
return ""
id = mobj.group(2)
logger.info("extracted code=" + id)
return id