Compare commits
65 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | df0607ec90 |  |
|  | d83a49743c |  |
|  | 66762b2c46 |  |
|  | 79c761206d |  |
|  | f04647f348 |  |
|  | 0f81113225 |  |
|  | 169c09db16 |  |
|  | 306bb6533d |  |
|  | 210e90cb96 |  |
|  | 74e53f362b |  |
|  | 947cb7f51f |  |
|  | f88ca81ff5 |  |
|  | 42cd9ac14b |  |
|  | b7520145bb |  |
|  | 209af696b2 |  |
|  | 03589b9c39 |  |
|  | a3337df4da |  |
|  | acf7f9a27a |  |
|  | 8082e1b244 |  |
|  | 9345115869 |  |
|  | 7ae8b203b6 |  |
|  | 56c16f2922 |  |
|  | 7e47e3ae59 |  |
|  | 9eef89d1b0 |  |
|  | 2b3d81c9a0 |  |
|  | 876b02b81f |  |
|  | 8028290051 |  |
|  | 78252d3452 |  |
|  | 9aa77400d5 |  |
|  | 5d592f724d |  |
|  | d288031a83 |  |
|  | 41a39ff02b |  |
|  | 0bad69a7cb |  |
|  | 74e6145d2f |  |
|  | c344832c8c |  |
|  | a9caf59ce1 |  |
|  | 770a2e215a |  |
|  | 28d99deb48 |  |
|  | 23ac80fbd6 |  |
|  | 9a5ddfbccb |  |
|  | 50bbf7d9aa |  |
|  | 2aab5ae0ff |  |
|  | 1bbc51a885 |  |
|  | f95c3621d4 |  |
|  | f05cbba109 |  |
|  | 16968f9204 |  |
|  | 8985f3ebdd |  |
|  | d60c246bbb |  |
|  | 3b29fe47bb |  |
|  | 3093f72ce5 |  |
|  | 55dcf3f091 |  |
|  | 2924b6958d |  |
|  | 927310c7c6 |  |
|  | 0c25891790 |  |
|  | 212c06057f |  |
|  | 9c3b3e9256 |  |
|  | 6dc853b41e |  |
|  | 7afd09dfa9 |  |
|  | 6855508eaa |  |
|  | 2925c29671 |  |
|  | 506e68e8a3 |  |
|  | 9cc30152f8 |  |
|  | 267c9d8031 |  |
|  | bd68b83b6c |  |
|  | c1f8039672 |  |
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.3.3" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.3.7" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>

@@ -19,15 +19,15 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» serieslan » animemovil
» mundiseries » bajui
» seriesblanco » descargamix
» miradetodo » pelisgratis
» tvseriesdk » ultrapeliculashd
» gamovideo » flashx
» danimados ¤ arreglos internos
[COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] por su colaboración en esta versión[/COLOR]
</news>
» anitoonstv » asialiveaction
» cinehindi » danimados
» mundiseries » pelisculashndu
» seodiv » serieslan
» crunchyroll » pelisfox
» stormo ¤ arreglos internos
[COLOR green]Gracias a [COLOR yellow]Danielr460, numa00009 y numa00009[/COLOR]
por su colaboración en esta versión[/COLOR]
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>
<description lang="en">Browse web pages using Kodi, you can easily watch their video content.</description>
@@ -1,7 +1,5 @@
# -*- coding: utf-8 -*-

import urlparse

from core import httptools
from core import jsontools
from core import scrapertools

@@ -59,6 +57,7 @@ def colecciones(item):
title = scrapedtitle.capitalize() + " (" + scrapedcantidad + ")"
itemlist.append(Item(channel = item.channel,
action = "listado_colecciones",
page = 1,
thumbnail = host + scrapedthumbnail,
title = title,
url = host + scrapedurl

@@ -71,7 +70,7 @@ def listado_colecciones(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data_url = scrapertools.find_single_match(data, "data_url: '([^']+)")
post = "page=1"
post = "page=%s" %item.page
data = httptools.downloadpage(host + data_url, post=post).data
patron = 'a href="(/peli[^"]+).*?'
patron += 'src="([^"]+).*?'

@@ -88,6 +87,16 @@ def listado_colecciones(item):
url = host + scrapedurl
))
tmdb.set_infoLabels(itemlist)
item.page += 1
post = "page=%s" %item.page
data = httptools.downloadpage(host + data_url, post=post).data
if len(data) > 50:
itemlist.append(Item(channel = item.channel,
action = "listado_colecciones",
title = "Pagina siguiente>>",
page = item.page,
url = item.url
))
return itemlist

@@ -159,6 +168,7 @@ def lista(item):
params = jsontools.dump(dict_param)

data = httptools.downloadpage(item.url, post=params).data
data = data.replace("<mark>","").replace("<\/mark>","")
dict_data = jsontools.load(data)

for it in dict_data["items"]:

@@ -167,7 +177,7 @@ def lista(item):
rating = it["imdb"]
year = it["year"]
url = host + "pelicula/" + it["slug"]
thumb = urlparse.urljoin(host, it["image"])
thumb = host + it["image"]
item.infoLabels['year'] = year
itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb,
plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie"))
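The listado_colecciones change above replaces the fixed post = "page=1" with the page number carried on item.page, then probes the following page and only appends a "Pagina siguiente>>" entry when that probe returns a non-trivial response. A minimal, self-contained sketch of that pattern, assuming a stand-in fetch_page() in place of httptools.downloadpage():

# Illustrative sketch of the page-forwarding pagination used above.
# fetch_page() is an invented stand-in for httptools.downloadpage(...).data.
def fetch_page(page):
    # pretend pages 1-3 have content and page 4 comes back empty
    return "content " * 20 if page <= 3 else ""

def listado(page=1):
    items = [("item", page)]          # parse the current page here
    next_data = fetch_page(page + 1)  # probe the next page, as the diff does
    if len(next_data) > 50:           # same heuristic: enough data means more results exist
        items.append(("Pagina siguiente>>", page + 1))
    return items

print(listado(1))   # [('item', 1), ('Pagina siguiente>>', 2)]
print(listado(3))   # [('item', 3)] - no next-page entry on the last page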
@@ -127,11 +127,21 @@ def episodios(item):
plot=scrapedplot, url=url, show=show))

if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))

return itemlist

def googl(url):
logger.info()
a=url.split("/")
link=a[3]
link="http://www.trueurl.net/?q=http%3A%2F%2Fgoo.gl%2F"+link+"&lucky=on&Uncloak=Find+True+URL"
data_other = httptools.downloadpage(link).data
data_other = re.sub(r"\n|\r|\t|\s{2}| ", "", data_other)
patron='<td class="withbg">Destination URL<\/td><td><A title="(.+?)"'
trueurl = scrapertools.find_single_match(data_other, patron)
return trueurl

def findvideos(item):
logger.info()

@@ -147,16 +157,23 @@ def findvideos(item):
scrapedthumbnail = scrapertools.find_single_match(data, '<div class="caracteristicas"><img src="([^<]+)">')
itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
for server, quality, url in itemla:
if "HQ" in quality:
quality = "HD"
if "Calidad Alta" in quality:
quality = quality.replace("Calidad Alta", "HQ")
quality = "HQ"
if " Calidad media - Carga mas rapido" in quality:
quality = quality.replace(" Calidad media - Carga mas rapido", "360p")
quality = "360p"
server = server.lower().strip()
if "ok" == server:
if "ok" in server:
server = 'okru'
if "rapid" in server:
server = 'rapidvideo'
if "netu" in server:
server = 'netutv'
url = googl(url)
itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
thumbnail=scrapedthumbnail, plot=scrapedplot,
title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality)))
title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))

autoplay.start(itemlist, item)
return itemlist
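For netu links the hunk above first de-shortens the URL with the new googl() helper, which asks trueurl.net for the destination of a goo.gl short link and scrapes it back out of the result table. A hedged, runnable sketch of the same flow, with the short code and the resolved destination invented for illustration:

# resolve() is an invented stand-in for the trueurl.net scrape performed by googl().
def resolve(code):
    return "https://netu.tv/watch_video.php?v=%s" % code   # pretend destination URL

short = "https://goo.gl/abc123"   # invented example short link
code = short.split("/")[3]        # same a[3] indexing googl() uses to pull the short code
print(resolve(code))              # the channel then plays the resolved URL with server='netutv'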
@@ -180,7 +180,7 @@ def findvideos(item):
show = item.show
for videoitem in itemlist:
videoitem.channel = item.channel
if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie":
if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))

@@ -150,7 +150,7 @@ def episodios(item):
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, show=show))

if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir " + show + " a la videoteca", url=item.url,
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))

return itemlist

@@ -123,7 +123,7 @@ def lista(item):

if next_page_url != "":
item.url = next_page_url
itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=next_page_url,
itemlist.append(Item(channel=item.channel, action="lista", title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=next_page_url,
thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
return itemlist
@@ -132,14 +132,18 @@ def findvideos(item):
logger.info()

itemlist = []
itemlist1 = []

data = httptools.downloadpage(item.url).data
itemlist.extend(servertools.find_video_items(data=data))
itemlist1.extend(servertools.find_video_items(data=data))
patron_show = '<div class="data"><h1 itemprop="name">([^<]+)<\/h1>'
show = scrapertools.find_single_match(data, patron_show)
for videoitem in itemlist:
for videoitem in itemlist1:
videoitem.channel = item.channel
if config.get_videolibrary_support() and len(itemlist) > 0:
for i in range(len(itemlist1)):
if not 'youtube' in itemlist1[i].title:
itemlist.append(itemlist1[i])
if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentChannel!='videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))
@@ -168,11 +168,11 @@ def episodios(item):

data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\t|\s{2,}', '', data)
patron = '<li id="showview_videos.*?href="([^"]+)".*?(?:src|data-thumbnailUrl)="([^"]+)".*?media_id="([^"]+)" ' \
'style="width: (.*?)%.*?<span class="series-title.*?>\s*(.*?)</span>.*?<p class="short-desc".*?>' \
patron = '<li id="showview_videos.*?href="([^"]+)".*?(?:src|data-thumbnailUrl)="([^"]+)".*?media_id="([^"]+)"' \
'style="width:(.*?)%.*?<span class="series-title.*?>\s*(.*?)</span>.*?<p class="short-desc".*?>' \
'\s*(.*?)</p>.*?description":"([^"]+)"'
if data.count('class="season-dropdown') > 1:
bloques = scrapertools.find_multiple_matches(data, 'class="season-dropdown[^"]+" title="([^"]+)"(.*?)</ul>')
bloques = scrapertools.find_multiple_matches(data, 'class="season-dropdown[^"]+".*?title="([^"]+)"(.*?)</ul>')
for season, b in bloques:
matches = scrapertools.find_multiple_matches(b, patron)
if matches:

@@ -209,7 +209,6 @@ def episodios(item):
Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumb, media_id=media_id,
server="crunchyroll", text_color=item.text_color, contentTitle=item.contentTitle,
contentSerieName=item.contentSerieName, contentType="tvshow"))

return itemlist
@@ -32,8 +32,8 @@ def mainlist(item):
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="mainpage", title="Más Populares", url=host,
thumbnail=thumb_series))
#itemlist.append(Item(channel=item.channel, action="movies", title="Peliculas Animadas", url=host,
# thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas Animadas", url=host+"peliculas/",
thumbnail=thumb_series))
autoplay.show_option(item.channel, itemlist)
return itemlist

@@ -82,7 +82,6 @@ def mainpage(item):
return itemlist
return itemlist

def lista(item):
logger.info()

@@ -90,15 +89,26 @@ def lista(item):

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
data_lista = scrapertools.find_single_match(data, '<div class="items">(.+?)<\/div><\/div><div class=.+?>')
if item.title=="Peliculas Animadas":
data_lista = scrapertools.find_single_match(data,
'<div id="archive-content" class="animation-2 items">(.*)<a href=\''
else:
data_lista = scrapertools.find_single_match(data,
'<div class="items">(.+?)<\/div><\/div><div class=.+?>')
patron = '<img src="([^"]+)" alt="([^"]+)">.+?<a href="([^"]+)">.+?<div class="texto">(.+?)<\/div>'
#scrapedthumbnail,#scrapedtitle, #scrapedurl, #scrapedplot
matches = scrapertools.find_multiple_matches(data_lista, patron)
for scrapedthumbnail,scrapedtitle, scrapedurl, scrapedplot in matches:
itemlist.append(
item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
if item.title=="Peliculas Animadas":
itemlist.append(
item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, contentType="movie",
plot=scrapedplot, action="findvideos", show=scrapedtitle))
else:
itemlist.append(
item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
context=autoplay.context,plot=scrapedplot, action="episodios", show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
if item.title!="Peliculas Animadas":
tmdb.set_infoLabels(itemlist)
return itemlist

@@ -124,7 +134,7 @@ def episodios(item):
action="findvideos", title=title, url=scrapedurl, show=show))

if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR blue]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))

@@ -141,6 +151,7 @@ def findvideos(item):
data = scrapertools.find_single_match(data,
'<div id="playex" .+?>(.+?)<\/nav><\/div><\/div>')
patron='src="(.+?)"'
logger.info("assfxxv "+data)
itemla = scrapertools.find_multiple_matches(data,patron)
for i in range(len(itemla)):
#for url in itemla:

@@ -152,6 +163,8 @@ def findvideos(item):
server='okru'
else:
server=''
if "youtube" in url:
server='youtube'
if "openload" in url:
server='openload'
if "google" in url:

@@ -166,6 +179,10 @@ def findvideos(item):
title="NO DISPONIBLE"
if title!="NO DISPONIBLE":
itemlist.append(item.clone(title=title,url=url, action="play", server=server))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
itemlist.append(
item.clone(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", contentTitle=item.show))

autoplay.start(itemlist, item)
return itemlist
7
plugin.video.alfa/channels/help.json
Normal file
@@ -0,0 +1,7 @@
{
"id": "help",
"name": "Ayuda",
"active": false,
"adult": false,
"language": ["*"]
}
224
plugin.video.alfa/channels/help.py
Normal file
@@ -0,0 +1,224 @@
# -*- coding: utf-8 -*-

import os
import xbmc

from core.item import Item
from platformcode import config, logger, platformtools
from channelselector import get_thumb

if config.is_xbmc():

import xbmcgui

class TextBox(xbmcgui.WindowXMLDialog):
""" Create a skinned textbox window """
def __init__(self, *args, **kwargs):
self.title = kwargs.get('title')
self.text = kwargs.get('text')
self.doModal()

def onInit(self):
try:
self.getControl(5).setText(self.text)
self.getControl(1).setLabel(self.title)
except:
pass

def onClick(self, control_id):
pass

def onFocus(self, control_id):
pass

def onAction(self, action):
# self.close()
if action in [xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK]:
self.close()


def mainlist(item):
logger.info()
itemlist = []

if config.is_xbmc():
itemlist.append(Item(channel=item.channel, action="", title="FAQ:",
thumbnail=get_thumb("help.png"),
folder=False))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Cómo reportar un error?",
thumbnail=get_thumb("help.png"),
folder=False, extra="report_error"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Se pueden activar/desactivar los canales?",
thumbnail=get_thumb("help.png"),
folder=False, extra="onoff_canales"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Es posible la sincronización automática con Trakt?",
thumbnail=get_thumb("help.png"),
folder=False, extra="trakt_sync"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Es posible mostrar todos los resultados juntos en el buscador global?",
thumbnail=get_thumb("help.png"),
folder=False, extra="buscador_juntos"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Los enlaces tardan en aparecer.",
thumbnail=get_thumb("help.png"),
folder=False, extra="tiempo_enlaces"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - La búsqueda de contenido no se hace correctamente.",
thumbnail=get_thumb("help.png"),
folder=False, extra="prob_busquedacont"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Algún canal no funciona correctamente.",
thumbnail=get_thumb("help.png"),
folder=False, extra="canal_fallo"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Los enlaces Torrent no funcionan.",
thumbnail=get_thumb("help.png"),
folder=False, extra="prob_torrent"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - No se actualiza correctamente la videoteca.",
thumbnail=get_thumb("help.png"),
folder=True, extra="prob_bib"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Enlaces de interés",
thumbnail=get_thumb("help.png"),
folder=False, extra=""))

return itemlist


def faq(item):

if item.extra == "onoff_canales":
respuesta = platformtools.dialog_yesno("Alfa",
"Esto se puede hacer en 'Configuración'>'Activar/Desactivar canales'. "
"Puedes activar/desactivar los canales uno por uno o todos a la vez. ",
"¿Deseas gestionar ahora los canales?")
if respuesta == 1:
from channels import setting
setting.conf_tools(Item(extra='channels_onoff'))

elif item.extra == "trakt_sync":
respuesta = platformtools.dialog_yesno("Alfa",
"Actualmente se puede activar la sincronización (silenciosa) "
"tras marcar como visto un episodio (esto se hace automáticamente). "
"Esta opción se puede activar en 'Configuración'>'Ajustes "
"de la videoteca'.",
"¿Deseas acceder a dichos ajustes?")
if respuesta == 1:
from channels import videolibrary
videolibrary.channel_config(Item(channel='videolibrary'))

elif item.extra == "tiempo_enlaces":
respuesta = platformtools.dialog_yesno("Alfa",
"Esto puede mejorarse limitando el número máximo de "
"enlaces o mostrandolos en una ventana emergente. "
"Estas opciones se encuentran en 'Configuración'>'Ajustes "
"de la videoteca'.",
"¿Deseas acceder a dichos ajustes?")
if respuesta == 1:
from channels import videolibrary
videolibrary.channel_config(Item(channel='videolibrary'))

elif item.extra == "prob_busquedacont":
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Puede que no hayas escrito la ruta de la librería correctamente en "
"'Configuración'>'Preferencias'.\n"
"La ruta específicada debe ser exactamente la misma de la 'fuente' "
"introducida en 'Archivos' de la videoteca de Kodi.\n"
"AVANZADO: Esta ruta también se encuentra en 'sources.xml'.\n"
"También puedes estar experimentando problemas por estar "
"usando algun fork de Kodi y rutas con 'special://'. "
"SPMC, por ejemplo, tiene problemas con esto, y no parece tener solución, "
"ya que es un problema ajeno a Alfa que existe desde hace mucho.\n"
"Puedes intentar subsanar estos problemas en 'Configuración'>'Ajustes de "
"la videoteca', cambiando el ajuste 'Realizar búsqueda de contenido en' "
"de 'La carpeta de cada serie' a 'Toda la videoteca'."
"También puedes acudir a 'http://alfa-addon.com' en busca de ayuda.")

return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)

elif item.extra == "canal_fallo":
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Puede ser que la página web del canal no funcione. "
"En caso de que funcione la página web puede que no seas el primero"
" en haberlo visto y que el canal este arreglado. "
"Puedes mirar en 'alfa-addon.com' o en el "
"repositorio de GitHub (github.com/alfa-addon/addon). "
"Si no encuentras el canal arreglado puedes reportar un "
"problema en el foro.")

return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)

elif item.extra == "prob_bib":
platformtools.dialog_ok("Alfa",
"Puede ser que hayas actualizado el plugin recientemente "
"y que las actualizaciones no se hayan aplicado del todo "
"bien. Puedes probar en 'Configuración'>'Otras herramientas', "
"comprobando los archivos *_data.json o "
"volviendo a añadir toda la videoteca.")

respuesta = platformtools.dialog_yesno("Alfa",
"¿Deseas acceder ahora a esa seccion?")
if respuesta == 1:
itemlist = []
from channels import setting
new_item = Item(channel="setting", action="submenu_tools", folder=True)
itemlist.extend(setting.submenu_tools(new_item))
return itemlist

elif item.extra == "prob_torrent":
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Puedes probar descargando el modulo 'libtorrent' de Kodi o "
"instalando algun addon como 'Quasar' o 'Torrenter', "
"los cuales apareceran entre las opciones de la ventana emergente "
"que aparece al pulsar sobre un enlace torrent. "
"'Torrenter' es más complejo pero también más completo "
"y siempre funciona.")

return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)

elif item.extra == "buscador_juntos":
respuesta = platformtools.dialog_yesno("Alfa",
"Si. La opcion de mostrar los resultados juntos "
"o divididos por canales se encuentra en "
"'setting'>'Ajustes del buscador global'>"
"'Otros ajustes'.",
"¿Deseas acceder a ahora dichos ajustes?")
if respuesta == 1:
from channels import search
search.settings("")

elif item.extra == "report_error":
if config.get_platform(True)['num_version'] < 14:
log_name = "xbmc.log"
else:
log_name = "kodi.log"
ruta = xbmc.translatePath("special://logpath") + log_name
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Para reportar un problema en 'http://alfa-addon.com' es necesario:\n"
" - Versión que usas de Alfa.\n"
" - Versión que usas de kodi, mediaserver, etc.\n"
" - Versión y nombre del sistema operativo que usas.\n"
" - Nombre del skin (en el caso que uses Kodi) y si se "
"te ha resuelto el problema al usar el skin por defecto.\n"
" - Descripción del problema y algún caso de prueba.\n"
" - Agregar el log en modo detallado, una vez hecho esto, "
"zipea el log y lo puedes adjuntar en un post.\n\n"
"Para activar el log en modo detallado, ingresar a:\n"
" - Configuración.\n"
" - Preferencias.\n"
" - En la pestaña General - Marcar la opción: Generar log detallado.\n\n"
"El archivo de log detallado se encuentra en la siguiente ruta: \n\n"
"%s" % ruta)

return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)

else:
platformtools.dialog_ok("Alfa",
"Entérate de novedades, consejos u opciones que desconoces en Telegram: @alfa_addon.\n"
"Si tienes problemas o dudas, puedes acudir al Foro: http://alfa-addon.com")
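The new help channel pairs every mainlist() entry with an extra key that faq() switches on, so adding a topic is a two-step edit. A small, self-contained sketch of that dispatch pattern; Item here is a minimal stand-in and the "mi_tema" entry and its text are invented for illustration:

# Sketch of the menu-entry -> extra-key dispatch used by help.py.
class Item(object):                       # stand-in for core.item.Item
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

def faq_example(item):
    # mirrors faq(): branch on item.extra and return the help text for that topic
    if item.extra == "mi_tema":
        return "Texto de ayuda de ejemplo."
    return "Enterate de novedades en Telegram: @alfa_addon."   # default branch, as in faq()

entry = Item(action="faq", title=" - Mi nueva pregunta (ejemplo)", extra="mi_tema")
print(faq_example(entry))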
File diff suppressed because it is too large
@@ -76,7 +76,7 @@ def episodios(item):
title=title, url=url, show=show))

if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir Temporada/Serie a la biblioteca de Kodi", url=item.url,
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir Temporada/Serie a la biblioteca de Kodi[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
@@ -33,15 +33,14 @@ def mainlist(item):
def explorar(item):
logger.info()
itemlist = list()
url1 = str(item.url)
url1 = item.title
data = httptools.downloadpage(host).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
# logger.info("loca :"+url1+" aaa"+data)
if 'genero' in url1:
patron = '<div class="d"><h3>Pel.+?neros<\/h3>(.+?)<\/h3>'
if 'alfabetico' in url1:
patron = '<\/li><\/ul><h3>Pel.+?tico<\/h3>(.+?)<\/h3>'
if 'año' in url1:
if 'Género' in url1:
patron = '<div class="d">.+?<h3>Pel.+?neros<\/h3>(.+?)<\/h3>'
if 'Listado Alfabético' in url1:
patron = '<\/li><\/ul>.+?<h3>Pel.+?tico<\/h3>(.+?)<\/h3>'
if 'Año' in url1:
patron = '<ul class="anio"><li>(.+?)<\/ul>'
data_explorar = scrapertools.find_single_match(data, patron)
patron_explorar = '<a href="([^"]+)">([^"]+)<\/a>'

@@ -79,26 +78,22 @@ def lista(item):

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) # Eliminamos tabuladores, dobles espacios saltos de linea, etc...
url1 = str(item.url)
if 'http://www.peliculashindu.com/' in url1:
url1 = url1.replace("http://www.peliculashindu.com/", "")
if url1 != 'estrenos':
data = scrapertools.find_single_match(data, '<div id="cuerpo"><div class="iz">.+>Otras')
# data= scrapertools.find_single_match(data,'<div id="cuerpo"><div class="iz">.+>Otras')
data_mov= scrapertools.find_single_match(data,'<div id="cuerpo"><div class="iz">(.+)<ul class="pag">')
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"' # scrapedurl, scrapedthumbnail, scrapedtitle
matches = scrapertools.find_multiple_matches(data, patron)
matches = scrapertools.find_multiple_matches(data_mov, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches: # scrapedthumbnail, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action="findvideos",
show=scrapedtitle))
# Paginacion
patron_pag = '<a href="([^"]+)" title="Siguiente .+?">'
paginasig = scrapertools.find_single_match(data, patron_pag)
logger.info("algoooosadf "+paginasig)

next_page_url = item.url + paginasig
next_page_url = host + paginasig

if paginasig != "":
item.url = next_page_url
itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=next_page_url,
itemlist.append(Item(channel=item.channel, action="lista", title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=next_page_url,
thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
return itemlist
@@ -114,10 +109,9 @@ def findvideos(item):
logger.info("holaa" + data)
patron_show = '<strong>Ver Pel.+?a([^<]+) online<\/strong>'
show = scrapertools.find_single_match(data, patron_show)
logger.info("holaa" + show)
for videoitem in itemlist:
videoitem.channel = item.channel
if config.get_videolibrary_support() and len(itemlist) > 0:
if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentChannel!='videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))
@@ -240,10 +240,10 @@ def findvideos(item):
))
for videoitem in templist:
data = httptools.downloadpage(videoitem.url).data
urls_list = scrapertools.find_multiple_matches(data, '{"reorder":1,"type":.*?}')

urls_list = scrapertools.find_multiple_matches(data, 'var.*?_SOURCE\s+=\s+\[(.*?)\]')
for element in urls_list:
json_data=jsontools.load(element)

id = json_data['id']
sub = json_data['srt']
url = json_data['source']

@@ -253,7 +253,6 @@ def findvideos(item):

new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
'=%s&srt=%s' % (url, sub)
logger.debug('new_url: %s' % new_url)

data = httptools.downloadpage(new_url).data
data = re.sub(r'\\', "", data)
@@ -2,7 +2,7 @@
"id": "playmax",
"name": "PlayMax",
"language": ["cast", "lat"],
"active": true,
"active": false,
"adult": false,
"thumbnail": "playmax.png",
"banner": "playmax.png",
@@ -30,11 +30,6 @@ def mainlist(item):
itemlist.append(
Item(channel=item.channel, action="menudesta", title="Destacadas", url= host + "/pag/1",
thumbnail="http://img.irtve.es/v/1074982/", fanart=mifan))
itemlist.append(Item(channel=item.channel, action="menupelis", title="Proximos estrenos",
url= host + "/archivos/proximos-estrenos/pag/1",
thumbnail="https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcTpsRC"
"-GTYzCqhor2gIDfAB61XeymwgXWSVBHoRAKs2c5HAn29f&reload=on",
fanart=mifan))
itemlist.append(Item(channel=item.channel, action="menupelis", title="Todas las Peliculas",
url= host + "/pag/1",
thumbnail="https://freaksociety.files.wordpress.com/2012/02/logos-cine.jpg", fanart=mifan))

@@ -70,7 +65,8 @@ def menupelis(item):
logger.info(item.url)
itemlist = []
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')

if item.genre:
item.extra = item.genre
if item.extra == '':
section = 'Recién Agregadas'
elif item.extra == 'year':

@@ -79,17 +75,13 @@ def menupelis(item):
section = 'de Eróticas \+18'
else:
section = 'de %s'%item.extra

patronenlaces = '<h.>Películas %s<\/h.>.*?>(.*?)<\/section>'%section
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)

patronenlaces = '<h.>Películas %s</h.>.*?>(.*?)</section>'%section
matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
for bloque_enlaces in matchesenlaces:

patron = '<div class="poster-media-card">.*?'
patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
patron += '<img src="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)

for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "");

@@ -144,21 +136,14 @@ def menudesta(item):
# Peliculas de Estreno
def menuestre(item):
logger.info(item.url)

itemlist = []

data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patronenlaces = '<h1>Estrenos</h1>(.*?)</section>'
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)

matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
for bloque_enlaces in matchesenlaces:

# patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"'

patron = '<div class="poster-media-card">.*?'
patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
patron += '<img src="(.*?)"'

matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")

@@ -255,32 +240,22 @@ def search(item, texto):
patron += '<div class="row">.*?'
patron += '<a href="(.*?)" title="(.*?)">.*?'
patron += '<img src="(.*?)"'

logger.info(patron)

matches = re.compile(patron, re.DOTALL).findall(data)

itemlist = []

for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "")
url = item.url + scrapedurl
thumbnail = item.url + scrapedthumbnail
logger.info(url)
url = scrapedurl
thumbnail = scrapedthumbnail
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, fanart=thumbnail))

return itemlist


def poranyo(item):
logger.info(item.url)

itemlist = []

data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')

patron = '<option value="([^"]+)">(.*?)</option>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:

@@ -289,7 +264,6 @@ def poranyo(item):
url = item.url + scrapedurl
itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
fanart=item.fanart, extra='year'))

return itemlist


@@ -300,24 +274,25 @@ def porcateg(item):
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">'
matches = scrapertools.find_multiple_matches(data, patron)

adult_mode = config.get_setting("adult_mode")
for scrapedurl, scrapedtitle in matches:
if "18" in scrapedtitle and adult_mode == 0:
continue
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "")
url = scrapedurl
logger.info(url)
# si no esta permitidas categoria adultos, la filtramos
extra = title
adult_mode = config.get_setting("adult_mode")
extra1 = title
if adult_mode != 0:
if 'erotic' in scrapedurl:
extra = 'adult'
extra1 = 'adult'
else:
extra=title
extra1=title

if (extra=='adult' and adult_mode != 0) or extra != 'adult':
if (extra1=='adult' and adult_mode != 0) or extra1 != 'adult':
itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
fanart=item.fanart, extra = extra))
fanart=item.fanart, genre = extra1))

return itemlist

@@ -338,7 +313,6 @@ def decode(string):
i += 1
enc4 = keyStr.index(input[i])
i += 1

chr1 = (enc1 << 2) | (enc2 >> 4)
chr2 = ((enc2 & 15) << 4) | (enc3 >> 2)
chr3 = ((enc3 & 3) << 6) | enc4

@@ -352,4 +326,4 @@ def decode(string):

output = output.decode('utf8')

return output
return output
@@ -290,7 +290,10 @@ def do_search(item, categories=None):
multithread = config.get_setting("multithread", "search")
result_mode = config.get_setting("result_mode", "search")

tecleado = item.extra
if item.wanted!='':
tecleado=item.wanted
else:
tecleado = item.extra

itemlist = []
@@ -8,6 +8,7 @@ from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import tmdb
from platformcode import config, logger

IDIOMAS = {'latino': 'Latino'}

@@ -35,6 +36,7 @@ def mainlist(item):
url=host,
thumbnail='https://s27.postimg.org/iahczwgrn/series.png',
fanart='https://s27.postimg.org/iahczwgrn/series.png',
page=0
))
autoplay.show_option(item.channel, itemlist)
return itemlist

@@ -49,15 +51,21 @@ def todas(item):
'Serie><span>(.*?)<\/span>'

matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedcalidad, scrapedtitle in matches:
# Paginacion
num_items_x_pagina = 30
min = item.page * num_items_x_pagina
min=int(min)-int(item.page)
max = min + num_items_x_pagina - 1

for scrapedurl, scrapedthumbnail, scrapedcalidad, scrapedtitle in matches[min:max]:
url = host + scrapedurl
calidad = scrapedcalidad
title = scrapedtitle.decode('utf-8')
thumbnail = scrapedthumbnail
fanart = 'https://s32.postimg.org/gh8lhbkb9/seodiv.png'

itemlist.append(
Item(channel=item.channel,
if not 'xxxxxx' in scrapedtitle:
itemlist.append(
Item(channel=item.channel,
action="temporadas",
title=title, url=url,
thumbnail=thumbnail,

@@ -67,7 +75,13 @@ def todas(item):
language=language,
context=autoplay.context
))

tmdb.set_infoLabels(itemlist)
if len(itemlist)>28:
itemlist.append(
Item(channel=item.channel,
title="[COLOR cyan]Página Siguiente >>[/COLOR]",
url=item.url, action="todas",
page=item.page + 1))
return itemlist
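The todas() hunk paginates a single scraped list in memory: each call slices a window out of `matches` using `item.page`, and a "Página Siguiente >>" entry is only appended while the window comes back essentially full. A small, runnable sketch of the same windowing arithmetic (the list contents are invented):

# Sketch of the in-memory paging used by todas() above.
matches = ["serie %d" % n for n in range(100)]   # invented stand-in for the regex matches

def window(page, per_page=30):
    start = page * per_page
    start = int(start) - int(page)     # same adjustment as the diff: windows end up 29 wide
    end = start + per_page - 1
    return matches[start:end]

print(len(window(0)))                  # 29
print(window(1)[0])                    # 'serie 29' - pages are contiguous
has_next = len(window(1)) > 28         # threshold used before appending "Página Siguiente >>"
print(has_next)                        # True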
@@ -222,16 +236,31 @@ def episodiosxtemp(item):
def findvideos(item):
logger.info()
itemlist = []
lang=[]
data = httptools.downloadpage(item.url).data
video_items = servertools.find_video_items(item)

data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
language_items=scrapertools.find_single_match(data,
'<ul class=tabs-sidebar-ul>(.+?)<\/ul>')
matches=scrapertools.find_multiple_matches(language_items,
'<li><a href=#ts(.+?)><span>(.+?)<\/span><\/a><\/li>')
for idl,scrapedlang in matches:
if int(idl)<5 and int(idl)!=1:
lang.append(scrapedlang)
i=0
logger.info(lang)
for videoitem in video_items:
videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
videoitem.language = scrapertools.find_single_match(data, '<span class="f-info-title">Idioma:<\/span>\s*<span '
'class="f-info-text">(.*?)<\/span>')
#videoitem.language = scrapertools.find_single_match(data, '<span class="f-info-title">Idioma:<\/span>\s*<span '
# 'class="f-info-text">(.*?)<\/span>')
if len(lang)<=i:
videoitem.language=lang[i]
else:
videoitem.language=lang[len(lang)-1]
videoitem.title = item.contentSerieName + ' (' + videoitem.server + ') (' + videoitem.language + ')'
videoitem.quality = 'default'
videoitem.context = item.context
i=i+1
itemlist.append(videoitem)

# Requerido para FilterTools
@@ -29,7 +29,8 @@ list_servers = ['powvideo',
'nowvideo',
'gamovideo',
'kingvid',
'vidabc'
'vidabc',
'streamixcloud'
]
@@ -102,11 +103,14 @@ def extract_series_from_data(item, data):
else:
action = "findvideos"

context1=[filtertools.context(item, list_idiomas, CALIDADES), autoplay.context]
context = filtertools.context(item, list_idiomas, CALIDADES)
context2 = autoplay.context
context.extend(context2)

itemlist.append(item.clone(title=name, url=urlparse.urljoin(HOST, url),
action=action, show=name,
thumbnail=img,
context=context1))
context=context))

more_pages = re.search('pagina=([0-9]+)">>>', data)
if more_pages:

@@ -305,11 +309,11 @@ def findvideos(item):

for i in range(len(list_links)):
a=list_links[i].title
b=a.lstrip('Ver en')
b=a[a.find("en") + 2:]
c=b.split('[')
d=c[0].rstrip( )
d=d.lstrip( )
list_links[i].server=d
list_links[i].server=d.replace("streamix", "streamixcloud")

list_links = servertools.get_servers_itemlist(list_links)
autoplay.start(list_links, item)
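Both this hunk and the serieslan change below swap `context1=[filtertools.context(...), autoplay.context]` for building one list and calling `.extend()`, presumably because both helpers already return lists of context-menu entries, so the old form nested them instead of concatenating them. A minimal sketch of the difference (the dict values are invented):

# Both helpers return lists of context-menu dicts in this sketch.
filter_ctx = [{"title": "Filtrar idioma"}]
autoplay_ctx = [{"title": "Configurar AutoPlay"}]

nested = [filter_ctx, autoplay_ctx]   # old style: a list containing two lists
flat = list(filter_ctx)               # new style from the diff
flat.extend(autoplay_ctx)             # one flat list of dicts

print(type(nested[0]), type(flat[0])) # <list> vs <dict>: only the flat form holds plain entries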
@@ -67,19 +67,21 @@ def lista(item):
title = name
url = host + link
scrapedthumbnail = host + img
context1=[renumbertools.context(item), autoplay.context]
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)

itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,
context=context1))
logger.info("gasdfsa "+str(b))
context=context))
if b<29:
a=a+1
url="https://serieslan.com/pag-"+str(a)
if b>10:
itemlist.append(
Item(channel=item.channel, title="Página Siguiente >>", url=url, action="lista", page=0))
Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=url, action="lista", page=0))
else:
itemlist.append(
Item(channel=item.channel, title="Página Siguiente >>", url=item.url, action="lista", page=item.page + 1))
Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url, action="lista", page=item.page + 1))

tmdb.set_infoLabels(itemlist)
return itemlist
@@ -90,7 +92,6 @@ def episodios(item):

itemlist = []
data = httptools.downloadpage(item.url).data
logger.debug("info %s " % data)
# obtener el numero total de episodios
total_episode = 0

@@ -136,7 +137,7 @@ def episodios(item):
thumbnail=scrapedthumbnail))

if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))

return itemlist

@@ -183,7 +184,6 @@ def findvideos(item):
data = eval(data)

if type(data) == list:
logger.debug("inside")
video_url = url_server % (txc(ide, base64.decodestring(data[2])))
server = "openload"
if " SUB" in item.title:

@@ -193,7 +193,11 @@ def findvideos(item):
else:
lang = "Latino"
title = "Enlace encontrado en " + server + " [" + lang + "]"
itemlist.append(Item(channel=item.channel, action="play", title=title, show=show, url=video_url, plot=item.plot,
if item.contentChannel=='videolibrary':
itemlist.append(item.clone(channel=item.channel, action="play", url=video_url,
thumbnail=thumbnail, server=server, folder=False))
else:
itemlist.append(Item(channel=item.channel, action="play", title=title, show=show, url=video_url, plot=item.plot,
thumbnail=thumbnail, server=server, folder=False))

autoplay.start(itemlist, item)

@@ -201,17 +205,3 @@ def findvideos(item):
else:
return []


def play(item):
logger.info()
itemlist = []
# Buscamos video por servidor ...
devuelve = servertools.findvideosbyserver(item.url, item.server)
if not devuelve:
# ...sino lo encontramos buscamos en todos los servidores disponibles
devuelve = servertools.findvideos(item.url, skip=True)
if devuelve:
# logger.debug(devuelve)
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
return itemlist
@@ -34,6 +34,14 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades -Terror",
"default": true,
"enabled": true,
"visible": true
}
]
}
}
@@ -252,10 +252,13 @@ def newest(categoria):
item.extra = 'estrenos/'
try:
if categoria == 'peliculas':
item.url = host + '/category/estrenos/'
item.url = host + '/genre/estrenos/'

elif categoria == 'infantiles':
item.url = host + '/category/infantil/'
item.url = host + '/genre/animacion/'

elif categoria == 'terror':
item.url = host + '/genre/terror/'

itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
@@ -45,15 +45,15 @@ def getmainlist(view="thumb_"):
context=[{"title": "Configurar Descargas", "channel": "setting", "config": "downloads",
"action": "channel_config"}]))

thumb_configuracion = "setting_%s.png" % 0 # config.get_setting("plugin_updates_available")
thumb_setting = "setting_%s.png" % 0 # config.get_setting("plugin_updates_available")

itemlist.append(Item(title=config.get_localized_string(30100), channel="setting", action="mainlist",
thumbnail=get_thumb(thumb_configuracion, view),
thumbnail=get_thumb(thumb_setting, view),
category=config.get_localized_string(30100), viewmode="list"))
# TODO REVISAR LA OPCION AYUDA
# itemlist.append(Item(title=config.get_localized_string(30104), channel="help", action="mainlist",
# thumbnail=get_thumb("help.png", view),
# category=config.get_localized_string(30104), viewmode="list"))

itemlist.append(Item(title=config.get_localized_string(30104), channel="help", action="mainlist",
thumbnail=get_thumb("help.png", view),
category=config.get_localized_string(30104), viewmode="list"))
return itemlist
@@ -23,8 +23,8 @@ ficherocookies = os.path.join(config.get_data_path(), "cookies.dat")

# Headers por defecto, si no se especifica nada
default_headers = dict()
default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0"
default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3"
default_headers["Accept-Charset"] = "UTF-8"
default_headers["Accept-Encoding"] = "gzip"
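These defaults apply to every request httptools makes; other hunks in this change set (gamovideo, gvideo) show that individual callers can still pass a headers argument to downloadpage for a single request. A hedged usage sketch (the URL and Referer value are invented, only the call shape mirrors the diff):

# Per-request header override, as the gamovideo/gvideo hunks do.
from core import httptools

custom = {"Referer": "https://example.com/embed"}   # invented example value
data = httptools.downloadpage("https://example.com/video", headers=custom).data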
@@ -398,9 +398,9 @@ def set_context_commands(item, parent_item):
if item.contentType in ['movie','tvshow']and item.channel != 'search':
# Buscar en otros canales
if item.contentSerieName!='':
item.extra=item.contentSerieName
item.wanted=item.contentSerieName
else:
item.extra = item.contentTitle
item.wanted = item.contentTitle
context_commands.append(("[COLOR yellow]Buscar en otros canales[/COLOR]",
"XBMC.Container.Update (%s?%s)" % (sys.argv[0],
item.clone(channel='search',
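This hunk and the do_search() hunk earlier are two halves of the same change: the "Buscar en otros canales" context command now stores the query in item.wanted instead of overloading item.extra, and do_search() falls back to item.extra when wanted is empty so older callers keep working. A small runnable sketch of that fallback (Item here is a stand-in and the values are invented):

# Sketch of the wanted/extra fallback introduced in do_search().
class Item(object):                      # minimal stand-in for core.item.Item
    def __init__(self, extra='', wanted=''):
        self.extra = extra
        self.wanted = wanted

def query_for(item):
    if item.wanted != '':
        return item.wanted
    return item.extra

print(query_for(Item(extra='categoria', wanted='Breaking Bad')))  # 'Breaking Bad'
print(query_for(Item(extra='texto de busqueda antiguo')))         # falls back to extra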
@@ -412,8 +412,12 @@ class SettingsWindow(xbmcgui.WindowXMLDialog):
self.addControl(control)

control.setVisible(False)
control.setLabel(c["label"])
control.setText(self.values[c["id"]])
# frodo fix
s = self.values[c["id"]]
if s is None:
s = ''
control.setText(s)
# control.setText(self.values[c["id"]])
control.setWidth(self.controls_width - 5)
control.setHeight(self.height_control)
@@ -40,6 +40,7 @@
</category>
<category label="Opciones Visuales">
<setting id="icon_set" type="labelenum" label="Set de iconos" values="default|dark" default="default"/>
<setting id="infoplus_set" type="labelenum" label="Opción visual Infoplus" values="Sin animación|Con animación" default="Sin animación"/>
</category>
<category label="Otros">
<setting label="Info de películas/series en menú contextual" type="lsep"/>
@@ -37,12 +37,16 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
# Para obtener el f y el fxfx
js_fxfx = "https://www." + scrapertools.find_single_match(data, """(?is)(flashx.tv/js/code.js.*?[^(?:'|")]+)""")
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.tv/jss/coder.js.*?[^(?:'|")]+)""")
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
for f, v in matches:
pfxfx += f + "=" + v + "&"
logger.info("mfxfxfx1= %s" %js_fxfx)
logger.info("mfxfxfx2= %s" %pfxfx)
if pfxfx == "":
pfxfx = "ss=yes&f=fail&fxfx=6"
coding_url = 'https://www.flashx.tv/flashx.php?%s' %pfxfx
# {f: 'y', fxfx: '6'}
flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
@@ -7,13 +7,12 @@ from core import scrapertools
from lib import jsunpack
from platformcode import logger

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:57.0) Gecko/20100101 ' \
'Firefox/57.0'}
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:58.0) Gecko/20100101 Firefox/58.0'}


def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, headers=headers).data
data = httptools.downloadpage(page_url).data

if "File was deleted" in data or "Not Found" in data or "File was locked by administrator" in data:
return False, "[Gamovideo] El archivo no existe o ha sido borrado"

@@ -25,7 +24,7 @@ def test_video_exists(page_url):

def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, headers=headers).data
data = httptools.downloadpage(page_url).data
packer = scrapertools.find_single_match(data,
"<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
if packer != "":
@@ -30,12 +30,20 @@ def get_video_url(page_url, user="", password="", video_password=""):
streams =[]
logger.debug('page_url: %s'%page_url)
if 'googleusercontent' in page_url:
data = httptools.downloadpage(page_url, follow_redirects = False, headers={"Referer": page_url})
url=data.headers['location']

response = httptools.downloadpage(page_url, follow_redirects = False, cookies=False, headers={"Referer": page_url})
url=response.headers['location']
cookies = ""
cookie = response.headers["set-cookie"].split("HttpOnly, ")
for c in cookie:
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = "|Cookie=" + cookies

quality = scrapertools.find_single_match (url, '.itag=(\d+).')

streams.append((quality, url))
headers_string=""

else:
response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
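For googleusercontent links the new code keeps the cookies from the redirect response and builds headers_string = "|Cookie=" + cookies; appending that string to the stream URL is, as far as I can tell, the usual Kodi convention for sending headers along with the play request, an assumption not shown in this diff. A runnable sketch of how the pieces combine (the Set-Cookie value and stream URL are invented):

# Sketch: collapsing Set-Cookie into the "url|Cookie=..." form used for playback.
set_cookie = "AUTH_abc=1; path=/; HttpOnly, DRIVE_STREAM=xyz; path=/"   # invented header value

cookies = ""
for c in set_cookie.split("HttpOnly, "):        # same split the diff performs
    cookies += c.split(";", 1)[0] + "; "

headers_string = "|Cookie=" + cookies
stream_url = "https://example.googleusercontent.com/videoplayback?itag=22"  # invented URL
print(stream_url + headers_string)             # URL the player would receive, cookies attached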
@@ -8,9 +8,11 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)

data = httptools.downloadpage(page_url).data
if "video_error.mp4" in data:
response = httptools.downloadpage(page_url)
if "video_error.mp4" in response.data:
return False, "[Stormo] El archivo no existe o ha sido borrado"
if response.code == 451:
return False, "[Stormo] El archivo ha sido borrado por problemas legales."

return True, ""
@@ -8,7 +8,6 @@ from platformcode import logger

def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)

data = httptools.downloadpage(page_url).data
if "Not Found" in data:
return False, "[streamixcloud] El archivo no existe o ha sido borrado"

@@ -21,7 +20,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
packed = scrapertools.find_single_match(data,
patron = "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script"
packed = scrapertools.find_single_match(data, patron)
data = jsunpack.unpack(packed)
media_url = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",')
ext = scrapertools.get_filename_from_url(media_url[0])[-4:]
@@ -3,8 +3,8 @@
"find_videos": {
"patterns": [
{
"pattern": "(http://vshare.io/v/[\\w]+[^\"']*)[\"']",
"url": "\\1"
"pattern": "(vshare.io/v/[a-zA-Z0-9/-]+)",
"url": "http://\\1"
}
]
},
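The pattern change loosens matching: the old expression only accepted absolute http://vshare.io/v/... URLs terminated by a quote, while the new one matches the bare vshare.io/v/<id> fragment (so protocol-relative or https embeds are caught too) and re-adds http:// via the url template. A quick check of both patterns with Python's re module; the sample HTML snippet is invented:

import re

html = '<iframe src="//vshare.io/v/abc123/width-650/"></iframe>'  # invented embed snippet

old = r"(http://vshare.io/v/[\w]+[^\"']*)[\"']"
new = r"(vshare.io/v/[a-zA-Z0-9/-]+)"

print(re.findall(old, html))                            # [] - no scheme, the old pattern misses it
print(["http://" + m for m in re.findall(new, html)])   # ['http://vshare.io/v/abc123/width-650/']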
@@ -40,11 +40,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
arrayResult = [chr(int(value) - substract) for value in fields.group(1).split(",")]
strResult = "".join(arrayResult)
logger.debug(strResult)

videoSources = re.findall("<source[\s]+src=[\"'](?P<url>[^\"']+)[^>]+label=[\"'](?P<label>[^\"']+)", strResult)

for url, label in videoSources:
logger.debug("[" + label + "] " + url)
video_urls.append([label, url])

video_urls.sort(key=lambda i: int(i[0].replace("p","")))
return video_urls