Merge remote-tracking branch 'alfa-addon/master'
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="1.9.1" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="1.9.2" provider-name="Alfa Addon">
<requires>
    <import addon="xbmc.python" version="2.1.0"/>
    <import addon="script.module.libtorrent" optional="true"/>
@@ -19,15 +19,13 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
[I]- cinetux
[I]- pelis24
- vidoza
- canalpelis
- pelisplanet
- cinetux
- peliculasrey
- newpct1
- pelisplus
- torrentlocura - fix para usar videoteca y en mediaserver
- animeflv
- fixes internos[/I]
[COLOR green]Gracias a [COLOR yellow][B]xabier100[/B][/COLOR] y [COLOR yellow][B]fermintxu[/B][/COLOR] por su colaboración en esta versión[/COLOR]
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

@@ -1,11 +1,14 @@
# -*- coding: utf-8 -*-

import re
import time
import urlparse
import urllib

from channels import renumbertools
from core import httptools
from core import jsontools
from core import servertools
from core import scrapertools
from core.item import Item
from platformcode import logger
@@ -273,42 +276,54 @@ def findvideos(item):

    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)

    list_videos = scrapertools.find_multiple_matches(data, 'video\[\d\]\s=\s\'<iframe.+?src="([^"]+)"')
    list_videos.extend(scrapertools.find_multiple_matches(data, 'href="http://ouo.io/s/y0d65LCP\?s=([^"]+)"'))
    # logger.info("data=%s " % list_videos)

    download_list = scrapertools.find_multiple_matches(data, 'href="http://ouo.io/s/y0d65LCP\?s=([^"]+)"')
    for i in download_list:
        list_videos.append(urllib.unquote_plus(i))
    aux_url = []
    cldup = False
    for e in list_videos:
        if e.startswith("https://s3.animeflv.com/embed.php?"):
            server = scrapertools.find_single_match(e, 'server=(.*?)&')
            e = e.replace("embed", "check").replace("https", "http")
            data = httptools.downloadpage(e).data.replace("\\", "")
        url_api = "https://s3.animeflv.com/check.php?server=%s&v=%s"
        # izanagi, yourupload, hyperion
        if e.startswith("https://s3.animeflv.com/embed"):
            server, v = scrapertools.find_single_match(e, 'server=([^&]+)&v=(.*?)$')
            data = httptools.downloadpage(url_api % (server, v)).data.replace("\\", "")

            if '{"error": "Por favor intenta de nuevo en unos segundos", "sleep": 3}' in data:
                import time
                time.sleep(3)
                data = httptools.downloadpage(e).data.replace("\\", "")
                data = httptools.downloadpage(url_api % (server, v)).data.replace("\\", "")

            video_urls = []
            if server == "gdrive":
                data = jsontools.load(data)
                for s in data.get("sources", []):
                    video_urls.append([s["label"], s["type"], s["file"]])

                if video_urls:
                    video_urls.sort(key=lambda v: int(v[0]))
                    itemlist.append(item.clone(title="Enlace encontrado en %s" % server, action="play",
                                               video_urls=video_urls))
            else:
                if server != "hyperion":
                    url = scrapertools.find_single_match(data, '"file":"([^"]+)"')
                    if url:
                        itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play"))

                else:
                    # pattern = '"direct":"([^"]+)"'
                    # url = scrapertools.find_single_match(data, pattern)
                    # itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play"))

                    pattern = '"label":([^,]+),"type":"video/mp4","file":"([^"]+)"'
                    matches = scrapertools.find_multiple_matches(data, pattern)

                    video_urls = []
                    for label, url in matches:
                        video_urls.append([label, "mp4", url])
                    if video_urls:
                        video_urls.sort(key=lambda u: int(u[0]))
                        itemlist.append(item.clone(title="Enlace encontrado en %s" % server, action="play",
                                                   video_urls=video_urls))

        else:
            if e.startswith("https://cldup.com") and cldup == False:
                itemlist.append(item.clone(title="Enlace encontrado en Cldup",
                                           action="play",
                                           url=e))
                cldup = True
            aux_url.append(e)

    from core import servertools
    itemlist.extend(servertools.find_video_items(data=",".join(aux_url)))
    for videoitem in itemlist:
        videoitem.fulltitle = item.fulltitle

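The hunk above polls the animeflv check API and, when it answers with the {"error": ..., "sleep": 3} payload, waits and fetches once more. A minimal sketch of that pattern with a bounded retry loop, assuming a generic fetch callable standing in for httptools.downloadpage(url).data:

# Sketch only: generic retry on the API's throttle payload; fetch() is a
# stand-in for httptools.downloadpage(url).data.
import json
import time

def fetch_with_backoff(fetch, url, retries=3):
    data = fetch(url)
    for _ in range(retries):
        try:
            payload = json.loads(data)
        except ValueError:
            break  # not JSON, so not the throttle message
        if not isinstance(payload, dict) or "sleep" not in payload:
            break  # a real answer, use it
        time.sleep(payload.get("sleep", 3))  # wait the advertised interval
        data = fetch(url)
    return data
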
@@ -381,11 +381,10 @@ def play(item):
    itemlist.append(
        Item(channel=item.channel,
             action="play",
             title="%s",
             fulltitle=item.fulltitle,
             thumbnail=item.thumbnail,
             server="",
             url=item.url
             ))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    itemlist = servertools.get_servers_itemlist(itemlist)
    return itemlist

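The dropped second argument to servertools.get_servers_itemlist was a title formatter: the title="%s" placeholder above got filled with the capitalized server name. A stand-alone illustration of that pattern (the Stub class is made up for the demo):

class Stub(object):
    def __init__(self, title, server):
        self.title = title
        self.server = server

format_title = lambda i: i.title % i.server.capitalize()
print(format_title(Stub("%s", "openload")))  # -> Openload
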
@@ -2,6 +2,7 @@

import re

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
@@ -374,7 +375,7 @@ def get_episodios(item):
    paginacion = scrapertools.get_match(data, patron)
    # logger.info("[newpct1.py] get_episodios: paginacion= " + paginacion)
    if "Next" in paginacion:
        url_next_page = scrapertools.get_match(paginacion, '<a href="(http[^>]+)>Next</a>')[:-1]
        url_next_page = "http" + scrapertools.get_match(paginacion, '<a href="http([^>]+)>Next</a>')[:-1]
        url_next_page = url_next_page.replace(" ", "%20")
        # logger.info("[newpct1.py] get_episodios: url_next_page= " + url_next_page)
        itemlist.append(

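Both versions of the url_next_page line extract the same link; the trailing [:-1] exists because [^>]+ also swallows the closing quote of the href attribute. A quick check against a made-up snippet of the paginator markup:

import re

paginacion = '<a href="http://www.newpct1.com/serie/foo/pg/2">Next</a>'  # made-up markup
capture = re.search('<a href="http([^>]+)>Next</a>', paginacion).group(1)
print(capture)                # ://www.newpct1.com/serie/foo/pg/2"  (note the stray quote)
print("http" + capture[:-1])  # http://www.newpct1.com/serie/foo/pg/2
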
@@ -3,10 +3,11 @@
import re
import urlparse

from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from platformcode import logger, config


def mainlist(item):
@@ -28,7 +29,7 @@ def PorFecha(item):
    logger.info()

    # Descarga la pagina
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<section class="lanzamiento">(.*?)</section>')
    logger.info("data=" + data)

@@ -54,7 +55,7 @@ def Idiomas(item):
    logger.info()

    # Descarga la pagina
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<section class="idioma">(.*?)</section>')
    logger.info("data=" + data)

@@ -80,7 +81,7 @@ def calidades(item):
    logger.info()

    # Descarga la pagina
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<section class="calidades">(.*?)</section>')
    logger.info("data=" + data)

@@ -106,7 +107,7 @@ def generos(item):
    logger.info()

    # Descarga la pagina
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<section class="generos">(.*?)</section>')
    logger.info("data=" + data)

@@ -121,6 +122,8 @@ def generos(item):
    plot = ""
    url = urlparse.urljoin(item.url, scrapedurl)
    logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
    if "Adulto" in title and config.get_setting("adult_mode") == 0:
        continue
    itemlist.append(
        Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
             fulltitle=title, viewmode="movie"))
@@ -148,7 +151,7 @@ def peliculas(item):
    logger.info()

    # Descarga la pagina
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    logger.info("data=" + data)
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)

@@ -178,7 +181,7 @@ def findvideos(item):
    logger.info()

    # Descarga la pagina
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    # logger.info("data="+data)

    # Extrae las entradas (carpetas)

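Every hunk in this file makes the same swap: the legacy scrapertools.cache_page(url) call becomes httptools.downloadpage(url).data. A small compatibility shim, sketched under the assumption that both call shapes are exactly as shown above:

def get_page(url):
    # Prefer the newer httptools API; fall back to the legacy helper if it
    # is unavailable (assumption: only the call shapes seen in this diff).
    try:
        from core import httptools
        return httptools.downloadpage(url).data
    except ImportError:
        from core import scrapertools
        return scrapertools.cache_page(url)
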
@@ -20,7 +20,8 @@
"categories": [
    "latino",
    "movie",
    "vos"
    "vos",
    "direct"
],
"settings": [
    {

plugin.video.alfa/channels/pelis24.py (Executable file → Normal file)
@@ -5,8 +5,11 @@ import sys

from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from core import tmdb


thumbnail_host = 'https://github.com/master-1970/resources/raw/master/images/squares/pelis24.PNG'

@@ -18,14 +21,14 @@ def mainlist(item):
    item.action = "peliculas"
    itemlist.append(item.clone(title="Novedades", url="http://www.pelis24.tv/ultimas-peliculas/"))
    itemlist.append(item.clone(title="Estrenos", url="http://pelis24.tv/estrenos/"))
    itemlist.append(item.clone(title="", folder=False))
    itemlist.append(item.clone(title="Calidad HD", url="https://pelis24.tv/xfsearch/calidad/HD"))
    itemlist.append(item.clone(title="Calidad HQ", url="https://pelis24.tv/xfsearch/calidad/HQ"))
    itemlist.append(item.clone(title="Calidad SD", url="https://pelis24.tv/xfsearch/calidad/SD"))
    itemlist.append(item.clone(title="Castellano", url="http://pelis24.tv/pelicula-ca/"))
    itemlist.append(item.clone(title="Latino", url="http://pelis24.tv/pelicula-latino/"))
    itemlist.append(item.clone(title="Latino", url="https://pelis24.tv/pelicula-la/"))
    itemlist.append(item.clone(title="Versión original", url="http://pelis24.tv/peliculasvo/"))
    itemlist.append(item.clone(title="Versión original subtitulada", url="http://pelis24.tv/peliculasvose/"))

    itemlist.append(item.clone(title="", folder=False))
    itemlist.append(item.clone(title="Filtrar por género", action="genero", url="http://pelis24.tv/tags/"))
    itemlist.append(item.clone(title="Versión original subtitulada", url="http://pelis24.tv/peliculas-su/"))
    itemlist.append(item.clone(title="Filtrar por género", action="genero", url="http://pelis24.tv"))
    itemlist.append(item.clone(title="Buscar", action="search", url="http://www.pelis24.tv/"))
    return itemlist

@@ -105,12 +108,15 @@ def buscar(item):
def genero(item):
    logger.info()
    itemlist = []
    generos = ["Animación", "Aventuras", "Bélico", "Ciencia+ficción", "Crimen", "Comedia",
               "Deporte", "Drama", "Fantástico", "Infantil", "Musical", "Romance", "Terror", "Thriller"]
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron = '<li><a href="\/xfsearch\/genero\/([^"]+)"(?: title=".*?").*?(.*?)<\/a><\/li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for g in generos:
        itemlist.append(Item(channel=item.channel, action="peliculas", title=g.replace('+', ' '),
                             thumbnail=thumbnail_host, url=item.url + g + "/"))
    for scrapedurl, scrapedtitle in matches:
        url = '%s/xfsearch/genero/%s' % (item.url, scrapedurl)
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedurl,
                             thumbnail=thumbnail_host, url=url))

    return itemlist

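The rewritten genero() scrapes the genre list from the page instead of relying only on the hardcoded generos list. Running the new patron against a made-up fragment of the listing markup shows why the code uses the slug (scrapedurl) rather than the second capture as the title:

import re

data = '<li><a href="/xfsearch/genero/accion" title="Acción">Acción</a></li>'  # made-up markup
patron = '<li><a href="\/xfsearch\/genero\/([^"]+)"(?: title=".*?").*?(.*?)<\/a><\/li>'
print(re.compile(patron, re.DOTALL).findall(data))
# -> [('accion', '>Acción')]  — the lazy groups leave a stray '>' in the title capture
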
@@ -126,8 +132,8 @@ def peliculas(item):
    patron = '<div class="movie-img img-box">.*?'
    patron += '<img src="([^"]+).*?'
    patron += 'href="([^"]+).*?'
    patron += '<div class="movie-series">([^<]+)</div>'
    patron += '<span><a href=[^>]+>([^<]+)</a>'
    patron += '<div class="movie-series">(.*?)<\/.*?'
    patron += '<a href=[^>]+>([^<]+)</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)

@@ -139,16 +145,71 @@ def peliculas(item):
    if not thumbnail.startswith("http"):
        thumbnail = "http://www.pelis24.tv" + thumbnail
    contentTitle = title.split("/")[0]
    year = scrapertools.find_single_match(contentTitle, '\((\d{4})\)')
    contentTitle = contentTitle.replace(' (%s)' % year, '')
    title = "%s (%s)" % (contentTitle, quality)

    itemlist.append(
        Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
             contentQuality=quality, contentTitle=contentTitle))
             contentQuality=quality, contentTitle=contentTitle, infoLabels={'year': year}))
    if item.title != 'Versión original':
        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Extrae el paginador
    next_page = scrapertools.find_single_match(data, '<span class="pnext"><a href="([^"]+)')
    next_page = scrapertools.find_single_match(data, '<span class="pnext".*?<a href="([^"]+)')
    if next_page:
        itemlist.append(Item(channel=item.channel, action="peliculas", title=">> Página siguiente",
                             thumbnail=thumbnail_host, url=next_page))
    return itemlist

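The new peliculas() lines pull the year out of the scraped title so TMDb can disambiguate remakes. A worked example with a made-up title string (re.search stands in for scrapertools.find_single_match):

import re

title = "Vengadores: Infinity War (2018) / HD"  # made-up scraped title
contentTitle = title.split("/")[0]
year = re.search('\((\d{4})\)', contentTitle).group(1)
contentTitle = contentTitle.replace(' (%s)' % year, '').strip()
print(contentTitle, year)  # Vengadores: Infinity War 2018
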
def findvideos(item):
    itemlist = []
    duplicated = []

    data = httptools.downloadpage(item.url).data
    patron = '<div class="player-box" id="tabs-(\d+)"><iframe data-src="(.*?)".*?allowfullscreen'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for id, scrapedurl in matches:
        lang = scrapertools.find_single_match(data, '<li><a href="#tabs-%s"><img src=".*?" alt="(.*?)".*?\/>' % id)
        server = servertools.get_server_from_url(scrapedurl)
        title = '%s (%s) (%s)' % (item.title, server, lang)
        thumbnail = ''
        if 'enlac' in scrapedurl:

            if 'google' in scrapedurl:
                server = 'gvideo'
            elif 'openload' in scrapedurl:
                server = 'openload'

            title = '%s (%s) (%s)' % (item.title, server, lang)
            scrapedurl = scrapedurl.replace('embed', 'stream')
            gdata = httptools.downloadpage(scrapedurl).data
            url_list = servertools.findvideosbyserver(gdata, server)
            for url in url_list:
                if url[1] not in duplicated:
                    thumbnail = servertools.guess_server_thumbnail(server)
                    itemlist.append(item.clone(title=title, url=url[1], action='play', server=server,
                                               thumbnail=thumbnail))
                    duplicated.append(url[1])

        elif '.html' in scrapedurl:
            url_list = servertools.findvideosbyserver(data, server)
            for url in url_list:
                if url[1] not in duplicated:
                    thumbnail = servertools.guess_server_thumbnail(server)
                    itemlist.append(item.clone(title=title, url=url[1], action='play', server=server,
                                               thumbnail=thumbnail))
                    duplicated.append(url[1])
        else:
            url = scrapedurl
            if url not in duplicated:
                thumbnail = servertools.guess_server_thumbnail(server)
                itemlist.append(item.clone(title=title, url=url, action='play', server=server,
                                           thumbnail=thumbnail))
                duplicated.append(url)

    return itemlist

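findvideos() above uses the duplicated list to avoid emitting the same resolved URL from several player tabs. The same guard with a set, which is the idiomatic constant-time version (sketch, URLs made up):

seen = set()
unique = []
for url in ["http://a/v.mp4", "http://b/v.mp4", "http://a/v.mp4"]:  # made-up URLs
    if url in seen:
        continue  # already emitted from another tab
    seen.add(url)
    unique.append(url)
print(unique)  # ['http://a/v.mp4', 'http://b/v.mp4']
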
@@ -19,27 +19,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
        request = urllib2.Request(jk_url, headers=request_headers)
        response = urllib2.urlopen(request)
        video_urls.append([".mp4 [redirects]", response.geturl()])
    else:
        headers = []
        headers.append(["User-Agent",
                        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.94 Safari/537.36"])
        headers.append(["Accept-Encoding", "gzip,deflate,sdch"])
        page_url = page_url.replace("https://animeflv.net/embed_izanagi.php?key=",
                                    "https://s2.animeflv.net/izanagi.php?id=")
        page_url = page_url.replace("http://animeflv.net/embed_yotta.php?key=",
                                    "https://s1.animeflv.com/gdrive.php?id=")
        data = scrapertools.cache_page(page_url, headers=headers)
        data = data.replace("\\\\", "")
        data = data.replace("\\/", "/")
        patronvideos = '"file":"(.+?)"'
        matches = re.compile(patronvideos, re.DOTALL).findall(data)
        for match in matches:
            video_urls.append([".mp4 [redirects]", match])

        patronvideos = '(http://www.animeid.+?)"'
        matches = re.compile(patronvideos, re.DOTALL).findall(data)
        for match in matches:
            response = urllib2.urlopen(match)
            video_urls.append([".mp4 [redirects]", response.geturl()])

    return video_urls

@@ -8,7 +8,7 @@ from platformcode import logger
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "Page not found" in data:
    if "Page not found" in data or "File was deleted" in data:
        return False, "[vidoza] El archivo no existe o ha sido borrado"
    elif "processing" in data:
        return False, "[vidoza] El vídeo se está procesando"
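The vidoza fix simply adds a second marker string to the existing branch. A table-driven variant of that check, sketched with only the messages visible in the hunk above:

# -*- coding: utf-8 -*-
ERROR_MARKERS = [
    ("Page not found", "[vidoza] El archivo no existe o ha sido borrado"),
    ("File was deleted", "[vidoza] El archivo no existe o ha sido borrado"),
    ("processing", "[vidoza] El vídeo se está procesando"),
]

def classify(data):
    # Returns (exists, message) in the style of test_video_exists.
    for marker, message in ERROR_MARKERS:
        if marker in data:
            return False, message
    return True, ""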