Merge remote-tracking branch 'alfa-addon/master' into Fixes

unknown committed on 2018-04-20 13:52:23 -03:00
4 changed files with 111 additions and 73 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.5.9" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.5.10" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,17 +19,10 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» torrentrapid » torrentlocura
» mispelisyseries » descargas2020
» mejortorrent » tvsinpagar
» cinefox » newpct
» peliculasdk » netutv
» pepecine » seriespapaya
» doomtv » dostream
» pelisgratis » estream
» plusdede
» documentalesonline » flashx
» danimados
¤ arreglos internos
¤ Gracias a @pipcat,@Rhinox117,@lopezvg por colaborar en ésta versión
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>
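The only functional change in this file is the version bump from 2.5.9 to 2.5.10; the rest of the hunk updates the <news> changelog. Kodi compares addon versions as dotted numeric components, so 2.5.10 correctly sorts after 2.5.9. A minimal sketch of that comparison (parse_version is a hypothetical helper, not part of plugin.video.alfa):

# parse_version is a hypothetical helper, not part of the addon.
def parse_version(v):
    return tuple(int(part) for part in v.split("."))

# Numeric comparison handles the 9 -> 10 rollover correctly...
assert parse_version("2.5.10") > parse_version("2.5.9")
# ...while a naive string comparison would not ("1" < "9").
assert not ("2.5.10" > "2.5.9")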

View File

@@ -148,11 +148,11 @@ def findvideos(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data1 = scrapertools.find_single_match(data,
'<div id="playex" .+?>(.+?)<\/nav><\/div><\/div>')
patron='src="(.+?)"'
itemla = scrapertools.find_multiple_matches(data1,patron)
if "favicons?domain" in itemla[1]:
if "favicons?domain" in itemla[0]:
method = 1
data2=scrapertools.find_single_match(data, "var \$user_hashs = {(.+?)}")
patron='".+?":"(.+?)"'
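The fix here moves the favicon check from itemla[1] to itemla[0]: the marker is now expected in the first src attribute scraped from the playex block. A small sketch of the idea, assuming scrapertools.find_multiple_matches behaves like re.findall (the sample markup is illustrative, not taken from the site):

import re

# Illustrative markup only; on the real page the favicon <img> now precedes
# the player <iframe> inside the "playex" block.
data1 = ('<img src="https://www.google.com/s2/favicons?domain=example.com">'
         '<iframe src="https://somehost.example/embed/abc123">')
itemla = re.findall('src="(.+?)"', data1)
if "favicons?domain" in itemla[0]:
    # The favicon is the first src match, hence the index change from 1 to 0.
    method = 1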

View File

@@ -4,55 +4,79 @@ import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
-from platformcode import logger
+from channelselector import get_thumb
+from platformcode import logger
HOST = "http://documentales-online.com/"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Novedades", action="listado", url=HOST,
itemlist.append(Item(channel=item.channel, title="Novedades", action="videos", url=HOST,
thumbnail=get_thumb('newest', auto=True)))
itemlist.append(Item(channel=item.channel, title="Destacados", action="seccion", url=HOST, extra="destacados",
thumbnail=get_thumb('hot', auto=True)))
itemlist.append(Item(channel=item.channel, title="Series", action="seccion", url=HOST, extra="series",
itemlist.append(Item(channel=item.channel, title="Series destacadas", action="seccion", url=HOST, extra="series",
thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, title="Categorías", action="categorias", url=HOST,
thumbnail=get_thumb('categories', auto=True)))
-# itemlist.append(Item(channel=item.channel, title="Top 100", action="categorias", url=HOST))
-# itemlist.append(Item(channel=item.channel, title="Populares", action="categorias", url=HOST))
+itemlist.append(Item(channel=item.channel, title="Top 100", action="listado", url=HOST + "top/",
+thumbnail=get_thumb('more voted', auto=True)))
+itemlist.append(Item(channel=item.channel, title="Populares", action="listado", url=HOST + "populares/",
+thumbnail=get_thumb('more watched', auto=True)))
+itemlist.append(Item(channel=item.channel, title="Series y Temas", action="listado", url=HOST + "series-temas/",
+thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search",
thumbnail=get_thumb('search', auto=True)))
return itemlist
# itemlist.append(Item(channel=item.channel, title=" Series y Temas", action="categorias", url=HOST))
+def listado(item):
+logger.info()
+itemlist = []
+data = httptools.downloadpage(item.url).data
+data = data.replace('<span class="wpp-views">', '')
+bloque = scrapertools.find_single_match(data, 'class="post-entry(.*?)class="post-share')
+if "series-temas" not in item.url:
+patron = '<a href="([^"]+)".*?'
+patron += 'title="([^"]+)".*?'
+patron += '/a>([^<]+)<'
+matches = scrapertools.find_multiple_matches(bloque, patron)
+for scrapedurl, scrapedtitle, scrapedextra in matches:
+itemlist.append(Item(action = "findvideos",
+channel = item.channel,
+title = scrapedtitle + scrapedextra,
+url = HOST + scrapedurl
+))
+else:
+patron = """<a href='([^']+)'.*?"""
+patron += """>([^<]+)<.*?"""
+matches = scrapertools.find_multiple_matches(bloque, patron)
+for scrapedurl, scrapedtitle in matches:
+itemlist.append(Item(action = "videos",
+channel = item.channel,
+title = scrapedtitle,
+url = HOST + scrapedurl
+))
+return itemlist
def seccion(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
if item.extra == "destacados":
patron_seccion = '<h4 class="widget-title">Destacados</h4><div class="textwidget"><ul>(.*?)</ul>'
action = "findvideos"
else:
patron_seccion = '<h4 class="widget-title">Series destacadas</h4><div class="textwidget"><ul>(.*?)</ul>'
action = "listado"
action = "videos"
data = scrapertools.find_single_match(data, patron_seccion)
-matches = re.compile('<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data)
+matches = scrapertools.find_multiple_matches(data, '<a href="([^"]+)">(.*?)</a>')
aux_action = action
for url, title in matches:
if item.extra != "destacados" and "Cosmos (Carl Sagan)" in title:
@@ -60,61 +84,46 @@ def seccion(item):
else:
action = aux_action
itemlist.append(item.clone(title=title, url=url, action=action, fulltitle=title))
return itemlist
-def listado(item):
+def videos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
-pagination = scrapertools.find_single_match(data, '<div class="older"><a href="([^"]+)"')
+pagination = scrapertools.find_single_match(data, "rel='next' href='([^']+)'")
+if not pagination:
+pagination = scrapertools.find_single_match(data, '<span class=\'current\'>\d</span>'
+'<a class="page larger" href="([^"]+)">')
patron = '<ul class="sp-grid">(.*?)</ul>'
data = scrapertools.find_single_match(data, patron)
matches = re.compile('<a href="([^"]+)">(.*?)</a>.*?<img.*?src="([^"]+)"', re.DOTALL).findall(data)
for url, title, thumb in matches:
itemlist.append(item.clone(title=title, url=url, action="findvideos", fulltitle=title, thumbnail=thumb))
if pagination:
itemlist.append(item.clone(title=">> Página siguiente", url=pagination))
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
data = scrapertools.find_single_match(data, 'a href="#">Categorías</a><ul class="sub-menu">(.*?)</ul>')
-matches = re.compile('<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data)
+matches = scrapertools.find_multiple_matches(data, '<a href="([^"]+)">(.*?)</a>')
for url, title in matches:
-itemlist.append(item.clone(title=title, url=url, action="listado", fulltitle=title))
+itemlist.append(item.clone(title=title, url=url, action="videos", fulltitle=title))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
try:
item.url = HOST + "?s=%s" % texto
-return listado(item)
+return videos(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
@@ -125,37 +134,21 @@ def search(item, texto):
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
-if item.fulltitle == "Cosmos (Carl Sagan)":
-matches = scrapertools.find_multiple_matches(data,
-'<p><strong>(.*?)</strong><br /><iframe.+?src="(https://www\.youtube\.com/[^?]+)')
+if "Cosmos (Carl Sagan)" in item.title:
+patron = '(?s)<p><strong>([^<]+)<.*?'
+patron += '<iframe.*?src="([^"]+)"'
+matches = scrapertools.find_multiple_matches(data,patron)
for title, url in matches:
-new_item = item.clone(title=title, url=url)
-from core import servertools
-aux_itemlist = servertools.find_video_items(new_item)
-for videoitem in aux_itemlist:
-videoitem.title = new_item.title
-videoitem.fulltitle = new_item.title
-videoitem.channel = item.channel
-# videoitem.thumbnail = item.thumbnail
-itemlist.extend(aux_itemlist)
+itemlist.append(item.clone(action = "play", title=title, url=url))
else:
data = scrapertools.find_multiple_matches(data, '<iframe.+?src="(https://www\.youtube\.com/[^?]+)')
from core import servertools
data = scrapertools.find_multiple_matches(data, '<iframe.+?src="([^"]+)"')
itemlist.extend(servertools.find_video_items(data=",".join(data)))
for videoitem in itemlist:
videoitem.fulltitle = item.fulltitle
videoitem.channel = item.channel
# videoitem.thumbnail = item.thumbnail
+itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
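The pagination lookup in videos() switches from the old <div class="older"> link to the page's rel='next' link, keeping the numbered pager as a fallback. A runnable sketch of that two-step lookup, with a local stand-in for scrapertools.find_single_match (which, as used throughout this channel, is assumed to return the first capture group or an empty string):

import re

def find_single_match(data, patron):
    # Local stand-in for scrapertools.find_single_match: first capture
    # group of the first match, or "" when nothing matches.
    match = re.search(patron, data, re.DOTALL)
    return match.group(1) if match else ""

# Illustrative pager markup, not fetched from the site.
html = ("<span class='current'>1</span>"
        '<a class="page larger" href="http://documentales-online.com/page/2/">2</a>')
pagination = find_single_match(html, "rel='next' href='([^']+)'")
if not pagination:
    # No rel='next' link on this page, so fall back to the numbered pager.
    pagination = find_single_match(html, '<span class=\'current\'>\d</span>'
                                         '<a class="page larger" href="([^"]+)">')
print(pagination)  # http://documentales-online.com/page/2/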

View File

@@ -18,6 +18,58 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
data = httptools.downloadpage(page_url).data
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.bz/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
httptools.downloadpage(cgi_counter, cookies=False)
time.sleep(6)
url_playitnow = "https://www.flashx.bz/dl?playitnow"
fid = scrapertools.find_single_match(data, 'input type="hidden" name="id" value="([^"]*)"')
fname = scrapertools.find_single_match(data, 'input type="hidden" name="fname" value="([^"]*)"')
fhash = scrapertools.find_single_match(data, 'input type="hidden" name="hash" value="([^"]*)"')
headers = {'Content': 'application/x-www-form-urlencoded'}
post_parameters = {
"op": "download1",
"usr_login": "",
"id": fid,
"fname": fname,
"referer": "https://www.flashx.bz/",
"hash": fhash,
"imhuman": "Continue To Video"
}
data = httptools.downloadpage(url_playitnow, urllib.urlencode(post_parameters), headers=headers).data
video_urls = []
media_urls = scrapertools.find_multiple_matches(data, "{src: '([^']+)'.*?,label: '([^']+)'")
subtitle = ""
for media_url, label in media_urls:
if media_url.endswith(".srt") and label == "Spanish":
try:
from core import filetools
data = httptools.downloadpage(media_url)
subtitle = os.path.join(config.get_data_path(), 'sub_flashx.srt')
filetools.write(subtitle, data)
except:
import traceback
logger.info("Error al descargar el subtítulo: " + traceback.format_exc())
for media_url, label in media_urls:
if not media_url.endswith("png") and not media_url.endswith(".srt"):
video_urls.append(["." + media_url.rsplit('.', 1)[1] + " [flashx]", media_url, 0, subtitle])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
def get_video_url_anterior(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
pfxfx = ""
data = httptools.downloadpage(page_url, cookies=False).data
data = data.replace("\n","")
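The new get_video_url() drives flashx's "imhuman" gate by hand: it hits the counter.cgi beacon, waits six seconds, scrapes the hidden id/fname/hash inputs, and re-posts them to the dl endpoint. A sketch of just the encoding step (Python 2, matching the addon's urllib.urlencode call; every field value below is a placeholder, not a real token):

# Python 2 sketch of the form re-post; values are placeholders.
import urllib

post_parameters = {
    "op": "download1",
    "usr_login": "",
    "id": "abc123",                  # hidden <input name="id"> value
    "fname": "some_video.mp4",       # hidden <input name="fname"> value
    "referer": "https://www.flashx.bz/",
    "hash": "0123456789abcdef",      # hidden <input name="hash"> value
    "imhuman": "Continue To Video",
}
body = urllib.urlencode(post_parameters)
# The addon passes this encoded string as the second argument of
# httptools.downloadpage(), which sends it as the POST body.
print(body)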