Merge remote-tracking branch 'alfa-addon/master'

This commit is contained in:
unknown
2017-10-17 08:04:20 -03:00
15 changed files with 550 additions and 49 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.2.2" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.2.3" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,19 +19,12 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» maxipelis » peliculasaudiolatino
» peliculasmx » peliscity
» repelis » seriesmeme
» seriesyonkis » verpeliculasnuevas
» zonatorrent » kabagi/diskokosmico
» tiotorrent » allcalidad
» areadocumental » cinetux
» hdfull » newpct1
» ohpelis » animeyt
» flashx » kbagi
» gamovideo » vidup
» animeyt » pelismundo
» asialiveaction » animeflv_me
» newpct1 » wopelis
» gvideo » powvideo
¤ arreglos internos
[COLOR green]Gracias a [COLOR yellow]RIgodonius[/COLOR] por su colaboración en esta versión[/COLOR]
[COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] y [COLOR yellow]robalo[/COLOR] por su colaboración en esta versión[/COLOR]
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -58,10 +58,9 @@ def get_cookie_value():
return cookies_value
header_string = "|User-Agent=Mozilla/5.0&Referer=http://animeflv.me&Cookie=" + \
header_string = "|User-Agent=Mozilla/5.0&Referer=http://animeflv.co&Cookie=" + \
get_cookie_value()
def __find_next_page(html):
"""
Busca el enlace a la pagina siguiente
@@ -71,12 +70,6 @@ def __find_next_page(html):
def __extract_info_from_serie(html):
"""
Extrae la información de una serie o pelicula desde su página
Util para cuando una busqueda devuelve un solo resultado y animeflv.me
redirecciona a la página de este.
"""
title = scrapertools.find_single_match(html, REGEX_TITLE)
title = clean_title(title)
url = scrapertools.find_single_match(html, REGEX_URL)
@@ -156,7 +149,7 @@ def mainlist(item):
def letras(item):
logger.info()
base_url = 'http://animeflv.me/ListadeAnime?c='
base_url = 'http://animeflv.co/ListadeAnime?c='
itemlist = list()
itemlist.append(Item(channel=item.channel, action="series", title="#", url=base_url + "#"))
@@ -305,18 +298,20 @@ def episodios(item):
def findvideos(item):
logger.info()
itemlist = []
encontrados = []
page_html = get_url_contents(item.url)
regex_api = r'http://player\.animeflv\.co/[^\"]+'
iframe_url = scrapertools.find_single_match(page_html, regex_api)
iframe_html = get_url_contents(iframe_url)
itemlist.extend(servertools.find_video_items(data=iframe_html))
qualities = ["360", "480", "720", "1080"]
for videoitem in itemlist:
if videoitem.url in encontrados:
continue
encontrados.append(videoitem.url)
videoitem.fulltitle = item.fulltitle
videoitem.title = "%s en calidad [%s]" % (videoitem.server, qualities[1])
videoitem.channel = item.channel
@@ -326,10 +321,11 @@ def findvideos(item):
videos_html = scrapertools.find_single_match(iframe_html, regex_video_list)
videos = re.findall('"([^"]+)"', videos_html, re.DOTALL)
for quality_id, video_url in enumerate(videos):
if video_url in encontrados:
continue
encontrados.append(video_url)
itemlist.append(Item(channel=item.channel, action="play", url=video_url, show=re.escape(item.show),
title="Ver en calidad [%s]" % (qualities[quality_id]), plot=item.plot,
fulltitle=item.title))
return __sort_by_quality(itemlist)

View File

@@ -3,7 +3,7 @@
import re
import urlparse
from channels import renumbertools
from core import httptools
from core import scrapertools
from core import servertools
@@ -28,6 +28,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title="Búsqueda", action="search", url=urlparse.urljoin(HOST, "busqueda?terminos=")))
itemlist = renumbertools.show_option(item.channel, itemlist)
return itemlist
@@ -113,7 +114,7 @@ def recientes(item):
matches = scrapertools.find_multiple_matches(data_recientes, patron)
for url, thumbnail, plot, title, cat in matches:
itemlist.append(item.clone(title=title, url=url, action="episodios", show=title, thumbnail=thumbnail, plot=plot, cat=cat))
itemlist.append(item.clone(title=title, url=url, action="episodios", show=title, thumbnail=thumbnail, plot=plot, cat=cat, context=renumbertools.context(item)))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
@@ -137,8 +138,11 @@ def episodios(item):
matches = scrapertools.find_multiple_matches(data, patron)
for url, scrapedtitle, episode in matches:
title = "1x" + episode + " " + "Episodio"
season = 1
episode = int(episode)
season, episode = renumbertools.numbered_for_tratk(item.channel, scrapedtitle, season, episode)
title = "%sx%s %s" % (season, str(episode).zfill(2), scrapedtitle)
itemlist.append(item.clone(title=title, url=url, action='findvideos'))
if config.get_videolibrary_support:

View File

@@ -0,0 +1,20 @@
{
"id": "asialiveaction",
"name": "Asialiveaction.",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "asialiveaction.png",
"banner": "https://imgur.com/B1IOAu4.png",
"version": 1,
"changes": [
{
"date": "08/10/2017",
"description": "Primera versión del canal"
}
],
"categories": [
"movie",
"serie"
]
}

View File

@@ -0,0 +1,188 @@
# -*- coding: UTF-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
host = "http://www.asialiveaction.com"
def mainlist(item):
    """Build the channel's root menu: releases, movie/series lists, categories."""
    logger.info()
    entries = [
        ("estrenos", "Estrenos", host),
        ("lista", "Peliculas", urlparse.urljoin(host, "p/peliculas.html")),
        ("lista", "Series", urlparse.urljoin(host, "p/series.html")),
        ("category", "Orden Alfabético", host),
        ("category", "Géneros", host),
        ("category", "Año de Estreno", host),
    ]
    # Search entry intentionally disabled upstream:
    # Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "/search?q="))
    return [Item(channel=item.channel, action=action, title=title, url=url)
            for action, title, url in entries]
def category(item):
    """List the entries of one home-page category widget.

    item.title selects the widget (e.g. "Géneros", "Año de Estreno"); it is
    interpolated into the <h2 class='title'> header pattern below.
    """
    logger.info()
    itemlist = list()
    data = httptools.downloadpage(host).data
    # Collapse newlines/tabs/double-spaces/&nbsp; so the one-line regexes match.
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_generos = "<h2 class='title'>"+item.title+"<\/h2><div class='.+?'><ul class='.+?'><(.+?)><\/ul><\/div>"
    data_generos = scrapertools.find_single_match(data, patron_generos)
    patron = "<a href='(.+?)'>(.+?)<\/a>"
    matches = scrapertools.find_multiple_matches(data_generos, patron)
    for scrapedurl, scrapedtitle in matches:
        # "Próximas Películas" (upcoming) entries have no playable content; skip.
        if scrapedtitle != 'Próximas Películas':
            itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl))
    return itemlist
def search(item, texto):
    """Append the query to item.url and delegate to lista().

    Returns None (no results) for an empty query, mirroring other channels.
    """
    logger.info()
    query = texto.replace(" ", "+")
    item.url += query
    if not query:
        return
    return lista(item)
def estrenos(item):
    """Scrape the 'Estrenos' (new releases) widget on the home page.

    Movies go straight to findvideos; everything else is treated as a
    series and routed to capitulos.
    """
    logger.info()
    itemlist = list()
    data = httptools.downloadpage(host).data
    # Collapse whitespace/entities so the single-line regexes below can match.
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_estre = "<div class='widget HTML' data-version='1' id='HTML9'><h2 class='title'>(.+?)<\/a><\/li><\/ul>"
    data_estre = scrapertools.find_single_match(data, patron_estre)
    patron = '<i class="([^"]+)"><\/i><div class="calidad">.+?' # type: serie or pelicula
    patron +='<img src="([^"]+)"\/>' # thumbnail
    patron +='<h4>([^"]+)<\/h4>.+?' # title
    patron +='<a href="([^"]+)">' # url (site-relative)
    matches = scrapertools.find_multiple_matches(data_estre, patron)
    for scrapedtype, scrapedthumbnail,scrapedtitle,scrapedurl in matches:
        title = "%s [%s]" % (scrapedtitle, scrapedtype)
        if scrapedtype == "pelicula":
            itemlist.append(item.clone(title=title, url=host+scrapedurl, action="findvideos", extra=scrapedtype,
                                       show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie",
                                       context=["buscar_trailer"]))
        else:
            itemlist.append(item.clone(title=title, url=host+scrapedurl, show=scrapedtitle,
                                       thumbnail=scrapedthumbnail, action="capitulos"))
    return itemlist
def capitulos(item):
    """List the episodes of a series page.

    Each episode link is a bit.ly shortlink; it is uncloaked through
    trueurl.net to the final privatelink.de URL, costing one extra HTTP
    request per episode.
    """
    logger.info()
    itemlist = list()
    data = httptools.downloadpage(item.url).data
    # Collapse whitespace/entities so the single-line regexes below can match.
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_datos = '<div class="output">(.+?)><\/section>'
    data_caps = scrapertools.find_single_match(data, patron_datos)
    patron_caps = '<img alt=".+?" src="(.+?)"\/><a href="http:\/\/bit.ly\/(.+?)"'
    matches = scrapertools.find_multiple_matches(data_caps, patron_caps)
    for cap, (scrapedthumbnail, scrapedurl) in enumerate(matches, 1):
        # Uncloak the bit.ly shortlink via trueurl.net to get the real URL.
        link = "http://www.trueurl.net/?q=http%3A%2F%2Fbit.ly%2F" + scrapedurl + "&lucky=on&Uncloak=Find+True+URL"
        data_other = httptools.downloadpage(link).data
        data_other = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_other)
        patron = '<A title="http:\/\/privatelink.de\/\?(.+?)"'
        url = scrapertools.find_single_match(data_other, patron)
        # BUGFIX: the original interpolated an undefined 'title' variable
        # ("%s%s - %s" % (title, ...)), raising a NameError on the first
        # episode. Build the label from the episode number and show only.
        title = "%s - %s" % (str(cap).zfill(2), item.show)
        itemlist.append(item.clone(action='findvideos', title=title,
                                   url=url, show=item.show, thumbnail=scrapedthumbnail))
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show))
    return itemlist
def bitly(item):
    """Resolve a movie page's bit.ly shortlink to the real video URL.

    Downloads item.url, extracts the bit.ly code, uncloaks it via
    trueurl.net, and returns a clone of the item pointing at the
    privatelink.de target with action 'findvideos'.
    """
    logger.info()
    itemlist = list()
    data = httptools.downloadpage(item.url).data
    # Collapse whitespace/entities so the single-line regexes below can match.
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<a href="http:\/\/bit.ly\/(.+?)"'
    link = scrapertools.find_single_match(data, patron)
    link="http://www.trueurl.net/?q=http%3A%2F%2Fbit.ly%2F"+link+"&lucky=on&Uncloak=Find+True+URL"
    data_other = httptools.downloadpage(link).data
    data_other = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_other)
    patron='<A title="http:\/\/privatelink.de\/\?(.+?)"'
    url = scrapertools.find_single_match(data_other, patron)
    # Anything that is not explicitly a movie is tagged as a series.
    if item.contentType=="movie":
        contentType="movie"
    else:
        contentType="serie"
    item=(item.clone(action='findvideos',url=url,show=item.show, thumbnail=item.thumbnail, contentType=contentType))
    return item
def lista(item):
    """Scrape a listing page (movies, series, genre/year/letter filters).

    Series listings route to capitulos; movie cards route to findvideos.
    A pagination pseudo-item is appended when a next-page link exists.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Collapse whitespace/entities so the single-line regexes below can match.
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<i class="(.+?)"><\/i>' # type (serie / pelicula)
    patron +='<div class="calidad">(.+?)<\/div>' # quality block
    patron += '<img src="(.+?)"\/>' # thumbnail
    patron += '<h4>(.+?)<\/h4>' # title
    patron += "<h5>(.+?)<\/h5>" # year
    patron += '<a href="(.+?)"' # url (site-relative)
    #patron += "<\/a>.+?<div class='item-snippet'>(.+?)<" # plot (disabled)
    # Narrow the scrape to the main gallery unless invoked as "Prueba" (test).
    if item.title!="Prueba":
        pat='<div id="tab-1"><ul class="post-gallery">(.+?)<\/ul><\/div>'
        data=scrapertools.find_single_match(data, pat)
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedtype,scrapedquality,scrapedthumbnail,scrapedtitle,scrapedyear,scrapedurl in matches:
        # The quality block can hold several <span> tags; join them as "[x] [y] ".
        patron_quality="<span>(.+?)</span>"
        quality = scrapertools.find_multiple_matches(scrapedquality, patron_quality)
        qual=""
        for calidad in quality:
            qual=qual+"["+calidad+"] "
        title="%s [%s] %s" % (scrapedtitle,scrapedyear,qual)
        if item.title =="Series":
            itemlist.append(item.clone(title=title, url=host+scrapedurl, extra=scrapedtitle, plot=scrapedtitle,
                                       show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="serie", action="capitulos"))
        elif scrapedtype != 'serie':
            itemlist.append(
                item.clone(title=title, url=host+scrapedurl, action="findvideos", extra=scrapedtype, plot=scrapedtitle,
                           show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie", context=["buscar_trailer"]))
    # Pagination: Romance/Drama pages use a different "next" markup.
    patron_genero = '<h1>([^"]+)<\/h1>'
    genero = scrapertools.find_single_match(data, patron_genero)
    if genero == "Romance" or genero == "Drama":
        patron = "<a rel='nofollow' class=previouspostslink' href='([^']+)'>Siguiente "
    else:
        patron = "<span class='current'>.+?href='(.+?)'>"
    next_page_url = scrapertools.find_single_match(data, patron)
    if next_page_url != "":
        item.url = next_page_url
        itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=next_page_url,
                             thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
    return itemlist
def findvideos(item):
    """Resolve playable video links for a movie or episode page."""
    logger.info()
    if item.extra == 'pelicula':
        # Movie pages hide the real URL behind a bit.ly shortlink.
        item = bitly(item)
    page = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=page)
    for video in itemlist:
        video.channel = item.channel
    offer_library = (config.get_videolibrary_support()
                     and itemlist
                     and item.contentType == "movie")
    if offer_library:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.show))
    return itemlist

View File

@@ -98,10 +98,10 @@ def listado(item):
fichas = data
page_extra = item.extra
patron = '<li><a href="([^"]+).*?' # url
patron += 'title="([^"]+).*?' # titulo
patron += '<img src="([^"]+)"[^>]+>.*?' # thumbnail
patron += '<span>([^<]*)</span>' # calidad
patron = '<a href="([^"]+).*?' # la url
patron += 'title="([^"]+).*?' # el titulo
patron += '<img src="([^"]+)"[^>]+>.*?' # el thumbnail
patron += '<span>([^<].*?)<' # la calidad
matches = re.compile(patron, re.DOTALL).findall(fichas)
logger.debug('item.next_page: %s'%item.next_page)
@@ -167,14 +167,12 @@ def listado(item):
logger.debug('context: %s' % context)
if not 'array' in title:
new_item = Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
extra = extra,
show = context_title, contentTitle=context_title, contentType=context,
context=["buscar_trailer"], infoLabels= {'year':year})
if year:
tmdb.set_infoLabels_item(new_item, seekTmdb = True)
itemlist.append(new_item)
context=["buscar_trailer"], infoLabels= {'year':year}))
tmdb.set_infoLabels(itemlist, True)

View File

@@ -140,11 +140,8 @@ def findvideos(item):
data = httptools.downloadpage(item.url).data
patron = 'cursor: hand" rel="(.*?)".*?class="optxt"><span>(.*?)<.*?width.*?class="q">(.*?)</span'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedidioma, scrapedcalidad in matches:
title = "%s [" + scrapedcalidad + "][" + scrapedidioma +"]"
if "youtube" in scrapedurl:
scrapedurl += "&"
quality = scrapedcalidad
language = scrapedidioma
if not ("omina.farlante1" in scrapedurl or "404" in scrapedurl):

View File

@@ -0,0 +1,61 @@
{
"id": "pelismundo",
"name": "Pelismundo",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://s26.postimg.org/72c9mr3ux/pelismundo1.png",
"banner": "",
"version": 1,
"changes": [
{
"date": "12/10/2017",
"description": "Primera version"
}
],
"categories": [
"movie"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,244 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa addon - KODI Plugin
# Canal para pelismundo
# https://github.com/alfa-addon
# ------------------------------------------------------------
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
# BUGFIX: the channel id was copy-pasted as 'allcalidad' from another channel;
# this file is the pelismundo channel and its settings (e.g. 'modo_grafico')
# are declared under the 'pelismundo' id in pelismundo.json, so the setting
# lookup below targeted the wrong channel.
__channel__ = 'pelismundo'
host = "http://www.pelismundo.com/"
# Whether to fetch extra artwork/metadata; default to True if the settings
# backend is unavailable (narrowed from a bare except).
try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except Exception:
    __modo_grafico__ = True
def mainlist(item):
    """Root menu: recent movies, audio/genre filters, and search."""
    logger.info()
    return [
        Item(channel=item.channel, title="Recientes", action="peliculas", url=host),
        Item(channel=item.channel, title="Por audio", action="filtro", url=host, filtro="Películas por audio"),
        Item(channel=item.channel, title="Por género", action="filtro", url=host, filtro="Películas por género"),
        # Empty entry acts as a visual separator in the listing.
        Item(channel=item.channel),
        Item(channel=item.channel, title="Buscar", action="search", url=host),
    ]
def newest(categoria):
    """Entry point for the global "Novedades" sections.

    Never raises: any failure is logged and an empty list is returned so
    the aggregator keeps working.
    """
    logger.info()
    section_urls = {
        'peliculas': host,
        'infantiles': host + 'genero/infantil/',
        'terror': host + 'genero/terror/',
    }
    item = Item()
    try:
        if categoria in section_urls:
            item.url = section_urls[categoria]
        itemlist = peliculas(item)
        # Drop the trailing pagination pseudo-item, if present.
        if "Pagina" in itemlist[-1].title:
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
def search(item, texto):
    """Run a site search for *texto*.

    Swallows every error (logging it) so one broken channel cannot abort
    the global search.
    """
    logger.info()
    item.url = "%s?s=%s" % (item.url, texto.replace(" ", "+"))
    try:
        return sub_search(item)
    except:
        # Deliberately broad: the global search must survive channel failures.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def sub_search(item):
    """Parse the search-results page into playable movie items.

    Extracts url/title/thumbnail/languages/quality per result, cleans the
    title, builds "Title (year) (quality) [LANGS]" labels, enriches with
    TMDB info, and appends a pagination item when a next page exists.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'search-results-content infinite.*?</ul>'
    bloque = scrapertools.find_single_match(data, patron)
    patron = '(?s)href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += 'src="([^"]+)".*?'
    patron += 'Idioma.*?tag">([^<]+).*?'
    patron += 'Calidad(.*?<)\/'
    match = scrapertools.find_multiple_matches(bloque, patron)
    scrapertools.printMatches(match)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedlanguages, scrapedquality in match:
        year = scrapertools.find_single_match(scrapedtitle, '[0-9]{4}')
        scrapedquality = scrapertools.find_single_match(scrapedquality, 'rel="tag">([^<]+)<')
        # Strip "Online..." suffixes and parenthesised fragments from the title.
        st = scrapertools.find_single_match(scrapedtitle, '(?i)Online.*')
        scrapedtitle = scrapedtitle.replace(st, "")
        st = scrapertools.find_single_match(scrapedtitle, '\(.*?\)')
        scrapedtitle = scrapedtitle.replace(st, "")
        title = scrapedtitle
        if year:
            title += " (" + year + ")"
        if scrapedquality:
            title += " (" + scrapedquality + ")"
        patronidiomas = ''  # NOTE(review): unused leftover variable
        idiomas_disponibles = []
        matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Castellano')
        if matchidioma:
            idiomas_disponibles.append("ESP")
        matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Subtitulado')
        if matchidioma:
            idiomas_disponibles.append("VOSE")
        matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Latino')
        if matchidioma:
            idiomas_disponibles.append("LAT")
        idiomas_disponibles1 = ""
        if idiomas_disponibles:
            idiomas_disponibles1 = "[" + "/".join(idiomas_disponibles) + "]"
        title += " %s" %idiomas_disponibles1
        itemlist.append(Item(channel = item.channel,
                             action = "findvideos",
                             title = title,
                             contentTitle = scrapedtitle,
                             thumbnail = scrapedthumbnail,
                             quality = scrapedquality,
                             language = idiomas_disponibles,
                             infoLabels={"year": year},
                             url = scrapedurl
                             ))
    tmdb.set_infoLabels(itemlist)
    # Pagination: "Pagina: N" pseudo-item pointing at the next results page.
    url_pagina = scrapertools.find_single_match(data, 'next" href="([^"]+)')
    if url_pagina != "":
        pagina = "Pagina: " + scrapertools.find_single_match(url_pagina, "page/([0-9]+)")
        itemlist.append(Item(channel = item.channel, action = "peliculas", title = pagina, url = url_pagina))
    return itemlist
def filtro(item):
    """List the sidebar filter links named by item.filtro (audio / genre)."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # item.filtro is the sidebar widget header text ("Películas por audio", etc.).
    patron = 'class="sbi-header">%s.*?</ul>' %item.filtro
    bloque = scrapertools.find_single_match(data, patron)
    patron = '(?s)href="([^"]+)".*?'
    patron += '</span>([^<]+)</a>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for url, title in matches:
        # NOTE(review): "eroti33cas" looks like a typo for "eroticas" — as
        # written this adult-mode filter can never match; confirm against
        # the site's actual category names before changing it.
        if "eroti33cas" in title and config.get_setting("adult_mode") == 0:
            continue
        itemlist.append(item.clone(action = "peliculas",
                                  title = title.title(),
                                  url = url
                                  ))
    return itemlist
def peliculas(item):
    """Parse a movie-list page into playable movie items.

    Extracts url/title/quality/thumbnail/languages/year per card, cleans
    the title, builds "Title (year) (quality) [LANGS]" labels, enriches
    with TMDB info, and appends a pagination item when a next page exists.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'movie-list" class="clearfix.*?pagination movie-pagination clearfix'
    bloque = scrapertools.find_single_match(data, patron)
    patron = '(?s)href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += 'class="mq([^"]+)".*?'
    patron += 'src="([^"]+)".*?'
    patron += '_audio(.*?)class.*?'
    patron += 'label_year">([^<]+)<'
    match = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedtitle, scrapedquality, scrapedthumbnail, scrapedlanguages, year in match:
        year = scrapertools.find_single_match(year, '[0-9]{4}')
        # Strip "Online..." suffixes and parenthesised fragments from the title.
        st = scrapertools.find_single_match(scrapedtitle, '(?i)Online.*')
        scrapedtitle = scrapedtitle.replace(st, "").strip()
        st = scrapertools.find_single_match(scrapedtitle, '\(.*?\)')
        scrapedtitle = scrapedtitle.replace(st, "")
        title = scrapedtitle
        if year:
            title += " (" + year + ")"
        if scrapedquality:
            title += " (" + scrapedquality + ")"
        patronidiomas = ''  # NOTE(review): unused leftover variable
        idiomas_disponibles = []
        matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Castellano')
        if matchidioma:
            idiomas_disponibles.append("ESP")
        matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Subtitulado')
        if matchidioma:
            idiomas_disponibles.append("VOSE")
        matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Latino')
        if matchidioma:
            idiomas_disponibles.append("LAT")
        idiomas_disponibles1 = ""
        if idiomas_disponibles:
            idiomas_disponibles1 = "[" + "/".join(idiomas_disponibles) + "]"
        title += " %s" %idiomas_disponibles1
        itemlist.append(Item(channel = item.channel,
                             action = "findvideos",
                             title = title,
                             contentTitle = scrapedtitle,
                             thumbnail = scrapedthumbnail,
                             quality = scrapedquality,
                             language = idiomas_disponibles,
                             infoLabels={"year": year},
                             url = scrapedurl
                             ))
    tmdb.set_infoLabels(itemlist)
    # Pagination: "Pagina: N" pseudo-item pointing at the next results page.
    url_pagina = scrapertools.find_single_match(data, 'next" href="([^"]+)')
    if url_pagina != "":
        pagina = "Pagina: " + scrapertools.find_single_match(url_pagina, "page/([0-9]+)")
        itemlist.append(Item(channel = item.channel, action = "peliculas", title = pagina, url = url_pagina))
    return itemlist
def findvideos(item):
    """List the video servers available for a movie page.

    The per-server titles contain a literal '%s' placeholder that is
    filled in later by get_servers_itemlist's lambda with the detected
    server name — do not format them earlier.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'SegundaParte.*?ventana-flotante'
    bloque = scrapertools.find_single_match(data, patron)
    patron = 'hand" rel="([^"]+)".*?'
    patron += 'optxt"><span>([^<]+)</span>.*?'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedlanguage in matches:
        # Trailing '&' keeps youtube URLs from being mangled downstream.
        if "youtube" in scrapedurl:
            scrapedurl += "&"
        title = "Ver en: %s " + "(" + scrapedlanguage + ")"
        itemlist.append(item.clone(action = "play",
                                   title = title,
                                   language = item.language,
                                   quality = item.quality,
                                   url = scrapedurl
                                   ))
    tmdb.set_infoLabels(itemlist)
    # Resolves servers and substitutes the '%s' in each title.
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    if itemlist:
        itemlist.append(Item(channel = item.channel))
    itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                               text_color="magenta"))
    # Option "Add this movie to the KODI library"
    if item.extra != "library":
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                 action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                                 fulltitle = item.contentTitle
                                 ))
    return itemlist
def play(item):
    """Return the item ready to play, swapping in its content thumbnail."""
    item.thumbnail = item.contentThumbnail
    return [item]

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
import urlparse

View File

@@ -255,10 +255,10 @@ def findvideos(item):
dic_servers = {'ntfof': 'Servidor Desconocido', 'stramango': 'streamango', 'flasht': 'flashx'}
data1 = downloadpage(item.url)
patron = 'onclick="redir\(([^\)]+).*?'
patron = '(?s)onclick="redir\(([^\)]+).*?'
patron += '<img style="float:left" src="./[^/]+/([^\.]+).+?'
patron += '<span[^>]+>([^<]+).*?'
patron += '<img(.*?)onerror'
patron += '<img(.*?)on'
if "Descarga:</h1>" in data1:
list_showlinks = [('Online:', 'Online:</h1>(.*?)Descarga:</h1>'),

Binary file not shown.

After

Width:  |  Height:  |  Size: 21 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 13 KiB

View File

@@ -10,7 +10,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(?s)https://youtube.googleapis.com.*?docid=([^(?:&|\")]+)",
"pattern": "(?s)https://youtube.googleapis.com.*?docid=([0-9a-zA-Z-_]+)",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
@@ -18,7 +18,7 @@
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
"pattern": "(?s)\"https://(?!docs)(.*?).googleusercontent.com/([^\"]+)",
"pattern": "(?s)\"https://(lh.*?).googleusercontent.com/([^\"]+)",
"url": "https://\\1.googleusercontent.com/\\2"
}
]

View File

@@ -58,7 +58,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
matches = scrapertools.find_multiple_matches(data, "[src|file]:'([^']+)'")
video_urls = []
for video_url in matches:
_hash = scrapertools.find_single_match(video_url, '[A-z0-9\_\-]{40,}')
_hash = scrapertools.find_single_match(video_url, '[A-z0-9\_\-]{78,}')
hash = decrypt(_hash, key)
video_url = video_url.replace(_hash, hash)