Merge remote-tracking branch 'alfa-addon/master'

This commit is contained in:
unknown
2017-09-25 08:05:30 -03:00
31 changed files with 1445 additions and 568 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.0.4" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.0.6" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,12 +19,11 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» divxatope » torrentlocura
» peliculasrey » pelismundo
» gvideo » kingvid
» mailru » vidlox
» thevideome » bitp
¤ fix internos
» pedropolis » cinecalidad
» openload » cloudy
» uptobox ¤ arreglos internos
[COLOR green]Gracias a [COLOR yellow]msdos[/COLOR] y [COLOR yellow]prpeaprendiz[/COLOR] por su colaboración en esta versión[/COLOR]
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -298,7 +298,7 @@ def findvideos(item):
if server_id in server_url:
server = server_id.lower()
thumbnail = servertools.guess_server_thumbnail(server_id)
thumbnail = item.contentThumbnail
if server_id == 'TVM':
server = 'thevideo.me'
url = server_url[server_id] + video_id + '.html'
@@ -367,7 +367,7 @@ def play(item):
for videoitem in itemlist:
videoitem.title = item.fulltitle
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.extra
videoitem.thumbnail = item.contentThumbnail
videoitem.channel = item.channel
else:
itemlist.append(item)
@@ -463,3 +463,4 @@ def search(item, texto):
for line in sys.exc_info():
logger.error("%s" % line)
return []

View File

@@ -10,31 +10,10 @@
"thumbnail": "https://s2.postimg.org/jivgi4ak9/doomtv.png",
"banner": "https://s32.postimg.org/6gxyripvp/doomtv_banner.png",
"version": 1,
"changes": [
{
"date": "24/06/2017",
"description": "Cambios para autoplay"
},
{
"date": "06/06/2017",
"description": "COmpatibilida con AutoPlay"
},
{
"date": "12/05/2017",
"description": "Fix generos y enlaces"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/02/2017",
"description": "Release."
}
],
"categories": [
"latino",
"movie"
"movie",
"direct"
],
"settings": [
{

303 plugin.video.alfa/channels/doomtv.py (Executable file → Normal file)
View File

@@ -18,7 +18,7 @@ list_language = IDIOMAS.values()
CALIDADES = {'1080p': '1080p', '720p': '720p', '480p': '480p', '360p': '360p'}
list_quality = CALIDADES.values()
list_servers = ['directo']
list_servers = ['directo', 'openload']
host = 'http://doomtv.net/'
headers = {
@@ -32,10 +32,10 @@ tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
"Aventura": "https://s10.postimg.org/6su40czih/aventura.png",
"Romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
"Animación": "https://s13.postimg.org/5on877l87/animacion.png",
"Ciencia Ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
"Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
"Terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
"Documentales": "https://s16.postimg.org/7xjj4bmol/documental.png",
"Musical": "https://s29.postimg.org/bbxmdh9c7/musical.png",
"Documental": "https://s16.postimg.org/7xjj4bmol/documental.png",
"Música": "https://s29.postimg.org/bbxmdh9c7/musical.png",
"Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png",
"Bélico Guerra": "https://s23.postimg.org/71itp9hcr/belica.png",
"Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png",
@@ -56,7 +56,6 @@ tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(
@@ -64,7 +63,7 @@ def mainlist(item):
action="lista",
thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.org/fwvaeo6qh/todas.png',
url=host
url='%s%s'%(host,'peliculas/page/1')
))
itemlist.append(
@@ -72,34 +71,15 @@ def mainlist(item):
action="seccion",
thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.org/5s9jg2wtf/generos.png',
url=host,
extra='generos'
url='%s%s' % (host, 'peliculas/page/1'),
))
itemlist.append(
item.clone(title="Mas vistas",
action="seccion",
item.clone(title="Mas Vistas",
action="lista",
thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png',
fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png',
url=host,
extra='masvistas'
))
itemlist.append(
item.clone(title="Recomendadas",
action="lista",
thumbnail='https://s12.postimg.org/s881laywd/recomendadas.png',
fanart='https://s12.postimg.org/s881laywd/recomendadas.png',
url=host,
extra='recomendadas'
))
itemlist.append(
item.clone(title="Por año",
action="seccion",
thumbnail='https://s8.postimg.org/7eoedwfg5/pora_o.png',
fanart='https://s8.postimg.org/7eoedwfg5/pora_o.png',
url=host, extra='poraño'
url='%s%s'%(host,'top-imdb/page/1'),
))
itemlist.append(
@@ -110,8 +90,6 @@ def mainlist(item):
fanart='https://s30.postimg.org/pei7txpa9/buscar.png'
))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -123,23 +101,11 @@ def lista(item):
next_page_url = ''
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'movie-id=.*?href=(.*?) data-url.*?quality>(.*?)'
patron += '<img data-original=(.*?) class.*?<h2>(.*?)<\/h2>.*?<p>(.*?)<\/p>'
if item.extra == 'recomendadas':
patron = '<a href="(.*?)">.*?'
patron += '<div class="imgss">.*?'
patron += '<img src="(.*?)" alt="(.*?)(?:.*?|\(.*?|&#8211;|").*?'
patron += '<div class="imdb">.*?'
patron += '<\/a>.*?'
patron += '<span class="ttps">.*?<\/span>.*?'
patron += '<span class="ytps">(.*?)<\/span><\/div>'
elif item.extra in ['generos', 'poraño', 'buscar']:
patron = '<div class=movie>.*?<img src=(.*?) alt=(.*?)(?:\s|\/)><a href=(.*?)>.*?'
patron += '<h2>.*?<\/h2>.*?(?:<span class=year>(.*?)<\/span>)?.*?<\/div>'
else:
patron = '<div class="imagen">.*?'
patron += '<img src="(.*?)" alt="(.*?)(?:.*?|\(.*?|&#8211;|").*?'
patron += '<a href="([^"]+)"><(?:span) class="player"><\/span><\/a>.*?'
patron += 'h2>\s*.*?(?:year)">(.*?)<\/span>.*?<\/div>'
matches = re.compile(patron, re.DOTALL).findall(data)
if item.next_page != 'b':
@@ -150,39 +116,36 @@ def lista(item):
else:
matches = matches[max_items:]
next_page = 'a'
patron_next_page = '<div class="siguiente"><a href="(.*?)"|\/\?'
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
if len(matches_next_page) > 0:
next_page_url = urlparse.urljoin(item.url, matches_next_page[0])
next_page_str = scrapertools.find_single_match(data,"<li class='active'><a class=''>(\d+)</a>")
next_page_num = int(next_page_str)+1
page_base = re.sub(r'(page\/\d+)','', item.url)
next_page_url = '%s%s%s'%(page_base,'page/',next_page_num)
for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedyear in matches:
if item.extra == 'recomendadas':
url = scrapedthumbnail
title = scrapedurl
thumbnail = scrapedtitle
else:
url = scrapedurl
thumbnail = scrapedthumbnail
title = scrapedtitle
year = scrapedyear
if next_page_url:
next_page_url = next_page_url
for scrapedurl, quality, scrapedthumbnail, scrapedtitle, plot in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w185", "")
filtro_list = {"poster_path": filtro_thumb.strip()}
filtro_list = filtro_list.items()
title = scrapedtitle
fanart = ''
plot = ''
if 'serie' not in url:
itemlist.append(
Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=fanart,
contentTitle=title,
infoLabels={'year': year},
context=autoplay.context
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist.append(
Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
infoLabels={'filtro': filtro_list},
fanart=fanart,
contentTitle=title
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
# Pagination
if next_page_url != '':
itemlist.append(
@@ -203,17 +166,8 @@ def seccion(item):
itemlist = []
duplicado = []
data = httptools.downloadpage(item.url).data
if item.extra == 'generos':
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
accion = 'lista'
if item.extra == 'masvistas':
patron = '<b>\d*<\/b>\s*<a href="(.*?)">(.*?<\/a>\s*<span>.*?<\/span>\s*<i>.*?<\/i><\/li>)'
accion = 'findvideos'
elif item.extra == 'poraño':
patron = '<li><a class="ito" HREF="(.*?)">(.*?)<\/a><\/li>'
else:
patron = '<li class=cat-item cat-item-.*?><a href=(.*?)>(.*?)<\/i>'
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'menu-item-object-category menu-item-\d+><a href=(.*?)>(.*?)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -221,61 +175,19 @@ def seccion(item):
url = scrapedurl
title = scrapedtitle
thumbnail = ''
fanart = ''
plot = ''
year = ''
contentTitle = ''
if item.extra == 'masvistas':
year = re.findall(r'\b\d{4}\b', scrapedtitle)
title = re.sub(r'<\/a>\s*<span>.*?<\/span>\s*<i>.*?<\/i><\/li>', '', scrapedtitle)
contentTitle = title
title = title + ' (' + year[0] + ')'
elif item.extra == 'generos':
title = re.sub(r'<\/a> <i>\d+', '', scrapedtitle)
cantidad = re.findall(r'.*?<\/a> <i>(\d+)', scrapedtitle)
th_title = title
title = title + ' (' + cantidad[0] + ')'
thumbnail = tgenero[th_title]
fanart = thumbnail
if title in tgenero:
thumbnail = tgenero[title]
if url not in duplicado:
itemlist.append(
Item(channel=item.channel,
action=accion,
action='lista',
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=fanart,
contentTitle=contentTitle,
infoLabels={'year': year}
thumbnail = thumbnail
))
duplicado.append(url)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def unpack(packed):
p, c, k = re.search("}\('(.*)', *\d+, *(\d+), *'(.*)'\.", packed, re.DOTALL).groups()
for c in reversed(range(int(c))):
if k.split('|')[c]: p = re.sub(r'(\b%s\b)' % c, k.split('|')[c], p)
p = p.replace('\\', '')
p = p.decode('string_escape')
return p
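The unpack() helper above reverses Dean Edwards-style p,a,c,k,e,d JavaScript packing: it captures the payload, the token count and the pipe-separated keyword table, then substitutes each numeric token back into the payload. A minimal usage sketch under Python 2, the add-on's runtime (the packed string below is a hand-made toy sample; this simple variant only resolves numeric tokens, so base-36 packers need a fuller decoder):

# Hypothetical hand-made sample; real packed blobs come from the player page.
packed = "eval(function(p,a,c,k,e,d){}('0 1', 36, 2, 'alert|hi'.split('|'),0,{}))"
print unpack(packed)  # -> alert hi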
def getinfo(page_url):
info = ()
logger.info()
data = httptools.downloadpage(page_url).data
thumbnail = scrapertools.find_single_match(data, '<div class="cover" style="background-image: url\((.*?)\);')
plot = scrapertools.find_single_match(data, '<h2>Synopsis<\/h2>\s*<p>(.*?)<\/p>')
info = (plot, thumbnail)
return info
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
@@ -305,98 +217,47 @@ def newest(categoria):
return itemlist
def get_url(item):
logger.info()
itemlist = []
duplicado = []
patrones = ["{'label':(.*?),.*?'file':'(.*?)'}", "{file:'(.*?redirector.*?),label:'(.*?)'}"]
data = httptools.downloadpage(item.url, headers=headers, cookies=False).data
patron = 'class="player-content"><iframe src="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for option in matches:
if 'allplayer' in option:
url = 'http:/' + option.replace('//', '/')
data = httptools.downloadpage(url, headers=headers, cookies=False).data
packed = scrapertools.find_single_match(data, "<div id='allplayer'>.*?(eval\(function\(p,a,c,k.*?\)\)\))")
if packed:
unpacked = unpack(packed)
video_urls = []
if "vimeocdn" in unpacked:
streams = scrapertools.find_multiple_matches(unpacked,
"{file:'(.*?)',type:'video/.*?',label:'(.*?)'")
for video_url, quality in streams:
video_urls.append([video_url, quality])
else:
doc_id = scrapertools.find_single_match(unpacked, 'driveid=(.*?)&')
doc_url = "http://docs.google.com/get_video_info?docid=%s" % doc_id
response = httptools.downloadpage(doc_url, cookies=False)
cookies = ""
cookie = response.headers["set-cookie"].split("HttpOnly, ")
for c in cookie:
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = "|Cookie=" + cookies
url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
streams = scrapertools.find_multiple_matches(url_streams,
'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
itags = {'18': '360p', '22': '720p', '34': '360p', '35': '480p', '37': '1080p', '59': '480p'}
for itag, video_url in streams:
video_url += headers_string
video_urls.append([video_url, itags[itag]])
for video_item in video_urls:
calidad = video_item[1]
title = '%s [%s]' % (item.contentTitle, calidad)
url = video_item[0]
if url not in duplicado:
itemlist.append(
Item(channel=item.channel,
action='play',
title=title,
url=url,
thumbnail=item.thumbnail,
plot=item.plot,
fanart=item.fanart,
contentTitle=item.contentTitle,
language=IDIOMAS['latino'],
server='directo',
quality=CALIDADES[calidad],
context=item.context
))
duplicado.append(url)
else:
itemlist.extend(servertools.find_video_items(data=option))
for videoitem in itemlist:
if 'Enlace' in videoitem.title:
videoitem.channel = item.channel
videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
videoitem.language = 'latino'
videoitem.quality = 'default'
return itemlist
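get_url() above resolves Google Drive hosted streams by querying docs.google.com/get_video_info, rebuilding the cookie header from Set-Cookie, and mapping YouTube-style itag codes to quality labels. A reduced sketch of just the itag-mapping step (a toy helper, not part of the channel; it assumes url_streams already holds the decoded url_encoded_fmt_stream_map value and, unlike the code above, skips unknown itags instead of raising KeyError):

ITAGS = {'18': '360p', '22': '720p', '34': '360p', '35': '480p', '37': '1080p', '59': '480p'}

def label_streams(url_streams):
    # Each comma-separated entry carries itag=<code>&url=<stream>.
    streams = []
    for part in url_streams.split(','):
        itag = scrapertools.find_single_match(part, 'itag=(\d+)')
        video_url = scrapertools.find_single_match(part, 'url=([^&]+)')
        if itag in ITAGS:
            streams.append([video_url, ITAGS[itag]])
    return streams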
def findvideos(item):
logger.info()
itemlist = []
itemlist = get_url(item)
#itemlist = get_url(item)
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)'
matches = re.compile(patron, re.DOTALL).findall(data)
# Required for FilterTools
for option, urls in matches:
quality = scrapertools.find_single_match(data, '<div class=les-content><a href=#%s>(.*?)<\/a><\/div>'%option)
title = '%s (%s)' % (item.title, quality)
if 'content' in urls:
urls = '%s%s'%('http:',urls)
hidden_data = httptools.downloadpage(urls).data
hidden_data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", hidden_data)
patron = 'sources: \[{file: (.*?),'
matches = re.compile(patron, re.DOTALL).findall(hidden_data)
itemlist = filtertools.get_links(itemlist, item, list_language)
for videoitem in matches:
# Required for AutoPlay
autoplay.start(itemlist, item)
new_item = Item(
channel = item.channel,
url = videoitem,
title = title,
contentTitle = item.title,
action = 'play',
quality = quality
)
itemlist.append(new_item)
else:
new_item = Item(
channel=item.channel,
url=urls,
title=title,
contentTitle=item.title,
action='play',
quality = quality
)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(

View File

@@ -412,8 +412,6 @@ def episodios(item):
season = match['season']
episode = match['episode']
infoLabels['season']= season
infoLabels['episode'] = episode
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
quality=item.quality, multi=multi, contentSeason=season,
contentEpisodeNumber=episode, infoLabels = infoLabels))
@@ -421,8 +419,12 @@ def episodios(item):
# order list
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
if len(itemlist) > 1:
return sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios"))
return itemlist
def search(item, texto):

View File

@@ -0,0 +1,73 @@
{
"id": "pedropolis",
"name": "PedroPolis",
"active": true,
"adult": false,
"language": "es",
"fanart": "https://scontent-lht6-1.xx.fbcdn.net/v/t31.0-8/21056316_670362456502498_8317422545691005578_o.png?oh=1f13a23a931d82e944a7ec743a19f583&oe=5A599F4D",
"thumbnail": "https://scontent-lht6-1.xx.fbcdn.net/v/t1.0-9/20292600_467501756957771_6794721577753226614_n.jpg?oh=bba1479eccf0adceeb8c0d3450cc2531&oe=5A4EE0F5",
"banner": "",
"version": 1,
"changes": [
{
"date": "15/08/17",
"description": "Nuevo Canal"
}
],
"categories": [
"latino",
"movie",
"tvshow",
"vos"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 5",
"Perfil 4",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "orden_episodios",
"type": "bool",
"label": "Mostrar los episodios de las series en orden descendente",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,464 @@
# -*- coding: utf-8 -*-
# -*- Channel PedroPolis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import sys
import urllib
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb
__channel__ = "pedropolis"
host = "http://pedropolis.com/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
__perfil__ = int(config.get_setting('perfil', __channel__))
except:
__modo_grafico__ = True
__perfil__ = 0
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
def mainlist(item):
logger.info()
itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True,
viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb("channels_movie.png")),
item.clone(title="Series", action="menuseries", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot",
thumbnail=get_thumb("channels_tvshow.png")),
item.clone(title="Buscar", action="search", text_blod=True, extra='buscar',
thumbnail=get_thumb('search.png'), url=host)]
return itemlist
def menumovies(item):
logger.info()
itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True, url=host + 'movies/',
viewcontent='movies', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="peliculas", text_blod=True,
viewcontent='movies', url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),
item.clone(title="Más Valoradas", action="peliculas", text_blod=True, viewcontent='movies',
url=host + 'calificaciones/?get=movies', viewmode="movie_with_plot"),
item.clone(title="Géneros", action="generos", text_blod=True, viewmode="movie_with_plot",
viewcontent='movies', url=host)]
return itemlist
def menuseries(item):
logger.info()
itemlist = [item.clone(title="Todas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshows', url=host + 'tendencias/?get=tv', viewmode="movie_with_plot"),
item.clone(title="Mejor Valoradas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshows', url=host + 'calificaciones/?get=tv', viewmode="movie_with_plot")]
return itemlist
def peliculas(item):
logger.info()
itemlist = []
url_next_page = ''
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?' # img, title
patron += '<div class="rating"><span class="[^"]+"></span>([^<]+).*?' # rating
patron += '<span class="quality">([^<]+)</span><a href="([^"]+)">.*?' # calidad, url
patron += '<span>([^<]+)</span>' # year
matches = scrapertools.find_multiple_matches(data, patron)
# Pagination
if item.next_page != 'b':
if len(matches) > 19:
url_next_page = item.url
matches = matches[:19]
next_page = 'b'
else:
matches = matches[19:]
next_page = 'a'
patron_next_page = "<span class=\"current\">\d+</span><a href='([^']+)'"
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
if len(matches_next_page) > 0:
url_next_page = urlparse.urljoin(item.url, matches_next_page[0])
for scrapedthumbnail, scrapedtitle, rating, calidad, scrapedurl, year in matches:
if 'Proximamente' not in calidad:
scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace(
'Español Latino', '').strip()
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, calidad)
new_item = Item(channel=__channel__, action="findvideos", contentTitle=scrapedtitle,
infoLabels={'year': year, 'rating': rating}, thumbnail=scrapedthumbnail,
url=scrapedurl, next_page=next_page, quality=calidad, title=title)
if year:
tmdb.set_infoLabels_item(new_item, __modo_grafico__)
itemlist.append(new_item)
if url_next_page:
itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
url=url_next_page, next_page=next_page, folder=True, text_blod=True,
thumbnail=get_thumb("next.png")))
for item in itemlist:
if item.infoLabels['plot'] == '':
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
item.fanart = scrapertools.find_single_match(data,
"<meta property='og:image' content='([^']+)' />").replace(
'w780', 'original')
item.plot = scrapertools.find_single_match(data, '<div itemprop="description" class="wp-content">.*?<p>(['
'^<]+)</p>')
item.plot = scrapertools.htmlclean(item.plot)
item.infoLabels['director'] = scrapertools.find_single_match(data,
'<div class="name"><a href="[^"]+">([^<]+)</a>')
item.infoLabels['rating'] = scrapertools.find_single_match(data, '<b id="repimdb"><strong>([^<]+)</strong>')
item.infoLabels['votes'] = scrapertools.find_single_match(data, '<b id="repimdb"><strong>['
'^<]+</strong>\s(.*?) votos</b>')
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
try:
return sub_search(item)
# Catch the exception so the global search is not interrupted if a channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />' # url, img, title
patron += '<span class="[^"]+">([^<]+)</span>.*?' # tipo
patron += '<span class="year">([^"]+)</span>.*?<div class="contenido"><p>([^<]+)</p>' # year, plot
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year, plot in matches:
title = scrapedtitle
if tipo == 'Serie':
contentType = 'tvshow'
action = 'temporadas'
title += ' [COLOR red](' + tipo + ')[/COLOR]'
else:
contentType = 'movie'
action = 'findvideos'
title += ' [COLOR green](' + tipo + ')[/COLOR]'
itemlist.append(item.clone(title=title, url=scrapedurl, contentTitle=scrapedtitle, extra='buscar',
action=action, infoLabels={"year": year}, contentType=contentType,
thumbnail=scrapedthumbnail, text_color=color1, contentSerieName=scrapedtitle))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
if paginacion:
itemlist.append(Item(channel=item.channel, action="sub_search",
title="» Siguiente »", url=paginacion, thumbnail=get_thumb("next.png")))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host + 'movies/'
elif categoria == 'infantiles':
item.url = host + "genre/animacion/"
else:
return []
itemlist = peliculas(item)
if itemlist[-1].title == "» Siguiente »":
itemlist.pop()
# Catch the exception so the 'newest' channel is not interrupted if a channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
data = scrapertools.find_single_match(data, 'Genero</a><ulclass="sub-menu">(.*?)</ul></li><li id')
patron = '<li id="[^"]+" class="menu-item.*?<a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
if scrapedtitle != 'Proximamente':
title = "%s" % scrapedtitle
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=title,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot"))
itemlist.sort(key=lambda it: it.title)
return itemlist
def series(item):
logger.info()
url_next_page = ''
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">'
matches = scrapertools.find_multiple_matches(data, patron)
if item.next_page != 'b':
if len(matches) > 19:
url_next_page = item.url
matches = matches[:19]
next_page = 'b'
else:
matches = matches[19:]
next_page = 'a'
patron_next_page = '<link rel="next" href="([^"]+)" />'
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
if len(matches_next_page) > 0:
url_next_page = urlparse.urljoin(item.url, matches_next_page[0])
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
scrapedtitle = scrapedtitle.replace('&#8217;', "'")
itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie',
url=scrapedurl, thumbnail=scrapedthumbnail,
contentSerieName=scrapedtitle, show=scrapedtitle,
next_page=next_page, action="temporadas", contentType='tvshow'))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
if url_next_page:
itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=url_next_page,
next_page=next_page, thumbnail=get_thumb("next.png")))
for item in itemlist:
if item.infoLabels['plot'] == '':
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
item.fanart = scrapertools.find_single_match(data,
"<meta property='og:image' content='([^']+)' />").replace(
'w780', 'original')
item.plot = scrapertools.find_single_match(data, '<h2>Sinopsis</h2><div class="wp-content"><p>([^<]+)</p>')
item.plot = scrapertools.htmlclean(item.plot)
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<span class="title">([^<]+)<i>.*?' # numeros de temporadas
patron += '<img src="([^"]+)"></a></div>' # capítulos
matches = scrapertools.find_multiple_matches(data, patron)
if len(matches) > 1:
for scrapedseason, scrapedthumbnail in matches:
scrapedseason = " ".join(scrapedseason.split())
temporada = scrapertools.find_single_match(scrapedseason, '(\d+)')
new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail, extra='serie')
new_item.infoLabels['season'] = temporada
new_item.extra = ""
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
if i.infoLabels['title']:
# If the season has its own name, append it to the item title
i.title += " - %s" % (i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the season has its own poster, replace the show's poster
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: it.title)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
return itemlist
else:
return episodios(item)
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<div class="imagen"><a href="([^"]+)">.*?' # url cap, img
patron += '<div class="numerando">(.*?)</div>.*?' # numerando cap
patron += '<a href="[^"]+">([^<]+)</a>' # title de episodios
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedname in matches:
scrapedtitle = scrapedtitle.replace('--', '0')
patron = '(\d+) - (\d+)'
match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
season, episode = match[0]
if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
continue
title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
contentType="episode", extra='serie')
if 'infoLabels' not in new_item:
new_item.infoLabels = {}
new_item.infoLabels['season'] = season
new_item.infoLabels['episode'] = episode.zfill(2)
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# TODO: do not do this when adding to the video library
if not item.extra:
# Fetch the data for all episodes of the season using multiple threads
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
if i.infoLabels['title']:
# If the episode has its own name, append it to the item title
i.title = "%sx%s: %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the episode has its own image, replace the poster
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
reverse=config.get_setting('orden_episodios', __channel__))
# Opción "Añadir esta serie a la videoteca"
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
patron = '<div id="option-(\d+)" class="[^"]+"><iframe.*?src="([^"]+)".*?</iframe>' # lang, url
matches = re.compile(patron, re.DOTALL).findall(data)
for option, url in matches:
lang = scrapertools.find_single_match(data, '<li><a class="options" href="#option-%s">.*?<img '
'src="http://pedropolis.com/wp-content/themes/dooplay/assets/img'
'/flags/(\w+)' % option)
idioma = {'mx': '[COLOR cornflowerblue](LAT)[/COLOR]', 'pe': '[COLOR cornflowerblue](LAT)[/COLOR]',
'co': '[COLOR cornflowerblue](LAT)[/COLOR]', 'es': '[COLOR green](CAST)[/COLOR]',
'en': '[COLOR red](VOS)[/COLOR]', 'jp': '[COLOR green](VOS)[/COLOR]'}
if lang in idioma:
lang = idioma[lang]
# resolve short-URL redirects when one matches
if "bit.ly" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist.append(item.clone(channel=__channel__, url=url, title=item.contentTitle,
action='play', language=lang))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
for x in itemlist:
if x.extra != 'directo':
x.thumbnail = item.thumbnail
x.title = "Ver en: [COLOR yellow](%s)[/COLOR] %s" % (x.server.title(), x.language)
if item.extra != 'serie' and item.extra != 'buscar':
x.title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
x.server.title(), x.quality, x.language)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'serie':
itemlist.append(Item(channel=__channel__,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library",
thumbnail=get_thumb("videolibrary_movie.png"),
extra="findvideos", contentTitle=item.contentTitle))
return itemlist
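One detail worth isolating from findvideos() above: bit.ly links are resolved without downloading the target page, by requesting headers only and not following the redirect, then reading the target from the Location header. As a stand-alone helper (a sketch using the same httptools call as the channel):

def resolve_short_url(url):
    # The real page lands in the Location header; '' when absent.
    response = httptools.downloadpage(url, follow_redirects=False, only_headers=True)
    return response.headers.get("location", "")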

View File

@@ -7,7 +7,7 @@ from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from platformcode import logger, config
HOST = 'http://peliculasaudiolatino.com'
@@ -154,9 +154,18 @@ def findvideos(item):
for servidor, idioma, calidad, scrapedurl in matches:
url = scrapedurl
server = servertools.get_server_name(servidor)
title = item.title
title = "Enlace encontrado en %s" % (server)
itemlist.append(Item(channel=item.channel, action="play", title=title, fulltitle=item.fulltitle, url=url,
thumbnail=scrapedthumbnail, language=idioma, quality=calidad, server=server))
if itemlist:
itemlist.append(Item(channel=item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer",
text_color="magenta"))
# Opción "Añadir esta película a la biblioteca de KODI"
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir pelicula a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
fulltitle=item.fulltitle))
return itemlist

View File

@@ -89,7 +89,7 @@ def newest(categoria):
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
logger.error("%s" % line)
return []
return itemlist
@@ -215,14 +215,14 @@ def findvideos(item):
url = url + '|' + item.url
title = "%s - %s" % ('%s', title)
itemlist.append(Item (channel=item.channel, action="play", url=url, title=title, text_color=color3))
itemlist.append(Item(channel=item.channel, action="play", url=url, title=title, text_color=color3))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if item.extra != "findvideos" and config.get_videolibrary_support():
itemlist.append(Item (channel=item.channel, title="Añadir película a la videoteca", \
action="add_pelicula_to_library",
extra="findvideos", text_color="green"))
itemlist.append(
item.clone(title="Añadir película a la videoteca", action="add_pelicula_to_library", extra="findvideos",
text_color="green"))
return itemlist

View File

@@ -198,6 +198,15 @@ def findvideos(item):
quality = quality
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if itemlist:
itemlist.append(Item(channel=item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer",
text_color="magenta"))
# Opción "Añadir esta película a la biblioteca de KODI"
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir pelicula a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
fulltitle=item.fulltitle))
return itemlist

View File

@@ -1,7 +1,7 @@
{
"id": "pelis24",
"name": "Pelis24",
"active": true,
"active": false,
"adult": false,
"language": "es",
"thumbnail": "pelis24.png",
@@ -49,4 +49,4 @@
"visible": true
}
]
}
}

View File

@@ -1,27 +1,38 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from platformcode import config, logger
host = "http://peliscity.com"
def mainlist(item):
logger.info()
itemlist = []
data = httptools.downloadpage(host).data
patron = 'cat-item.*?span>([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
can = 0
for cantidad in matches:
can += int(cantidad.replace(".", ""))
itemlist.append(
Item(channel=item.channel, title="Últimas agregadas", action="agregadas", url="http://peliscity.com",
Item(channel=item.channel, title="Películas: (%s)" %can, text_bold=True))
itemlist.append(
Item(channel=item.channel, title=" Últimas agregadas", action="agregadas", url= host,
viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Peliculas HD", action="agregadas",
url="http://peliscity.com/calidad/hd-real-720", viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title=" Peliculas HD", action="agregadas",
url= host + "/calidad/hd-real-720", viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, title="Listado por género", action="porGenero", url="http://peliscity.com"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="http://peliscity.com/?s="))
itemlist.append(Item(channel=item.channel, title="Idioma", action="porIdioma", url="http://peliscity.com/"))
Item(channel=item.channel, title=" Listado por género", action="porGenero", url= host))
itemlist.append(Item(channel=item.channel, title=" Buscar", action="search", url= host + "/?s="))
itemlist.append(Item(channel=item.channel, title=" Idioma", action="porIdioma", url= host))
return itemlist
@@ -29,12 +40,12 @@ def mainlist(item):
def porIdioma(item):
itemlist = []
itemlist.append(Item(channel=item.channel, title="Castellano", action="agregadas",
url="http://www.peliscity.com/idioma/espanol-castellano/", viewmode="movie_with_plot"))
url= host + "/idioma/espanol-castellano/", viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, title="VOS", action="agregadas", url="http://www.peliscity.com/idioma/subtitulada/",
Item(channel=item.channel, title="VOS", action="agregadas", url= host + "/idioma/subtitulada/",
viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Latino", action="agregadas",
url="http://www.peliscity.com/idioma/espanol-latino/", viewmode="movie_with_plot"))
url= host + "/idioma/espanol-latino/", viewmode="movie_with_plot"))
return itemlist
@@ -43,15 +54,16 @@ def porGenero(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
logger.info("data=" + data)
patron = 'cat-item.*?href="([^"]+).*?>(.*?)<'
patron = 'cat-item.*?href="([^"]+).*?>(.*?)<.*?span>([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for urlgen, genero in matches:
itemlist.append(Item(channel=item.channel, action="agregadas", title=genero, url=urlgen, folder=True,
for urlgen, genero, cantidad in matches:
cantidad = cantidad.replace(".", "")
titulo = genero + " (" + cantidad + ")"
itemlist.append(Item(channel=item.channel, action="agregadas", title=titulo, url=urlgen, folder=True,
viewmode="movie_with_plot"))
return itemlist
@@ -60,7 +72,7 @@ def porGenero(item):
def search(item, texto):
logger.info()
texto_post = texto.replace(" ", "+")
item.url = "http://www.peliscity.com/?s=" + texto_post
item.url = host + "/?s=" + texto_post
try:
return listaBuscar(item)
@@ -76,7 +88,7 @@ def agregadas(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\r|\t|\s{2}|&nbsp;|"', "", data)
patron = scrapertools.find_multiple_matches (data,'<divclass=col-mt-5 postsh>.*?Duración')
@@ -92,10 +104,18 @@ def agregadas(item):
plot = info[4]
year = info[5].strip()
itemlist.append(Item(channel=item.channel, title=title, url=url, action='findvideos',thumbnail=thumbnail,
itemlist.append(Item(channel=item.channel,
action='findvideos',
contentType = "movie",
fulltitle = title,
infoLabels={'year':year},
plot=plot,
quality=quality, infoLabels={'year':year}))
quality=quality,
thumbnail=thumbnail,
title=title,
contentTitle = title,
url=url
))
# Pagination
try:
next_page = scrapertools.find_single_match(data,'tima>.*?href=(.*?) ><i')
@@ -113,7 +133,7 @@ def listaBuscar(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n", " ", data)
logger.info("data=" + data)
@@ -135,7 +155,7 @@ def findvideos(item):
plot = item.plot
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
patron = 'cursor: hand" rel="(.*?)".*?class="optxt"><span>(.*?)<.*?width.*?class="q">(.*?)</span'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -150,8 +170,14 @@ def findvideos(item):
itemlist.append(
Item(channel=item.channel, action="play", title=title, fulltitle=item.title, url=scrapedurl,
thumbnail=item.thumbnail, plot=plot, show=item.show, quality= quality, language=language, extra = item.thumbnail))
itemlist=servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Opción "Añadir esta película a la biblioteca de KODI"
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
filtro=True, action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
infoLabels={'title': item.fulltitle}, fulltitle=item.title,
extra="library"))
return itemlist
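The lambda passed to get_servers_itemlist above relies on each title keeping a free %s slot that is later filled with the detected server name. The same idea as a stand-alone toy (a sketch, not the servertools implementation, which also detects the server from each URL):

def fill_server_names(itemlist):
    # Every title carries a '%s' slot, e.g. '%s - Movie (HD)';
    # after detection it reads 'Openload - Movie (HD)'.
    for i in itemlist:
        i.title = i.title % i.server.capitalize()
    return itemlist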

View File

@@ -7,6 +7,7 @@ from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core import jsontools
from core.item import Item
from platformcode import config, logger
@@ -237,39 +238,43 @@ def findvideos(item):
language=lang,
url=url
))
logger.debug('templist: %s' % templist)
for videoitem in templist:
logger.debug('videoitem.language: %s' % videoitem.language)
data = httptools.downloadpage(videoitem.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
id = scrapertools.find_single_match(data, 'var _SOURCE =.*?source:(.*?),')
if videoitem.language == 'SUB':
sub = scrapertools.find_single_match(data, 'var _SOURCE =.*?srt:(.*?),')
sub = sub.replace('\\', '')
else:
sub = ''
new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
'=%s&srt=%s' % (id, sub)
urls_list = scrapertools.find_multiple_matches(data, '({"type":.*?})')
for element in urls_list:
json_data=jsontools.load(element)
data = httptools.downloadpage(new_url).data
id = json_data['id']
sub = json_data['srt']
url = json_data['source']
url = scrapertools.find_single_match(data, '<iframe src="(.*?preview)"')
title = videoitem.contentTitle + ' (' + audio[videoitem.language] + ')'
logger.debug('url: %s' % url)
video_list.extend(servertools.find_video_items(data=url))
for urls in video_list:
if urls.language == '':
urls.language = videoitem.language
urls.title = item.title + '(%s) (%s)' % (urls.language, urls.server)
logger.debug('video_list: %s' % video_list)
# itemlist.append(item.clone(title= title, url = url, action = 'play', subtitle = sub))
quality = json_data['quality']
if 'http' not in url :
for video_url in video_list:
video_url.channel = item.channel
video_url.action = 'play'
new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
'=%s&srt=%s' % (url, sub)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
data = httptools.downloadpage(new_url).data
data = re.sub(r'\\', "", data)
video_list.extend(servertools.find_video_items(data=data))
for urls in video_list:
if urls.language == '':
urls.language = videoitem.language
urls.title = item.title + '(%s) (%s)' % (urls.language, urls.server)
for video_url in video_list:
video_url.channel = item.channel
video_url.action = 'play'
video_url.quality = quality
else:
server = servertools.get_server_from_url(url)
video_list.append(item.clone(title=item.title, url=url, action='play', quality = quality,
server=server))
if config.get_videolibrary_support() and len(video_list) > 0 and item.extra != 'findvideos':
video_list.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,

View File

@@ -422,7 +422,6 @@ def findvideos(item):
itemlist = []
duplicados = []
data = httptools.downloadpage(item.url).data
logger.debug('data: %s'%data)
video_page = scrapertools.find_single_match(data, "<iframe width='100%' height='500' src='(.*?)' frameborder='0'")
data = httptools.downloadpage(video_page).data
patron = '<li data-id=".*?">\s+<a href="(.*?)" >'
@@ -471,7 +470,7 @@ def findvideos(item):
videoitem.quality = 'default'
videoitem.language = 'Latino'
if videoitem.server != '':
videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
videoitem.thumbnail = item.contentThumbnail
else:
videoitem.thumbnail = item.thumbnail
videoitem.server = 'directo'
@@ -538,6 +537,6 @@ def newest(categoria):
logger.error("{0}".format(line))
return []
return itemlist
itemlist = filtertools.get_links(itemlist, item, list_language)

View File

@@ -0,0 +1,27 @@
{
"id": "pelisplusco",
"name": "PelisPlus.co",
"active": true,
"adult": false,
"thumbnail": "https://s26.postimg.org/jov1pmbh5/pelisplusco.png",
"banner": "https://s26.postimg.org/4hf259jmh/pelisplusco-banner.png",
"version": 1,
"categories": [
"latino",
"movie",
"tvshow",
"documentary",
"direct"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -0,0 +1,310 @@
# -*- coding: utf-8 -*-
# -*- Channel PelisPlus.co -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from platformcode import logger
from platformcode import config
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
host = 'http://pelisplus.co'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Peliculas",
action="movie_menu",
))
itemlist.append(item.clone(title="Series",
action="series_menu",
))
return itemlist
def movie_menu(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Estrenos",
action="list_all",
url = host+'/estrenos/',
type = 'normal'
))
itemlist.append(item.clone(title="Generos",
action="seccion",
url=host,
seccion='generos'
))
itemlist.append(item.clone(title="Por Año",
action="seccion",
url=host,
seccion='anios'
))
return itemlist
def series_menu(item):
logger.info()
itemlist =[]
itemlist.append(item.clone(title="Todas",
action="list_all",
url=host + '/series/',
type='serie'
))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
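get_source() strips quotes, newlines, tabs and runs of whitespace before any matching, which is why every pattern in this channel matches unquoted attributes (href=(.*?) with no quote marks). A quick illustration on a toy snippet:

raw = '<a href="/peliculas/x">\n\tTitle</a>'  # made-up input
clean = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
# clean == '<a href=/peliculas/x>Title</a>', so href=(.*?)> matches directly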
def list_all (item):
logger.info ()
itemlist = []
if item.type not in ['normal', 'seccion', 'serie']:
post = {'page':item.page, 'type':item.type,'id':item.id}
post = urllib.urlencode(post)
data =httptools.downloadpage(item.url, post=post).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
else:
data = get_source(item.url)
if item.type == 'serie' or item.type == 'recents':
contentType = 'serie'
action = 'seasons'
else:
contentType = 'pelicula'
action = 'findvideos'
patron = 'item-%s><a href=(.*?)><figure><img src=https:(.*?)'%contentType
patron += ' alt=><\/figure><p>(.*?)<\/p><span>(.*?)<\/span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
url = host+scrapedurl
thumbnail = scrapedthumbnail
plot= ''
contentTitle=scrapedtitle
title = contentTitle
year = scrapedyear
fanart =''
new_item=item.clone(action=action,
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=fanart,
infoLabels ={'year':year}
)
if contentType =='serie':
new_item.contentSerieName=title
else:
new_item.contentTitle = title
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb =True)
# Pagination
next_page_valid = scrapertools.find_single_match(data, '<div class=butmore(?: site=series|) page=(.*?) id=(.*?) '
'type=(.*?) limit=.*?>')
if item.type != 'normal' and (len(itemlist)>19 or next_page_valid):
type = item.type
if item.type == 'serie':
type = 'recents'
if next_page_valid:
page = str(int(next_page_valid[0])+1)
if item.type != 'recents':
id = next_page_valid[1]
type = next_page_valid[2]
else:
id =''
else:
page = str(int(item.page)+1)
id = item.id
if type =='recents':
type_pagination = '/series/pagination'
else:
type_pagination = '/pagination'
url = host+type_pagination
itemlist.append(item.clone(action = "list_all",
title = 'Siguiente >>>',
page=page,
url = url,
id = id,
type = type
))
return itemlist
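Note that list_all() pages through results by POSTing page, type and id to a pagination endpoint instead of following <a> links. The request in isolation (field names and endpoint as used above; the values are illustrative only):

post = urllib.urlencode({'page': '2', 'type': 'peliculas', 'id': ''})  # toy values
data = httptools.downloadpage(host + '/pagination', post=post).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)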
def seccion(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.seccion == 'generos':
patron = '<li><a href=(.*?)><i class=ion-cube><\/i>(.*?)<\/span>'
type = 'genre'
elif item.seccion == 'anios':
patron = '<li><a href=(\/peliculas.*?)>(\d{4})<\/a>'
type = 'year'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
if item.seccion == 'generos':
cant = re.sub(r'.*?<span class=cant-genre>','',scrapedtitle)
only_title = re.sub(r'<.*','',scrapedtitle).rstrip()
title = only_title+' (%s)'%cant
url = host+scrapedurl
itemlist.append(
Item(channel=item.channel,
action="list_all",
title=title,
fulltitle=item.title,
url=url,
type = 'seccion'
))
# Pagination
if itemlist != []:
next_page = scrapertools.find_single_match(data, '<li><a class= item href=(.*?)&limit=.*?>Siguiente <')
next_page_url = host + next_page
if next_page != '':
itemlist.append(item.clone(action="seccion",
title='Siguiente >>>',
url=next_page_url,
thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'
))
return itemlist
def seasons(item):
logger.info()
itemlist =[]
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron ='<i class=ion-chevron-down arrow><\/i>(.*?)<\/div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for title in matches:
season = title.replace('Temporada ', '')
# Copy the labels per season; a single shared dict would leave every
# item tagged with the last season number.
infoLabels = dict(item.infoLabels)
infoLabels['season'] = season
itemlist.append(Item(
channel=item.channel,
title=title,
url=item.url,
action='season_episodes',
contentSerieName=item.contentSerieName,
contentSeasonNumber=season,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist[::-1]
def season_episodes(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
season = str(item.infoLabels['season'])
patron = '<a href=(.*?temporada-%s\/.*?) title=.*?i-play><\/i> (.*?)<\/a>' % season
matches = re.compile(patron, re.DOTALL).findall(data)
for url, episode in matches:
episodenumber = re.sub('C.* ', '', episode)
# Same copy-per-item fix as in seasons(): do not mutate one shared dict.
infoLabels = dict(item.infoLabels)
infoLabels['episode'] = episodenumber
itemlist.append(Item(channel=item.channel,
title=episode,
url=host + url,
action='findvideos',
infoLabels=infoLabels,
contentEpisodeNumber=episode
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist[::-1]
def findvideos(item):
logger.info()
itemlist = []
video_list = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'data-source=(.*?) data.*?-srt=(.*?) data-iframe=0><a>(.*?) - (.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, sub, language, quality in matches:
if 'http' not in url:
new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
'=%s&srt=%s' % (url, sub)
data = httptools.downloadpage(new_url).data
data = re.sub(r'\\', "", data)
video_list.extend(servertools.find_video_items(data=data))
for video_url in video_list:
video_url.channel = item.channel
video_url.action = 'play'
video_url.title = item.title + '(%s) (%s)' % (language, video_url.server)
if video_url.language == '':
video_url.language = language
video_url.subtitle = sub
video_url.contentTitle=item.contentTitle
else:
server = servertools.get_server_from_url(url)
video_list.append(item.clone(title=item.title,
url=url,
action='play',
quality = quality,
language = language,
server=server,
subtitle = sub
))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return video_list

View File

@@ -0,0 +1,44 @@
{
"id": "thumbzilla",
"name": "ThumbZilla",
"active": true,
"adult": true,
"language": "en",
"fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/xthearebg.jpg",
"thumbnail": "https://image.spreadshirtmedia.com/image-server/v1/designs/1002274824,width=178,height=178/thumbzilla-womens-white-tee-big-logo.png",
"banner": "",
"version": 1,
"changes": [
{
"date": "07/06/17",
"description": "Canal Nuevo"
}
],
"categories": [
"adult"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -0,0 +1,166 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import channeltools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
__channel__ = "thumbzilla"
host = 'https://www.thumbzilla.com/'
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
__perfil__ = int(config.get_setting('perfil', __channel__))
except:
__modo_grafico__ = True
__perfil__ = 0
# Set the color profile
perfil = [['0xFF6E2802', '0xFFFAA171', '0xFFE9D7940'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
['0xFF58D3F7', '0xFF2E64FE', '0xFF0404B4']]
if __perfil__ - 1 >= 0:
color1, color2, color3 = perfil[__perfil__ - 1]
else:
color1 = color2 = color3 = ""
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
thumbnail = 'https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/%s.png'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=__channel__, action="videos", title="Más Calientes", url=host,
viewmode="movie", thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Nuevas", url=host + 'newest',
action="videos", viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Tendencias", url=host + 'tending',
action="videos", viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Mejores Videos", url=host + 'top',
action="videos", viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Populares", url=host + 'popular',
action="videos", viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Videos en HD", url=host + 'hd',
action="videos", viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Caseros", url=host + 'hd',
action="videos", viewmode="movie_with_plot", viewcontent='homemade',
thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Categorías", action="categorias",
url=host + 'categories/', viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Buscador", action="search", url=host,
thumbnail=get_thumb("channels_adult.png"), extra="buscar"))
return itemlist
# ACTUALLY PASSES THE SEARCH URL
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = urlparse.urljoin(item.url, "video/search?q={0}".format(texto))
# item.url = item.url % tecleado
item.extra = "buscar"
try:
return videos(item)
# Catch the exception so one failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def videos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<a class="[^"]+" href="([^"]+)">' # url
patron += '<img id="[^"]+".*?src="([^"]+)".*?' # img
patron += '<span class="title">([^<]+)</span>.*?' # title
patron += '<span class="duration">([^<]+)</span>' # time
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, time in matches:
title = "[%s] %s" % (time, scrapedtitle)
itemlist.append(Item(channel=item.channel, action='findvideos', title=title, thumbnail=scrapedthumbnail,
url=host + scrapedurl, contentTitle=scrapedtitle, fanart=scrapedthumbnail))
paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace('amp;', '')
if paginacion:
itemlist.append(Item(channel=item.channel, action="videos",
thumbnail=thumbnail % 'rarrow',
title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
return itemlist
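
The pagination hook above relies on the page head exposing a <link rel="next"> tag; the 'amp;' replace undoes HTML entity escaping in the query string. A minimal standalone sketch of that step, on an invented sample (illustration only, not real site markup):

import re

head = '<link rel="next" href="https://www.thumbzilla.com/newest?page=2&amp;x=1" />'
next_url = re.search('<link rel="next" href="([^"]+)" />', head).group(1)
print(next_url.replace('amp;', ''))  # -> https://www.thumbzilla.com/newest?page=2&x=1
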
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
patron = 'class="checkHomepage"><a href="([^"]+)".*?' # url
patron += '<span class="count">([^<]+)</span>' # title, vids
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, vids in matches:
scrapedtitle = scrapedurl.replace('/categories/', '').replace('-', ' ').title()
title = "%s (%s)" % (scrapedtitle, vids.title())
thumbnail = item.thumbnail
url = urlparse.urljoin(item.url, scrapedurl)
itemlist.append(Item(channel=item.channel, action="videos", fanart=thumbnail,
title=title, url=url, thumbnail=thumbnail,
viewmode="movie_with_plot", folder=True))
return itemlist
def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|\s{2}|&nbsp;", "", data)
# logger.info(data)
patron = '"quality":"([^"]+)","videoUrl":"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for calidad, scrapedurl in matches:
scrapedurl = scrapedurl.replace('\\', '')
title = "[COLOR yellow](%s)[/COLOR] %s" % (calidad, item.contentTile)
server = servertools.get_server_from_url(scrapedurl)
itemlist.append(item.clone(action='play', title=title, server=server, mediatype='movie', url=scrapedurl))
return itemlist
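
findvideos() pulls quality/URL pairs out of the player configuration embedded in the page and unescapes the backslashed URLs. A quick sketch of that parsing on invented data (sample string only, not actual site output):

import re

data = '"quality":"720p","videoUrl":"https:\\/\\/cdn.example\\/v.mp4"'
for quality, url in re.findall(r'"quality":"([^"]+)","videoUrl":"([^"]+)"', data):
    print("%s %s" % (quality, url.replace('\\', '')))  # -> 720p https://cdn.example/v.mp4
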

View File

@@ -48,6 +48,14 @@
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",

View File

@@ -1,11 +1,11 @@
# -*- coding: utf-8 -*-
import base64
import re
from core import channeltools
from core import httptools
from core import scrapertoolsV2
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
@@ -27,22 +27,13 @@ def mainlist(item):
thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"
itemlist.append(item.clone(title="Novedades", action="peliculas", text_bold=True, viewcontent='movies',
url=HOST + "/ultimas-y-actualizadas",
url=HOST,
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Estrenos", action="peliculas", text_bold=True,
url=HOST + "/genre/premieres", thumbnail=thumbnail % 'estrenos'))
itemlist.append(item.clone(title="", folder=False))
itemlist.append(Item(channel=item.channel, title="Filtrar por:", fanart=fanart_host, folder=False,
text_color=color3, text_bold=True, thumbnail=thumbnail_host))
itemlist.append(item.clone(title=" Género", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="genre", thumbnail=thumbnail % 'generos', viewmode="thumbnails"))
itemlist.append(item.clone(title=" Idioma", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="audio", thumbnail=thumbnail % 'idiomas'))
itemlist.append(item.clone(title=" Calidad", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="quality", thumbnail=thumbnail % 'calidad'))
itemlist.append(item.clone(title=" Año", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="year", thumbnail=thumbnail % 'year'))
url=HOST + "/premiere", thumbnail=thumbnail % 'estrenos'))
itemlist.append(item.clone(title="Género", action="menu_buscar_contenido", text_bold=True,thumbnail=thumbnail % 'generos', viewmode="thumbnails",
url=HOST
))
itemlist.append(item.clone(title="", folder=False))
itemlist.append(item.clone(title="Buscar por título", action="search", thumbnail=thumbnail % 'buscar'))
@@ -55,8 +46,7 @@ def search(item, texto):
itemlist = []
try:
# http://www.yaske.ro/search/?q=los+pitufos
item.url = HOST + "/search/?q=" + texto.replace(' ', '+')
item.url = HOST + "/search/?query=" + texto.replace(' ', '+')
item.extra = ""
itemlist.extend(peliculas(item))
if itemlist[-1].title == ">> Página siguiente":
@@ -80,9 +70,9 @@ def newest(categoria):
item = Item()
try:
if categoria == 'peliculas':
item.url = HOST + "/ultimas-y-actualizadas"
item.url = HOST
elif categoria == 'infantiles':
item.url = HOST + "/search/?q=&genre%5B%5D=animation"
item.url = HOST + "/genre/16/"
else:
return []
@@ -103,59 +93,46 @@ def newest(categoria):
def peliculas(item):
logger.info()
itemlist = []
url_next_page = ""
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<article class.*?'
patron += '<a href="([^"]+)">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<aside class="item-control down">(.*?)</aside>.*?'
patron += '<small class="pull-right text-muted">([^<]+)</small>.*?'
patron += '<h2 class.*?>([^<]+)</h2>'
patron = 'class="post-item-image btn-play-item".*?'
patron += 'href="([^"]+)">.*?'
patron += '<img data-original="([^"]+)".*?'
patron += 'glyphicon-calendar"></i>([^<]+).*?'
patron += 'post-item-flags"> (.*?)</div.*?'
patron += 'text-muted f-14">(.*?)</h3'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, patron)
# Pagination
if item.next_page != 'b':
if len(matches) > 30:
url_next_page = item.url
matches = matches[:30]
next_page = 'b'
else:
matches = matches[30:]
next_page = 'a'
patron_next_page = 'Anteriores</a> <a href="([^"]+)" class="btn btn-default ".*?Siguiente'
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
if len(matches_next_page) > 0:
url_next_page = matches_next_page[0]
for scrapedurl, scrapedthumbnail, idiomas, year, scrapedtitle in matches:
patronidiomas = "<img src='([^']+)'"
matchesidiomas = re.compile(patronidiomas, re.DOTALL).findall(idiomas)
patron_next_page = 'href="([^"]+)"> &raquo;'
matches_next_page = scrapertools.find_single_match(data, patron_next_page)
if len(matches_next_page) > 0:
url_next_page = item.url + matches_next_page
for scrapedurl, scrapedthumbnail, year, idiomas, scrapedtitle in matches:
year = year.strip()
patronidiomas = '<img src="([^"]+)"'
matchesidiomas = scrapertools.find_multiple_matches(idiomas, patronidiomas)
idiomas_disponibles = []
for idioma in matchesidiomas:
if idioma.endswith("la_la.png"):
if idioma.endswith("/la.png"):
idiomas_disponibles.append("LAT")
elif idioma.endswith("en_en.png"):
elif idioma.endswith("/en.png"):
idiomas_disponibles.append("VO")
elif idioma.endswith("en_es.png"):
elif idioma.endswith("/en_es.png"):
idiomas_disponibles.append("VOSE")
elif idioma.endswith("es_es.png"):
elif idioma.endswith("/es.png"):
idiomas_disponibles.append("ESP")
if idiomas_disponibles:
idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"
contentTitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle.strip())
contentTitle = scrapertools.htmlclean(scrapedtitle.strip())
title = "%s %s" % (contentTitle, idiomas_disponibles)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=contentTitle,
infoLabels={"year": year}, text_color=color1))
# Fetch the basic metadata for all the movies using multiple threads
tmdb.set_infoLabels(itemlist)
@@ -163,48 +140,32 @@ def peliculas(item):
if url_next_page:
itemlist.append(
Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
url=url_next_page, next_page=next_page, folder=True, text_color=color3, text_bold=True))
url=url_next_page, folder=True, text_color=color3, text_bold=True))
return itemlist
def menu_buscar_contenido(item):
logger.info(item)
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<select name="' + item.extra + '(.*?)</select>'
data = scrapertoolsV2.get_match(data, patron)
patron = 'Generos.*?</ul>'
data = scrapertools.find_single_match(data, patron)
# Extract the entries
patron = "<option value='([^']+)'>([^<]+)</option>"
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedvalue, scrapedtitle in matches:
thumbnail = ""
if item.extra == 'genre':
if scrapedtitle.strip() in ['Documental', 'Short', 'News']:
continue
url = HOST + "/search/?q=&genre%5B%5D=" + scrapedvalue
filename = scrapedtitle.lower().replace(' ', '%20')
if filename == "ciencia%20ficción":
filename = "ciencia%20ficcion"
thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png" \
% filename
elif item.extra == 'year':
url = HOST + "/search/?q=&year=" + scrapedvalue
thumbnail = item.thumbnail
else:
# http://www.yaske.ro/search/?q=&quality%5B%5D=c9
# http://www.yaske.ro/search/?q=&audio%5B%5D=es
url = HOST + "/search/?q=&" + item.extra + "%5B%5D=" + scrapedvalue
thumbnail = item.thumbnail
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, text_color=color1,
thumbnail=thumbnail, contentType='movie', folder=True, viewmode="movie_with_plot"))
patron = 'href="([^"]+)">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
url = HOST + scrapedurl
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url,
text_color=color1, contentType='movie', folder=True, viewmode="movie_with_plot"))
if item.extra in ['genre', 'audio', 'year']:
return sorted(itemlist, key=lambda i: i.title.lower(), reverse=item.extra == 'year')
@@ -214,29 +175,28 @@ def menu_buscar_contenido(item):
def findvideos(item):
logger.info()
itemlist = list()
sublist = list()
itemlist = []
sublist = []
# Download the page
data = httptools.downloadpage(item.url).data
url = "http://widget.olimpo.link/playlist/?tmdb=" + scrapertools.find_single_match(item.url, 'yaske.ro/([0-9]+)')
data = httptools.downloadpage(url).data
if not item.plot:
item.plot = scrapertoolsV2.find_single_match(data, '>Sinopsis</dt> <dd>([^<]+)</dd>')
item.plot = scrapertoolsV2.decodeHtmlentities(item.plot)
patron = '<option value="([^"]+)"[^>]+'
patron += '>([^<]+).*?</i>([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
patron = '(/embed/[^"]+).*?'
patron += 'quality text-overflow ">([^<]+).*?'
patron += 'title="([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for url, idioma, calidad in matches:
if 'yaske' in url:
for url, calidad, idioma in matches:
if 'embed' in url:
url = "http://widget.olimpo.link" + url
data = httptools.downloadpage(url).data
url_enc = scrapertoolsV2.find_single_match(data, "eval.*?'(.*?)'")
url_dec = base64.b64decode(url_enc)
url = scrapertoolsV2.find_single_match(url_dec, 'iframe src="(.*?)"')
sublist.append(item.clone(action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
url = scrapertools.find_single_match(data, 'iframe src="([^"]+)')
sublist.append(item.clone(channel=item.channel, action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
language=idioma.strip()))
sublist = servertools.get_servers_itemlist(sublist, lambda i: "Ver en %s %s" % (i.server, i.quality), True)
# Add the servers found, grouping them by language
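
The rewritten findvideos() goes through the olimpo widget: it scrapes /embed/ paths plus quality and language off the playlist page, then pulls the final iframe out of each embed page. A condensed offline sketch of that flow (both HTML snippets are invented; the channel fetches the real ones with httptools.downloadpage):

import re

playlist = ('<a href="/embed/42"><span class="quality text-overflow ">HD</span>'
            '</i><img title="Latino">')
embed = '<iframe src="https://hoster.example/embed/abc"></iframe>'

patron = '(/embed/[^"]+).*?quality text-overflow ">([^<]+).*?title="([^"]+)'
for path, quality, language in re.findall(patron, playlist, re.DOTALL):
    url = "http://widget.olimpo.link" + path
    final = re.search('iframe src="([^"]+)', embed).group(1)
    print("%s | %s %s -> %s" % (url, quality.strip(), language.strip(), final))
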

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------
# Scraper tools for reading and processing web elements
# --------------------------------------------------------------------------------
@@ -214,6 +214,7 @@ def htmlclean(cadena):
cadena = cadena.replace("<tr>", "")
cadena = cadena.replace("</tr>", "")
cadena = cadena.replace("<![CDATA[", "")
cadena = cadena.replace("<wbr>", "")
cadena = cadena.replace("<Br />", " ")
cadena = cadena.replace("<BR />", " ")
cadena = cadena.replace("<Br>", " ")

View File

@@ -81,6 +81,7 @@ def htmlclean(cadena):
cadena = cadena.replace("<tr>", "")
cadena = cadena.replace("</tr>", "")
cadena = cadena.replace("<![CDATA[", "")
cadena = cadena.replace("<wbr>", "")
cadena = cadena.replace("<Br />", " ")
cadena = cadena.replace("<BR />", " ")
cadena = cadena.replace("<Br>", " ")

View File

@@ -131,17 +131,26 @@ def render_items(itemlist, parent_item):
else:
icon_image = "DefaultVideo.png"
# Create the listitem
listitem = xbmcgui.ListItem(item.title, iconImage=icon_image, thumbnailImage=item.thumbnail)
# Set the fanart
if item.fanart:
listitem.setProperty('fanart_image', item.fanart)
fanart = item.fanart
else:
listitem.setProperty('fanart_image', os.path.join(config.get_runtime_path(), "fanart.jpg"))
fanart = os.path.join(config.get_runtime_path(), "fanart.jpg")
# TODO: Can this line be removed? It does not seem to have any effect.
xbmcplugin.setPluginFanart(int(sys.argv[1]), os.path.join(config.get_runtime_path(), "fanart.jpg"))
# Create the listitem
listitem = xbmcgui.ListItem(item.title)
# icon, thumb and poster are skin dependent, so set all of them to avoid problems
# if no thumb exists, the icon value is used
if config.get_platform(True)['num_version'] >= 16.0:
listitem.setArt({'icon': icon_image, 'thumb': item.thumbnail, 'poster': item.thumbnail, 'fanart': fanart})
else:
listitem.setIconImage(icon_image)
listitem.setThumbnailImage(item.thumbnail)
listitem.setProperty('fanart_image', fanart)
# Not needed, fanart is used instead
# xbmcplugin.setPluginFanart(int(sys.argv[1]), os.path.join(config.get_runtime_path(), "fanart.jpg"))
# This option makes it possible to use xbmcplugin.setResolvedUrl()
# if item.isPlayable == True or (config.get_setting("player_mode") == 1 and item.action == "play"):
@@ -157,7 +166,10 @@ def render_items(itemlist, parent_item):
context_commands = set_context_commands(item, parent_item)
# Add the item
listitem.addContextMenuItems(context_commands, replaceItems=True)
if config.get_platform(True)['num_version'] >= 17.0:
listitem.addContextMenuItems(context_commands)
else:
listitem.addContextMenuItems(context_commands, replaceItems=True)
if not item.totalItems:
item.totalItems = 0
@@ -166,7 +178,7 @@ def render_items(itemlist, parent_item):
totalItems=item.totalItems)
# Set the view types...
if config.get_setting("forceview") == True:
if config.get_setting("forceview"):
# ...force them according to the viewcontent
xbmcplugin.setContent(int(sys.argv[1]), parent_item.viewcontent)
# logger.debug(parent_item)
@@ -184,7 +196,7 @@ def render_items(itemlist, parent_item):
xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)
# Set the view
if config.get_setting("forceview") == True:
if config.get_setting("forceview"):
viewmode_id = get_viewmode_id(parent_item)
xbmc.executebuiltin("Container.SetViewMode(%s)" % viewmode_id)
@@ -256,10 +268,6 @@ def set_infolabels(listitem, item, player=False):
elif not player:
listitem.setInfo("video", {"Title": item.title})
# Added for Kodi Krypton (v17)
if config.get_platform(True)['num_version'] >= 17.0:
listitem.setArt({"poster": item.thumbnail})
def set_context_commands(item, parent_item):
"""
@@ -458,7 +466,12 @@ def play_video(item, strm=False, force_direct=False):
if item.channel == 'downloads':
logger.info("Reproducir video local: %s [%s]" % (item.title, item.url))
xlistitem = xbmcgui.ListItem(path=item.url, thumbnailImage=item.thumbnail)
xlistitem = xbmcgui.ListItem(path=item.url)
if config.get_platform(True)['num_version'] >= 16.0:
xlistitem.setArt({"thumb": item.thumbnail})
else:
xlistitem.setThumbnailImage(item.thumbnail)
set_infolabels(xlistitem, item, True)
xbmc.Player().play(item.url, xlistitem)
return
@@ -491,9 +504,16 @@ def play_video(item, strm=False, force_direct=False):
# the video information is obtained.
if not item.contentThumbnail:
xlistitem = xbmcgui.ListItem(path=mediaurl, thumbnailImage=item.thumbnail)
thumb = item.thumbnail
else:
xlistitem = xbmcgui.ListItem(path=mediaurl, thumbnailImage=item.contentThumbnail)
thumb = item.contentThumbnail
xlistitem = xbmcgui.ListItem(path=mediaurl)
if config.get_platform(True)['num_version'] >= 16.0:
xlistitem.setArt({"thumb": thumb})
else:
xlistitem.setThumbnailImage(thumb)
set_infolabels(xlistitem, item, True)
# if the video is in mpd format, configure the listitem to play it
@@ -695,7 +715,14 @@ def set_opcion(item, seleccion, opciones, video_urls):
if seleccion == -1:
# Avoids the "One or more items failed" error when the selection is cancelled from an strm file
listitem = xbmcgui.ListItem(item.title, iconImage="DefaultVideo.png", thumbnailImage=item.thumbnail)
listitem = xbmcgui.ListItem(item.title)
if config.get_platform(True)['num_version'] >= 16.0:
listitem.setArt({'icon':"DefaultVideo.png", 'thumb': item.thumbnail})
else:
listitem.setIconImage("DefaultVideo.png")
listitem.setThumbnailImage(item.thumbnail)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, listitem)
# "Descargar"

View File

@@ -15,7 +15,7 @@
"patterns": [
{
"pattern": "cloudy.ec/(?:embed.php\\?id=|v/)([A-z0-9]+)",
"url": "https://www.cloudy.ec/embed.php?id=\\1"
"url": "https://www.cloudy.ec/embed.php?id=\\1&playerPage=1"
}
]
},
@@ -49,4 +49,4 @@
}
],
"version": 1
}
}
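
For reference, each entry in "patterns" is a regex applied to page text, with its capture groups substituted into "url". Roughly how this pair rewrites a cloudy link (a sketch of the mechanism, not servertools' exact code):

import re

pattern = r"cloudy.ec/(?:embed.php\?id=|v/)([A-z0-9]+)"
template = r"https://www.cloudy.ec/embed.php?id=\1&playerPage=1"

match = re.search(pattern, "https://www.cloudy.ec/v/abc123")
if match:
    print(re.sub(pattern, template, match.group(0)))
    # -> https://www.cloudy.ec/embed.php?id=abc123&playerPage=1
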

View File

@@ -16,6 +16,10 @@
{
"pattern": "(?s)https://drive.google.com/file/d/([^/]+)/preview",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
"pattern": "(?s)\"https://(?!docs)(.*?).googleusercontent.com/([^\"]+)",
"url": "https://\\1.googleusercontent.com/\\2"
}
]
},
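
The added gvideo pattern picks quoted *.googleusercontent.com links out of page text (the (?!docs) lookahead skips docs.google hosts, which the previous pattern already covers) and passes them through unchanged. A quick check of the regex on an invented sample:

import re

pattern = r'(?s)"https://(?!docs)(.*?).googleusercontent.com/([^"]+)'
sample = '{"src": "https://r4---sn-xyz.googleusercontent.com/videoplayback?itag=22"}'
host, path = re.search(pattern, sample).groups()
print("https://%s.googleusercontent.com/%s" % (host, path))
# -> https://r4---sn-xyz.googleusercontent.com/videoplayback?itag=22
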

View File

@@ -8,15 +8,16 @@ from platformcode import logger
def test_video_exists(page_url):
if 'googleusercontent' in page_url:
return True, ""
return True, ""
response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
if "no+existe" in response.data:
return False, "[gvideo] El video no existe o ha sido borrado"
if "Se+ha+excedido+el" in response.data:
return False, "[gvideo] Se ha excedido el número de reproducciones permitidas"
if "No+tienes+permiso" in response.data:
return False, "[gvideo] No tiene permiso para acceder a este video"
return False, "[gvideo] No tienes permiso para acceder a este video"
return True, ""
@@ -29,13 +30,9 @@ def get_video_url(page_url, user="", password="", video_password=""):
if 'googleusercontent' in page_url:
data = httptools.downloadpage(page_url, follow_redirects = False, headers={"Referer": page_url})
url=data.headers['location']
logger.debug('url: %s' % url)
logger.debug("data.headers: %s" % data.headers)
quality = scrapertools.find_single_match (url, '.itag=(\d+).')
logger.debug('quality: %s' % quality)
streams.append((quality, url))
logger.debug('streams: %s' % streams)
headers_string=""
else:
@@ -46,6 +43,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = urllib.unquote_plus(urllib.unquote_plus(data))
logger.info("Intel88 %s" %data)
headers_string = "|Cookie=" + cookies
url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
streams = scrapertools.find_multiple_matches(url_streams,
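
In the googleusercontent branch above, the quality comes straight from the itag parameter of the final URL. That itag is the standard GVideo/YouTube format id; a small illustrative (and deliberately partial, assumed) mapping turns it into a readable label:

import re

ITAG_LABELS = {'18': '360p', '22': '720p', '37': '1080p', '59': '480p'}  # partial, assumed

url = "https://r4.googleusercontent.com/videoplayback?itag=22&mime=video/mp4"
itag = re.search(r'itag=(\d+)', url).group(1)
print(ITAG_LABELS.get(itag, itag))  # -> 720p
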

View File

@@ -52,7 +52,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
numeros = scrapertools.find_single_match(data, '_[A-f0-9]+x[A-f0-9]+\s*(?:=|\^)\s*([0-9]{4,}|0x[A-f0-9]{4,})')
op1, op2 = scrapertools.find_single_match(data, '\(0x(\d),0x(\d)\);')
idparse, hexparse = scrapertools.find_multiple_matches(data, "parseInt\('([0-9]+)'")
numeros = [numeros, str(int(hexparse, 8))]
# numeros = [numeros, str(int(hexparse, 8))]
rangos, rangos2 = scrapertools.find_single_match(data, "\)-([0-9]+).0x4\)/\(([0-9]+)")
videourl = ""
for encode in var_encodes:
text_decode = ""
@@ -81,11 +82,12 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
if value3 < index1:
break
value4 = value2 ^ decode1[j % (mult / 8)] ^ int(idparse,8)
for n in numeros:
if not n.isdigit():
n = int(n, 16)
value4 ^= int(n)
# value4 = value2 ^ decode1[j % (mult / 8)] ^ int(idparse,8)
# for n in numeros:
# if not n.isdigit():
# n = int(n, 16)
# value4 ^= int(n)
value4 = value2 ^ decode1[(j % 9)] ^ (int(idparse, 8) - int(rangos) + 4) / (int(rangos2) - 8) ^ int(hexparse, 8)
value5 = index1 * 2 + 127
for h in range(4):
valorfinal = (value4 >> 8 * h) & (value5)
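
The last loop above peels the decoded integer apart one byte at a time: shift right by 8*h, then mask. With a plain 0xFF mask the idiom looks like this (a minimal demo of the bit trick only, not the hoster's actual scheme):

value = 0x41424344  # bytes 'D', 'C', 'B', 'A' from least to most significant

out = ""
for h in range(4):
    out += chr((value >> (8 * h)) & 0xFF)
print(out)  # -> DCBA
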

View File

@@ -12,8 +12,8 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Object not found" in data or "no longer exists" in data or '"sources": [false]' in data:
data = httptools.downloadpage(page_url, add_referer = True).data
if "Object not found" in data or "no longer exists" in data or '"sources": [false]' in data or 'sources: []' in data:
return False, "[pelismundo] El archivo no existe o ha sido borrado"
return True, ""

View File

@@ -32,11 +32,15 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = scrapertools.find_single_match(data.replace('"', "'"), "sources\s*=[^\[]*\[([^\]]+)\]")
matches = scrapertools.find_multiple_matches(data, "[src|file]:'([^']+)'")
if len(matches) == 0:
matches = scrapertools.find_multiple_matches(data, "[^',]+")
video_urls = []
for video_url in matches:
if video_url.endswith(".mpd"):
continue
_hash = scrapertools.find_single_match(video_url, '[A-z0-9\_\-]{40,}')
hash = _hash[::-1]
hash = hash.replace(hash[1:2],"",1)
hash = hash.replace(hash[1:2], "", 1)
video_url = video_url.replace(_hash, hash)
filename = scrapertools.get_filename_from_url(video_url)[-4:]
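
The token fix-up above is plain string surgery: reverse the 40+ character hash, then drop the first occurrence of its second character (normally the character at index 1). Demonstrated on a made-up token:

token = "abcdefghijklmnopqrstuvwxyz0123456789ABCD"  # invented, 40 chars

rev = token[::-1]
fixed = rev.replace(rev[1:2], "", 1)
print(rev)    # DCBA9876543210zyxwvutsrqponmlkjihgfedcba
print(fixed)  # DBA9876543210zyxwvutsrqponmlkjihgfedcba
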
@@ -56,107 +60,3 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info(" %s - %s" % (video_url[0], video_url[1]))
return video_urls
def decrypt(h, k):
import base64
if len(h) % 4:
h += "=" * (4 - len(h) % 4)
sig = []
h = base64.b64decode(h.replace("-", "+").replace("_", "/"))
for c in range(len(h)):
sig += [ord(h[c])]
sec = []
for c in range(len(k)):
sec += [ord(k[c])]
dig = range(256)
g = 0
v = 128
for b in range(len(sec)):
a = (v + (sec[b] & 15)) % 256
c = dig[(g)]
dig[g] = dig[a]
dig[a] = c
g += 1
a = (v + (sec[b] >> 4 & 15)) % 256
c = dig[g]
dig[g] = dig[a]
dig[a] = c
g += 1
k = 0
q = 1
p = 0
n = 0
for b in range(512):
k = (k + q) % 256
n = (p + dig[(n + dig[k]) % 256]) % 256
p = (k + p + dig[n]) % 256
c = dig[k]
dig[k] = dig[n]
dig[n] = c
q = 3
for a in range(v):
b = 255 - a
if dig[a] > dig[b]:
c = dig[a]
dig[a] = dig[b]
dig[b] = c
k = 0
for b in range(512):
k = (k + q) % 256
n = (p + dig[(n + dig[k]) % 256]) % 256
p = (k + p + dig[n]) % 256
c = dig[k]
dig[k] = dig[n]
dig[n] = c
q = 5
for a in range(v):
b = 255 - a
if dig[a] > dig[b]:
c = dig[a]
dig[a] = dig[b]
dig[b] = c
k = 0
for b in range(512):
k = (k + q) % 256
n = (p + dig[(n + dig[k]) % 256]) % 256
p = (k + p + dig[n]) % 256
c = dig[k]
dig[k] = dig[n]
dig[n] = c
q = 7
k = 0
u = 0
d = []
for b in range(len(dig)):
k = (k + q) % 256
n = (p + dig[(n + dig[k]) % 256]) % 256
p = (k + p + dig[n]) % 256
c = dig[k]
dig[k] = dig[n]
dig[n] = c
u = dig[(n + dig[(k + dig[(u + p) % 256]) % 256]) % 256]
d += [u]
c = []
for f in range(len(d)):
try:
c += [(256 + (sig[f] - d[f])) % 256]
except:
break
h = ""
for s in c:
h += chr(s)
return h

View File

@@ -2,6 +2,7 @@
import urllib
from core import httptools
from core import scrapertools
from platformcode import logger
@@ -9,11 +10,11 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
data = httptools.downloadpage(page_url).data
if "Streaming link:" in data:
return True, ""
elif "Unfortunately, the file you want is not available." in data:
elif "Unfortunately, the file you want is not available." in data or "Unfortunately, the video you want to see is not available" in data:
return False, "[Uptobox] El archivo no existe o ha sido borrado"
wait = scrapertools.find_single_match(data, "You have to wait ([0-9]+) (minute|second)")
if len(wait) > 0:
@@ -27,20 +28,20 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("(page_url='%s')" % page_url)
# If the link comes straight from uptostream
if "uptobox" not in page_url:
data = scrapertools.cache_page(page_url)
data = httptools.downloadpage(page_url).data
if "Video not found" in data:
page_url = page_url.replace("uptostream.com/iframe/", "uptobox.com/")
data = scrapertools.cache_page(page_url)
data = httptools.downloadpage(page_url).data
video_urls = uptobox(page_url, data)
else:
video_urls = uptostream(data)
else:
data = scrapertools.cache_page(page_url)
data = httptools.downloadpage(page_url).data
# If the file has a streaming link, redirect to uptostream
if "Streaming link:" in data:
page_url = "http://uptostream.com/iframe/" + scrapertools.find_single_match(page_url,
'uptobox.com/([a-z0-9]+)')
data = scrapertools.cache_page(page_url)
data = httptools.downloadpage(page_url).data
video_urls = uptostream(data)
else:
# Otherwise fall back to the normal download
@@ -76,7 +77,7 @@ def uptobox(url, data):
for inputname, inputvalue in matches:
post += inputname + "=" + inputvalue + "&"
data = scrapertools.cache_page(url, post=post[:-1])
data = httptools.downloadpage(url, post=post[:-1]).data
media = scrapertools.find_single_match(data, '<a href="([^"]+)">\s*<span class="button_upload green">')
# Only the last part of the url needs to be encoded
url_strip = urllib.quote(media.rsplit('/', 1)[1])
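
All the uptobox changes above follow one mechanical migration: scrapertools.cache_page(url) becomes httptools.downloadpage(url).data, with post= carried over for form submissions. The shape of the change, as a sketch using this repo's core package:

from core import httptools

def fetch(url, post=None):
    # old: data = scrapertools.cache_page(url, post=post)
    # new: downloadpage returns a response object; the body is in .data
    return httptools.downloadpage(url, post=post).data
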

View File

@@ -48,5 +48,6 @@
"visible": false
}
],
"thumbnail": "https://s26.postimg.org/vo685y2bt/vimeo1.png",
"version": 1
}
}