Commit by Danielr460, 2017-10-15 09:30:32 -05:00
54 changed files with 1132 additions and 1115 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.2.0" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.2.2" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,11 +19,19 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» playmax » allcalidad
» cinetux » allpeliculas
» pedropolis » pelisplanet
» flashx » gvideo
¤ selector de temas ¤ arreglos internos
» maxipelis » peliculasaudiolatino
» peliculasmx » peliscity
» repelis » seriesmeme
» seriesyonkis » verpeliculasnuevas
» zonatorrent » kabagi/diskokosmico
» tiotorrent » allcalidad
» areadocumental » cinetux
» hdfull » newpct1
» ohpelis » animeyt
» flashx » kbagi
» gamovideo » vidup
¤ arreglos internos
[COLOR green]Gracias a [COLOR yellow]RIgodonius[/COLOR] por su colaboración en esta versión[/COLOR]
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -92,9 +92,14 @@ def peliculas(item):
matches = scrapertools.find_multiple_matches(data, patron)
for url, thumbnail, titulo, varios in matches:
idioma = scrapertools.find_single_match(varios, '(?s)Idioma.*?kinopoisk">([^<]+)')
number_idioma = scrapertools.find_single_match(idioma, '[0-9]')
mtitulo = titulo
if number_idioma != "":
idioma = ""
else:
mtitulo += " (" + idioma + ")"
year = scrapertools.find_single_match(varios, 'Año.*?kinopoisk">([^<]+)')
year = scrapertools.find_single_match(year, '[0-9]{4}')
mtitulo = titulo + " (" + idioma + ")"
if year:
mtitulo += " (" + year + ")"
item.infoLabels['year'] = int(year)
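
The rewrite above only decorates the title when the scraped fields look sane: a language tag containing digits is treated as scraper noise and dropped, and the year is appended (and stored in infoLabels) only after a strict four-digit match. A minimal standalone sketch of that guard, with illustrative names:

import re

def build_title(titulo, idioma, varios):
    mtitulo = titulo
    # A language tag with digits in it is scraper noise; drop it
    if idioma and not re.search(r'[0-9]', idioma):
        mtitulo += " (" + idioma + ")"
    # Only trust a strict four-digit year
    year = re.search(r'[0-9]{4}', varios)
    if year:
        mtitulo += " (" + year.group(0) + ")"
    return mtitulo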

View File

@@ -21,12 +21,12 @@ CHANNEL_DEFAULT_HEADERS = [
REGEX_NEXT_PAGE = r"class='current'>\d+?</li><li><a href=\"([^']+?)\""
REGEX_TITLE = r'(?:bigChar_a" href=.+?>)(.+?)(?:</a>)'
REGEX_THUMB = r'src="(http://media.animeflv\.me/uploads/thumbs/[^"]+?)"'
REGEX_THUMB = r'src="(http://media.animeflv\.co/uploads/thumbs/[^"]+?)"'
REGEX_PLOT = r'<span class="info">Línea de historia:</span><p><span>(.*?)</span>'
REGEX_URL = r'href="(http://animeflv\.me/Anime/[^"]+)">'
REGEX_URL = r'href="(http://animeflv\.co/Anime/[^"]+)">'
REGEX_SERIE = r'%s.+?%s([^<]+?)</a><p>(.+?)</p>' % (REGEX_THUMB, REGEX_URL)
REGEX_EPISODE = r'href="(http://animeflv\.me/Ver/[^"]+?)">(?:<span.+?</script>)?(.+?)</a></td><td>(\d+/\d+/\d+)</td></tr>'
REGEX_GENERO = r'<a href="(http://animeflv\.me/genero/[^\/]+/)">([^<]+)</a>'
REGEX_EPISODE = r'href="(http://animeflv\.co/Ver/[^"]+?)">(?:<span.+?</script>)?(.+?)</a></td><td>(\d+/\d+/\d+)</td></tr>'
REGEX_GENERO = r'<a href="(http://animeflv\.co/genero/[^\/]+/)">([^<]+)</a>'
def get_url_contents(url):
@@ -309,7 +309,7 @@ def findvideos(item):
itemlist = []
page_html = get_url_contents(item.url)
regex_api = r'http://player\.animeflv\.me/[^\"]+'
regex_api = r'http://player\.animeflv\.co/[^\"]+'
iframe_url = scrapertools.find_single_match(page_html, regex_api)
iframe_html = get_url_contents(iframe_url)
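
Each of the constants above hard-codes the domain, which is why the .me to .co move has to touch six regexes plus findvideos(). A hypothetical refactor (not part of this commit) that derives them from a single HOST constant, so the next domain move is a one-line edit:

import re

HOST = "animeflv.co"  # single place to update on the next move

REGEX_URL = r'href="(http://%s/Anime/[^"]+)">' % re.escape(HOST)
REGEX_THUMB = r'src="(http://media\.%s/uploads/thumbs/[^"]+?)"' % re.escape(HOST)
REGEX_GENERO = r'<a href="(http://%s/genero/[^/]+/)">([^<]+)</a>' % re.escape(HOST)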

View File

@@ -0,0 +1,36 @@
{
"id": "animeyt",
"name": "AnimeYT",
"active": true,
"adult": false,
"language": "es",
"thumbnail": "http://i.imgur.com/dHpupFk.png",
"version": 1,
"changes": [
{
"date": "17/05/2017",
"description": "Fix novedades y replace en findvideos"
}
],
"categories": [
"anime"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "información extra",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,187 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import tmdb
from platformcode import config,logger
__modo_grafico__ = config.get_setting('modo_grafico', 'animeyt')
HOST = "http://animeyt.tv/"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Novedades", action="novedades", url=HOST))
itemlist.append(Item(channel=item.channel, title="Recientes", action="recientes", url=HOST))
itemlist.append(Item(channel=item.channel, title="Alfabético", action="alfabetico", url=HOST))
itemlist.append(Item(channel=item.channel, title="Búsqueda", action="search", url=urlparse.urljoin(HOST, "busqueda?terminos=")))
return itemlist
def novedades(item):
logger.info()
itemlist = list()
if not item.pagina:
item.pagina = 0
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron_novedades = '<div class="capitulos-portada">[\s\S]+?<h2>Comentarios</h2>'
data_novedades = scrapertools.find_single_match(data, patron_novedades)
patron = 'href="([^"]+)"[\s\S]+?src="([^"]+)"[^<]+alt="([^"]+) (\d+)([^"]+)'
matches = scrapertools.find_multiple_matches(data_novedades, patron)
for url, img, scrapedtitle, eps, info in matches[item.pagina:item.pagina + 20]:
title = scrapedtitle + " " + "1x" + eps + info
title = title.replace("Sub Español", "").replace("sub español", "")
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(Item(channel=item.channel, title=title, url=url, thumb=img, action="findvideos", contentTitle=scrapedtitle, contentSerieName=scrapedtitle, infoLabels=infoLabels, contentType="tvshow"))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for it in itemlist:
it.thumbnail = it.thumb
except:
pass
if len(matches) > item.pagina + 20:
pagina = item.pagina + 20
itemlist.append(item.clone(channel=item.channel, action="novedades", url=item.url, title=">> Página Siguiente", pagina=pagina))
return itemlist
def alfabetico(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
for letra in '0ABCDEFGHIJKLMNOPQRSTUVWXYZ':
titulo = letra
if letra == "0":
letra = "num"
itemlist.append(Item(channel=item.channel, action="recientes", title=titulo,
url=urlparse.urljoin(HOST, "animes?tipo=0&genero=0&anio=0&letra={letra}".format(letra=letra))))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ","+")
item.url = item.url+texto
if texto!='':
return recientes(item)
def recientes(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron_recientes = '<article class="anime">[\s\S]+?</main>'
data_recientes = scrapertools.find_single_match(data, patron_recientes)
patron = '<a href="([^"]+)"[^<]+<img src="([^"]+)".+?js-synopsis-reduce">(.*?)<.*?<h3 class="anime__title">(.*?)<small>(.*?)</small>'
matches = scrapertools.find_multiple_matches(data_recientes, patron)
for url, thumbnail, plot, title, cat in matches:
itemlist.append(item.clone(title=title, url=url, action="episodios", show=title, thumbnail=thumbnail, plot=plot, cat=cat))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
paginacion = scrapertools.find_single_match(data, '<a class="pager__link icon-derecha last" href="([^"]+)"')
paginacion = scrapertools.decodeHtmlentities(paginacion)
if paginacion:
itemlist.append(Item(channel=item.channel, action="recientes", title=">> Página Siguiente", url=paginacion))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<span class="icon-triangulo-derecha"></span>.*?<a href="([^"]+)">([^"]+) (\d+)'
matches = scrapertools.find_multiple_matches(data, patron)
for url, scrapedtitle, episode in matches:
title = "1x" + episode + " " + "Episodio"
itemlist.append(item.clone(title=title, url=url, action='findvideos'))
if config.get_videolibrary_support:
itemlist.append(Item(channel=item.channel, title="Añadir serie a la biblioteca", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
duplicados = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = 'Player\("(.*?)"'
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:
if "cldup" in url:
title = "Opcion Cldup"
if "chumi" in url:
title = "Opcion Chumi"
itemlist.append(item.clone(channel=item.channel, folder=False, title=title, action="play", url=url))
if item.extra != "library":
if config.get_videolibrary_support() and item.extra:
itemlist.append(item.clone(channel=item.channel, title="[COLOR yellow]Añadir pelicula a la videoteca[/COLOR]", url=item.url, action="add_pelicula_to_library", extra="library", contentTitle=item.show, contentType="movie"))
return itemlist
def player(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url, add_referer=True).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
url = scrapertools.find_single_match(data, 'sources: \[{file:\'(.*?)\'')
itemlist = servertools.find_video_items(data=data)
return itemlist
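
novedades() above pages through a single scraped list in memory: it slices matches in windows of 20 and, while results remain, appends a clone of the current item carrying the next offset instead of fetching another URL. The idiom reduced to its core (helper name is illustrative):

PAGE_SIZE = 20

def paginate(item, matches, build):
    itemlist = []
    start = item.pagina or 0
    for match in matches[start:start + PAGE_SIZE]:
        itemlist.append(build(match))
    # Offer "next page" only while unshown results remain
    if len(matches) > start + PAGE_SIZE:
        itemlist.append(item.clone(title=">> Página Siguiente", pagina=start + PAGE_SIZE))
    return itemlist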

View File

@@ -23,12 +23,12 @@ def mainlist(item):
itemlist = []
item.text_color = color1
itemlist.append(item.clone(title="Novedades", action="entradas",
url="http://www.area-documental.com/resultados-reciente.php?buscar=&genero=",
url= host + "/resultados-reciente.php?buscar=&genero=",
fanart="http://i.imgur.com/Q7fsFI6.png"))
itemlist.append(item.clone(title="Destacados", action="entradas",
url="http://www.area-documental.com/resultados-destacados.php?buscar=&genero=",
url= host + "/resultados-destacados.php?buscar=&genero=",
fanart="http://i.imgur.com/Q7fsFI6.png"))
itemlist.append(item.clone(title="Categorías", action="cat", url="http://www.area-documental.com/index.php",
itemlist.append(item.clone(title="Categorías", action="cat", url= host + "/index.php",
fanart="http://i.imgur.com/Q7fsFI6.png"))
itemlist.append(item.clone(title="Ordenados por...", action="indice", fanart="http://i.imgur.com/Q7fsFI6.png"))
@@ -47,7 +47,7 @@ def configuracion(item):
def search(item, texto):
logger.info()
item.url = "http://www.area-documental.com/resultados.php?buscar=%s&genero=&x=0&y=0" % texto
item.url = host + "/resultados.php?buscar=%s&genero=&x=0&y=0" % texto
item.action = "entradas"
try:
itemlist = entradas(item)
@@ -65,7 +65,7 @@ def newest(categoria):
item = Item()
try:
if categoria == "documentales":
item.url = "http://www.area-documental.com/resultados-reciente.php?buscar=&genero="
item.url = host + "/resultados-reciente.php?buscar=&genero="
item.action = "entradas"
itemlist = entradas(item)
@@ -86,9 +86,9 @@ def indice(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Título", action="entradas",
url="http://www.area-documental.com/resultados-titulo.php?buscar=&genero="))
url= host + "/resultados-titulo.php?buscar=&genero="))
itemlist.append(item.clone(title="Año", action="entradas",
url="http://www.area-documental.com/resultados-anio.php?buscar=&genero="))
url= host + "/resultados-anio.php?buscar=&genero="))
return itemlist
@@ -125,9 +125,13 @@ def entradas(item):
data2 = ""
data = data.replace("\n", "").replace("\t", "")
patron = '<div id="peliculas">.*?<a href="([^"]+)".*?<img src="([^"]+)".*?' \
'target="_blank">(.*?)</a>(.*?)<p>(.*?)</p>' \
'.*?</strong>: (.*?)<strong>.*?</strong>(.*?)</div>'
patron = '(?s)<div id="peliculas">.*?a href="([^"]+)".*?'
patron += '<img src="([^"]+)".*?'
patron += 'target="_blank">(.*?)</a></span>'
patron += '(.*?)<p>'
patron += '(.*?)</p>.*?'
patron += '</strong>:(.*?)<strong>.*?'
patron += '</strong>(.*?)</div>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedplot, genero, extra in matches:
infolab = {'plot': scrapedplot, 'genre': genero}
@@ -200,6 +204,5 @@ def play(item):
extension = item.url.rsplit("|", 1)[0][-4:]
itemlist.append(['%s %s [directo]' % (extension, item.calidad), item.url, 0, subtitle])
# itemlist.append(item.clone(subtitle=subtitle))
return itemlist
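
Every hunk in this file swaps the literal "http://www.area-documental.com" for a module-level host constant (defined outside the visible hunks), so the next domain change is one edit. The shape of the pattern, assuming the constant's current value:

host = "http://www.area-documental.com"  # assumed value; the definition is outside these hunks

def search_url(texto):
    # Build every URL from the constant instead of repeating the domain
    return host + "/resultados.php?buscar=%s&genero=&x=0&y=0" % texto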

View File

@@ -127,7 +127,6 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
logger.info(data)
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?' # img, title.strip()
patron += '<span class="icon-star2"></span>(.*?)/div>.*?' # rating
@@ -144,14 +143,17 @@ def peliculas(item):
contentTitle = scrapedtitle.partition(':')[0].partition(',')[0]
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
scrapedtitle, year, quality)
thumb_id = scrapertools.find_single_match(scrapedthumbnail, '.*?\/uploads\/(.*?)-')
thumbnail = "/%s.jpg" % thumb_id
filtro_list = {"poster_path": thumbnail}
filtro_list = filtro_list.items()
itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year, 'rating': rating},
contentTitle=contentTitle, thumbnail=scrapedthumbnail,
url=scrapedurl, infoLabels={'filtro':filtro_list},
contentTitle=contentTitle, thumbnail=thumbnail,
title=title, context="buscar_trailer", quality = quality))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if item.page + 20 < len(matches):
itemlist.append(item.clone(page=item.page + 20,

View File

@@ -689,7 +689,7 @@ def get_enlaces(item, url, type):
if servertools.is_server_enabled(server):
scrapedtitle = " Ver en " + server.capitalize() + " [" + idioma + "/" + calidad + "]"
itemlist.append(item.clone(action="play", url=scrapedurl, title=scrapedtitle, text_color=color2,
extra="", server=server))
extra="", server=server, language=idioma))
if len(itemlist) == 1:
itemlist.append(item.clone(title=" No hay enlaces disponibles", action="", text_color=color2))

View File

@@ -169,6 +169,7 @@ def findvideos(item):
videoitem.plot = info
videoitem.action = "play"
videoitem.folder = False
videoitem.infoLabels=item.infoLabels
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(

View File

@@ -298,6 +298,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
if "player" in url:
scrapedserver = scrapertools.find_single_match(url, 'player/(\w+)')
if "ok" in scrapedserver: scrapedserver = "okru"
matches.append([url, scrapedserver, "", language.strip(), t_tipo])
bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
bloque2 = bloque2.replace("\t", "").replace("\r", "")
@@ -347,10 +348,12 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
def play(item):
logger.info()
itemlist = []
if "api.cinetux" in item.url:
if "api.cinetux" in item.url or item.server == "okru":
data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
item.url = "https://youtube.googleapis.com/embed/?status=ok&hl=es&allow_embed=1&ps=docs&partnerid=30&hd=1&autoplay=0&cc_load_policy=1&showinfo=0&docid=" + id
if item.server == "okru":
item.url = "https://ok.ru/videoembed/" + id
elif "links" in item.url or "www.cinetux.me" in item.url:
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data, '<a href="(http[^"]+)')
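
Two details above are worth isolating: bloque_enlaces() resolves each shortened link without downloading its body (a headers-only request with redirects disabled exposes the target in the Location header), and play() later rebuilds ok.ru ids into a direct embed URL. A sketch of the header step, using httptools exactly as the hunk does:

from core import httptools
from core import scrapertools

def resolve_link(url):
    # Ask only for headers and don't follow the redirect: Location holds the target
    response = httptools.downloadpage(url, follow_redirects=False, only_headers=True)
    target = response.headers.get("location", "")
    server = scrapertools.find_single_match(target, 'player/(\w+)')
    if "ok" in server:
        server = "okru"  # normalized so play() can branch on item.server
    return target, server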

View File

@@ -3,24 +3,21 @@
# Alfa
# ------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
import re
from core import httptools
from core import tmdb
from core import jsontools as json
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist = list()
itemlist.append(item.clone(title="Novedades", action="peliculas", url="http://gnula.mobi/"))
itemlist.append(item.clone(title="Castellano", action="peliculas",
url="http://www.gnula.mobi/tag/esp)anol/"))
url="http://www.gnula.mobi/tag/espanol/"))
itemlist.append(item.clone(title="Latino", action="peliculas", url="http://gnula.mobi/tag/latino/"))
itemlist.append(item.clone(title="VOSE", action="peliculas", url="http://gnula.mobi/tag/subtitulada/"))
@@ -53,43 +50,66 @@ def sub_search(item):
patron = '<div class="row">.*?<a href="([^"]+)" title="([^"]+)">.*?<img src="(.*?)" title'
matches = scrapertools.find_multiple_matches(data, patron)
for url,name,img in matches:
itemlist.append(item.clone(title=name, url=url, action="findvideos", show=name, thumbnail=img))
for url, name, img in matches:
itemlist.append(item.clone(title=name, url=url, action="findvideos", thumbnail=img))
paginacion = scrapertools.find_single_match(data, '<a href="([^"]+)" ><i class="glyphicon '
'glyphicon-chevron-right" aria-hidden="true"></i>')
if paginacion:
itemlist.append(channel=item.channel, action="sub_search", title="Next page >>" , url=paginacion)
itemlist.append(Item(channel=item.channel, action="sub_search", title="Next page >>", url=paginacion))
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="col-mt-5 postsh">.*?href="(.*?)" title="(.*?)".*?under-title">(.*?)<.*?src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
patron = '<div class="col-mt-5 postsh">.*?href="(.*?)" title="(.*?)".*?under-title">(.*?)<.*?src="(.*?)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedyear, scrapedtitle, scrapedthumbnail in matches:
url = scrapedurl
title = scrapedtitle
year = scrapertools.find_single_match(scrapedyear, r'.*?\((\d{4})\)')
thumbnail = scrapedthumbnail
new_item =Item (channel = item.channel, action="findvideos", title=title, contentTitle=title, url=url,
thumbnail=thumbnail, infoLabels = {'year':year})
if year:
tmdb.set_infoLabels_item(new_item)
itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, fulltitle = scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, infoLabels={'year': year}))
itemlist.append(new_item)
next_page_url = scrapertools.find_single_match(data,'<link rel="next" href="(.*?)"\/>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
tmdb.set_infoLabels(itemlist, True)
next_page_url = scrapertools.find_single_match(data, '<link rel="next" href="(.*?)"')
if next_page_url != "":
next_page_url = item.url + next_page_url
itemlist.append(item.clone(action="peliculas", title="Siguiente >>", text_color="yellow",
url=next_page_url))
url=next_page_url))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'data-src="([^"]+)".*?'
patron += 'data-toggle="tab">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for url, language in matches:
url = url.replace("&amp;", "&")
response = httptools.downloadpage(url, follow_redirects=False, add_referer=True)
if response.data:
url = scrapertools.find_single_match(response.data, 'src="([^"]+)"')
else:
url = response.headers.get("location", "")
url = url.replace("&quot","")
titulo = "Ver en %s (" + language + ")"
itemlist.append(item.clone(
action = "play",
title = titulo,
url = url,
language = language))
tmdb.set_infoLabels(itemlist, True)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
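
The new findvideos() leaves a %s placeholder in every title and lets servertools.get_servers_itemlist() fill it once the server has been detected, via the lambda shown above; hdfull and other channels get the same treatment in this commit. The idiom in isolation:

from core import servertools

def label_links(item, found):
    itemlist = []
    for url, language in found:
        # Server unknown at this point, so leave a %s slot in the title
        itemlist.append(item.clone(action="play", url=url, language=language,
                                   title="Ver en %s (" + language + ")"))
    # One pass detects each server and the lambda completes the titles
    return servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())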

View File

@@ -56,11 +56,17 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
patron = '<a class="Ntooltip" href="([^"]+)">([^<]+)<span><br[^<]+'
patron += '<img src="([^"]+)"></span></a>(.*?)<br'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedurl, scrapedtitle, scrapedthumbnail, resto in matches:
language = []
plot = scrapertools.htmlclean(resto).strip()
logger.debug('plot: %s' % plot)
languages = scrapertools.find_multiple_matches(plot, r'\((V.)\)')
quality = scrapertools.find_single_match(plot, r'(?:\[.*?\].*?)\[(.*?)\]')
for lang in languages:
language.append(lang)
logger.debug('languages: %s' % languages)
title = scrapedtitle + " " + plot
contentTitle = scrapedtitle
url = item.url + scrapedurl
@@ -73,7 +79,9 @@ def peliculas(item):
hasContentDetails = True,
contentTitle = contentTitle,
contentType = "movie",
context = ["buscar_trailer"]
context = ["buscar_trailer"],
language=language,
quality=quality
))
return itemlist

View File

@@ -49,8 +49,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="menuseries", title="Series", url=host, folder=True))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar..."))
if not account:
itemlist.append(Item(channel=item.channel, title=bbcode_kodi2html(
"[COLOR orange][B]Habilita tu cuenta para activar los items de usuario...[/B][/COLOR]"),
itemlist.append(Item(channel=item.channel, title="[COLOR orange][B]Habilita tu cuenta para activar los items de usuario...[/B][/COLOR]",
action="settingCanal", url=""))
else:
login()
@@ -66,10 +65,10 @@ def menupeliculas(item):
if account:
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Favoritos[/B][/COLOR]"),
title="[COLOR orange][B]Favoritos[/B][/COLOR]",
url=host + "/a/my?target=movies&action=favorite&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Pendientes[/B][/COLOR]"),
title="[COLOR orange][B]Pendientes[/B][/COLOR]",
url=host + "/a/my?target=movies&action=pending&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="fichas", title="ABC", url=host + "/peliculas/abc", folder=True))
@@ -86,7 +85,7 @@ def menupeliculas(item):
itemlist.append(Item(channel=item.channel, action="generos", title="Películas por Género", url=host, folder=True))
if account:
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Vistas[/B][/COLOR]"),
title="[COLOR orange][B]Vistas[/B][/COLOR]",
url=host + "/a/my?target=movies&action=seen&start=-28&limit=28", folder=True))
return itemlist
@@ -99,10 +98,10 @@ def menuseries(item):
if account:
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Siguiendo[/B][/COLOR]"),
title="[COLOR orange][B]Siguiendo[/B][/COLOR]",
url=host + "/a/my?target=shows&action=following&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Para Ver[/B][/COLOR]"),
title="[COLOR orange][B]Para Ver[/B][/COLOR]",
url=host + "/a/my?target=shows&action=watch&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="series_abc", title="A-Z", folder=True))
@@ -123,13 +122,13 @@ def menuseries(item):
url=host + "/series/list", folder=True))
if account:
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Favoritas[/B][/COLOR]"),
title="[COLOR orange][B]Favoritas[/B][/COLOR]",
url=host + "/a/my?target=shows&action=favorite&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Pendientes[/B][/COLOR]"),
title="[COLOR orange][B]Pendientes[/B][/COLOR]",
url=host + "/a/my?target=shows&action=pending&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Vistas[/B][/COLOR]"),
title="[COLOR orange][B]Vistas[/B][/COLOR]",
url=host + "/a/my?target=shows&action=seen&start=-28&limit=28", folder=True))
return itemlist
@@ -222,7 +221,7 @@ def items_usuario(item):
serie = ficha['show_title']['en'].strip()
temporada = ficha['season']
episodio = ficha['episode']
serie = bbcode_kodi2html("[COLOR whitesmoke][B]" + serie + "[/B][/COLOR]")
serie = "[COLOR whitesmoke][B]" + serie + "[/B][/COLOR]"
if len(episodio) == 1: episodio = '0' + episodio
try:
title = temporada + "x" + episodio + " - " + serie + ": " + title
@@ -286,9 +285,8 @@ def fichas(item):
if len(s_p) == 1:
data = s_p[0]
if 'Lo sentimos</h3>' in s_p[0]:
return [Item(channel=item.channel, title=bbcode_kodi2html(
"[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]" + texto.replace('%20',
' ') + "[/COLOR] sin resultados"))]
return [Item(channel=item.channel, title="[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]" + texto.replace('%20',
' ') + "[/COLOR] sin resultados")]
else:
data = s_p[0] + s_p[1]
else:
@@ -321,12 +319,12 @@ def fichas(item):
if scrapedlangs != ">":
textoidiomas, language = extrae_idiomas(scrapedlangs)
#Todo Quitar el idioma
title += bbcode_kodi2html(" ( [COLOR teal][B]" + textoidiomas + "[/B][/COLOR])")
title += " ( [COLOR teal][B]" + textoidiomas + "[/B][/COLOR])"
if scrapedrating != ">":
valoracion = re.sub(r'><[^>]+>(\d+)<b class="dec">(\d+)</b>', r'\1,\2', scrapedrating)
infoLabels['rating']=valoracion
title += bbcode_kodi2html(" ([COLOR orange]" + valoracion + "[/COLOR])")
title += " ([COLOR orange]" + valoracion + "[/COLOR])"
url = urlparse.urljoin(item.url, scrapedurl)
@@ -346,7 +344,7 @@ def fichas(item):
if item.title == "Buscar...":
tag_type = scrapertools.get_match(url, 'l.tv/([^/]+)/')
title += bbcode_kodi2html(" - [COLOR blue]" + tag_type.capitalize() + "[/COLOR]")
title += " - [COLOR blue]" + tag_type.capitalize() + "[/COLOR]"
itemlist.append(
Item(channel=item.channel, action=action, title=title, url=url, fulltitle=title, thumbnail=thumbnail,
@@ -388,7 +386,7 @@ def episodios(item):
str = get_status(status, "shows", id)
if str != "" and account and item.category != "Series" and "XBMC" not in item.title:
if config.get_videolibrary_support():
title = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )")
title = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )"
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=False))
@@ -397,11 +395,11 @@ def episodios(item):
thumbnail=item.thumbnail, show=item.show, folder=True))
elif account and item.category != "Series" and "XBMC" not in item.title:
if config.get_videolibrary_support():
title = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )")
title = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )"
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=False))
title = bbcode_kodi2html(" ( [COLOR orange][B]Seguir[/B][/COLOR] )")
title = " ( [COLOR orange][B]Seguir[/B][/COLOR] )"
itemlist.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=True))
@@ -436,7 +434,7 @@ def episodios(item):
idiomas = "( [COLOR teal][B]"
for idioma in episode['languages']: idiomas += idioma + " "
idiomas += "[/B][/COLOR])"
idiomas = bbcode_kodi2html(idiomas)
idiomas = idiomas
else:
idiomas = ""
@@ -513,7 +511,7 @@ def novedades_episodios(item):
idiomas = "( [COLOR teal][B]"
for idioma in episode['languages']: idiomas += idioma + " "
idiomas += "[/B][/COLOR])"
idiomas = bbcode_kodi2html(idiomas)
idiomas = idiomas
else:
idiomas = ""
@@ -522,7 +520,7 @@ def novedades_episodios(item):
except:
show = episode['show']['title']['en'].strip()
show = bbcode_kodi2html("[COLOR whitesmoke][B]" + show + "[/B][/COLOR]")
show = "[COLOR whitesmoke][B]" + show + "[/B][/COLOR]"
if episode['title']:
try:
@@ -610,8 +608,9 @@ def generos_series(item):
def findvideos(item):
logger.info()
itemlist = []
it1 = []
it2 = []
## Carga estados
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
url_targets = item.url
@@ -623,21 +622,21 @@ def findvideos(item):
item.url = item.url.split("###")[0]
if type == "2" and account and item.category != "Cine":
title = bbcode_kodi2html(" ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )")
title = " ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )"
if "Favorito" in item.title:
title = bbcode_kodi2html(" ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )")
title = " ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )"
if config.get_videolibrary_support():
title_label = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )")
itemlist.append(Item(channel=item.channel, action="findvideos", title=title_label, fulltitle=title_label,
title_label = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )"
it1.append(Item(channel=item.channel, action="findvideos", title=title_label, fulltitle=title_label,
url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False))
title_label = bbcode_kodi2html(" ( [COLOR green][B]Tráiler[/B][/COLOR] )")
title_label = " ( [COLOR green][B]Tráiler[/B][/COLOR] )"
itemlist.append(
Item(channel=item.channel, action="trailer", title=title_label, fulltitle=title_label, url=url_targets,
it1.append(
item.clone(channel="trailertools", action="buscartrailer", title=title_label, contentTitle=item.show, url=item.url,
thumbnail=item.thumbnail, show=item.show))
itemlist.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=True))
data_js = httptools.downloadpage("http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js").data
@@ -663,7 +662,6 @@ def findvideos(item):
infolabels = {}
year = scrapertools.find_single_match(data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')
infolabels["year"] = year
matches = []
for match in data_decrypt:
prov = eval(scrapertools.find_single_match(data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\'"]\})' % match["provider"]))
@@ -676,93 +674,43 @@ def findvideos(item):
matches.append([match["lang"], match["quality"], url, embed])
enlaces = []
for idioma, calidad, url, embed in matches:
servername = scrapertools.find_single_match(url, "(?:http:|https:)//(?:www.|)([^.]+).")
if servername == "streamin": servername = "streaminto"
if servername == "waaw": servername = "netutv"
if servername == "uploaded" or servername == "ul": servername = "uploadedto"
mostrar_server = True
if config.get_setting("hidepremium") == True:
mostrar_server = servertools.is_server_enabled(servername)
if mostrar_server:
option = "Ver"
if re.search(r'return ([\'"]{2,}|\})', embed):
option = "Descargar"
calidad = unicode(calidad, "utf8").upper().encode("utf8")
servername_c = unicode(servername, "utf8").capitalize().encode("utf8")
title = option + ": " + servername_c + " (" + calidad + ")" + " (" + idioma + ")"
thumbnail = item.thumbnail
plot = item.title + "\n\n" + scrapertools.find_single_match(data,
'<meta property="og:description" content="([^"]+)"')
plot = scrapertools.htmlclean(plot)
fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')
if account:
url += "###" + id + ";" + type
option = "Ver"
option1 = 1
if re.search(r'return ([\'"]{2,}|\})', embed):
option = "Descargar"
option1 = 2
calidad = unicode(calidad, "utf8").upper().encode("utf8")
title = option + ": %s (" + calidad + ")" + " (" + idioma + ")"
thumbnail = item.thumbnail
plot = item.title + "\n\n" + scrapertools.find_single_match(data,
'<meta property="og:description" content="([^"]+)"')
plot = scrapertools.htmlclean(plot)
fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')
if account:
url += "###" + id + ";" + type
enlaces.append(
Item(channel=item.channel, action="play", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
plot=plot, fanart=fanart, show=item.show, folder=True, server=servername, infoLabels=infolabels,
contentTitle=item.contentTitle, contentType=item.contentType, tipo=option))
it2.append(
item.clone(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels,
contentTitle=item.title, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
enlaces.sort(key=lambda it: it.tipo, reverse=True)
itemlist.extend(enlaces)
it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
itemlist.extend(it1)
itemlist.extend(it2)
## 2 = película
if type == "2" and item.category != "Cine":
## STRM para todos los enlaces de servidores disponibles
## Si no existe el archivo STRM de la peícula muestra el item ">> Añadir a la videoteca..."
try:
itemlist.extend(file_cine_library(item, url_targets))
except:
pass
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=url_targets, thumbnail = item.thumbnail,
fulltitle = item.contentTitle
))
return itemlist
def trailer(item):
import youtube
itemlist = []
item.url = "https://www.googleapis.com/youtube/v3/search" + \
"?q=" + item.show.replace(" ", "+") + "+trailer+HD+Español" \
"&regionCode=ES" + \
"&part=snippet" + \
"&hl=es_ES" + \
"&key=AIzaSyAd-YEOqZz9nXVzGtn3KWzYLbLaajhqIDA" + \
"&type=video" + \
"&maxResults=50" + \
"&pageToken="
itemlist.extend(youtube.fichas(item))
# itemlist.pop(-1)
return itemlist
def file_cine_library(item, url_targets):
import os
from core import filetools
videolibrarypath = os.path.join(config.get_videolibrary_path(), "CINE")
archivo = item.show.strip()
strmfile = archivo + ".strm"
strmfilepath = filetools.join(videolibrarypath, strmfile)
if not os.path.exists(strmfilepath):
itemlist = []
itemlist.append(Item(channel=item.channel, title=">> Añadir a la videoteca...", url=url_targets,
action="add_file_cine_library", extra="episodios", show=archivo))
return itemlist
def add_file_cine_library(item):
from core import videolibrarytools
new_item = item.clone(title=item.show, action="play_from_library")
videolibrarytools.save_movie(new_item)
itemlist = []
itemlist.append(Item(title='El vídeo ' + item.show + ' se ha añadido a la videoteca'))
# xbmctools.renderItems(itemlist, "", "", "")
platformtools.render_items(itemlist, "")
return
def play(item):
if "###" in item.url:
@@ -780,13 +728,11 @@ def play(item):
if devuelve:
item.url = devuelve[0][1]
item.server = devuelve[0][2]
item.thumbnail = item.contentThumbnail
item.fulltitle = item.contentTitle
return [item]
## --------------------------------------------------------------------------------
## --------------------------------------------------------------------------------
def agrupa_datos(data):
## Agrupa los datos
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|<!--.*?-->', '', data)
@@ -810,22 +756,6 @@ def extrae_idiomas(bloqueidiomas):
return textoidiomas, language
def bbcode_kodi2html(text):
if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"):
import re
text = re.sub(r'\[COLOR\s([^\]]+)\]',
r'<span style="color: \1">',
text)
text = text.replace('[/COLOR]', '</span>')
text = text.replace('[CR]', '<br>')
text = re.sub(r'\[([^\]]+)\]',
r'<\1>',
text)
text = text.replace('"color: white"', '"color: auto"')
return text
## --------------------------------------------------------------------------------
def set_status(item):
@@ -853,7 +783,7 @@ def set_status(item):
data = httptools.downloadpage(host + path, post=post).data
title = bbcode_kodi2html("[COLOR green][B]OK[/B][/COLOR]")
title = "[COLOR green][B]OK[/B][/COLOR]"
return [Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=item.url,
thumbnail=item.thumbnail, show=item.show, folder=False)]
@@ -871,15 +801,14 @@ def get_status(status, type, id):
try:
if id in status['favorites'][type]:
str1 = bbcode_kodi2html(" [COLOR orange][B]Favorito[/B][/COLOR]")
str1 = " [COLOR orange][B]Favorito[/B][/COLOR]"
except:
str1 = ""
try:
if id in status['status'][type]:
str2 = state[status['status'][type][id]]
if str2 != "": str2 = bbcode_kodi2html(
" [COLOR green][B]" + state[status['status'][type][id]] + "[/B][/COLOR]")
if str2 != "": str2 = "[COLOR green][B]" + state[status['status'][type][id]] + "[/B][/COLOR]"
except:
str2 = ""

View File

@@ -1,22 +1,10 @@
{
"id": "copiapop",
"name": "Copiapop/Diskokosmiko",
"id": "kbagi",
"name": "Kbagi/Diskokosmiko",
"language": ["cast", "lat"],
"active": true,
"adult": false,
"version": 1,
"changes": [
{
"date": "15/03/2017",
"autor": "SeiTaN",
"description": "limpieza código"
},
{
"date": "16/02/2017",
"autor": "Cmos",
"description": "Primera versión"
}
],
"thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
"banner": "copiapop.png",
"categories": [
@@ -33,19 +21,19 @@
"visible": true
},
{
"id": "copiapopuser",
"id": "kbagiuser",
"type": "text",
"color": "0xFF25AA48",
"label": "Usuario Copiapop",
"label": "Usuario Kbagi",
"enabled": true,
"visible": true
},
{
"id": "copiapoppassword",
"id": "kbagipassword",
"type": "text",
"color": "0xFF25AA48",
"hidden": true,
"label": "Password Copiapop",
"label": "Password Kbagi",
"enabled": "!eq(-1,'')",
"visible": true
},

View File

@@ -9,7 +9,7 @@ from core import scrapertools
from core.item import Item
from platformcode import config, logger
__perfil__ = config.get_setting('perfil', "copiapop")
__perfil__ = config.get_setting('perfil', "kbagi")
# Fijar perfil de color
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
@@ -21,20 +21,20 @@ if __perfil__ - 1 >= 0:
else:
color1 = color2 = color3 = color4 = color5 = ""
adult_content = config.get_setting("adult_content", "copiapop")
adult_content = config.get_setting("adult_content", "kbagi")
def login(pagina):
logger.info()
try:
user = config.get_setting("%suser" % pagina.split(".")[0], "copiapop")
password = config.get_setting("%spassword" % pagina.split(".")[0], "copiapop")
if pagina == "copiapop.com":
user = config.get_setting("%suser" % pagina.split(".")[0], "kbagi")
password = config.get_setting("%spassword" % pagina.split(".")[0], "kbagi")
if pagina == "kbagi.com":
if user == "" and password == "":
return False, "Para ver los enlaces de copiapop es necesario registrarse en copiapop.com"
return False, "Para ver los enlaces de kbagi es necesario registrarse en kbagi.com"
elif user == "" or password == "":
return False, "Copiapop: Usuario o contraseña en blanco. Revisa tus credenciales"
return False, "kbagi: Usuario o contraseña en blanco. Revisa tus credenciales"
else:
if user == "" or password == "":
return False, "DiskoKosmiko: Usuario o contraseña en blanco. Revisa tus credenciales"
@@ -65,19 +65,19 @@ def mainlist(item):
itemlist = []
item.text_color = color1
logueado, error_message = login("copiapop.com")
logueado, error_message = login("kbagi.com")
if not logueado:
itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
else:
item.extra = "http://copiapop.com"
itemlist.append(item.clone(title="Copiapop", action="", text_color=color2))
item.extra = "http://kbagi.com"
itemlist.append(item.clone(title="kbagi", action="", text_color=color2))
itemlist.append(
item.clone(title=" Búsqueda", action="search", url="http://copiapop.com/action/SearchFiles"))
item.clone(title=" Búsqueda", action="search", url="http://kbagi.com/action/SearchFiles"))
itemlist.append(item.clone(title=" Colecciones", action="colecciones",
url="http://copiapop.com/action/home/MoreNewestCollections?pageNumber=1"))
url="http://kbagi.com/action/home/MoreNewestCollections?pageNumber=1"))
itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro",
url="http://copiapop.com/action/SearchFiles"))
url="http://kbagi.com/action/SearchFiles"))
itemlist.append(item.clone(title=" Mi cuenta", action="cuenta"))
item.extra = "http://diskokosmiko.mx/"
@@ -90,7 +90,7 @@ def mainlist(item):
itemlist.append(item.clone(title=" Mi cuenta", action="cuenta"))
itemlist.append(item.clone(action="", title=""))
folder_thumb = filetools.join(config.get_data_path(), 'thumbs_copiapop')
folder_thumb = filetools.join(config.get_data_path(), 'thumbs_kbagi')
files = filetools.listdir(folder_thumb)
if files:
itemlist.append(
@@ -133,7 +133,7 @@ def listado(item):
data = httptools.downloadpage(item.url, item.post).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
folder = filetools.join(config.get_data_path(), 'thumbs_copiapop')
folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
patron = '<div class="size">(.*?)</div></div></div>'
bloques = scrapertools.find_multiple_matches(data, patron)
for block in bloques:
@@ -204,7 +204,7 @@ def findvideos(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="play", title="Reproducir/Descargar", server="copiapop"))
itemlist.append(item.clone(action="play", title="Reproducir/Descargar", server="kbagi"))
usuario = scrapertools.find_single_match(item.url, '%s/([^/]+)/' % item.extra)
url_usuario = item.extra + "/" + usuario
@@ -265,7 +265,7 @@ def colecciones(item):
matches = matches[:20]
index = 20
folder = filetools.join(config.get_data_path(), 'thumbs_copiapop')
folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
for url, scrapedtitle, thumb, info in matches:
url = item.extra + url + "/gallery,1,1?ref=pager"
title = "%s (%s)" % (scrapedtitle, scrapertools.htmlclean(info))
@@ -313,7 +313,7 @@ def cuenta(item):
import urllib
itemlist = []
web = "copiapop"
web = "kbagi"
if "diskokosmiko" in item.extra:
web = "diskokosmiko"
logueado, error_message = login("diskokosmiko.mx")
@@ -321,7 +321,7 @@ def cuenta(item):
itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
return itemlist
user = config.get_setting("%suser" % web, "copiapop")
user = config.get_setting("%suser" % web, "kbagi")
user = unicode(user, "utf8").lower().encode("utf8")
url = item.extra + "/" + urllib.quote(user)
data = httptools.downloadpage(url).data
@@ -364,7 +364,7 @@ def filtro(item):
'type': 'text', 'default': '0', 'visible': True})
# Se utilizan los valores por defecto/guardados
web = "copiapop"
web = "kbagi"
if "diskokosmiko" in item.extra:
web = "diskokosmiko"
valores_guardados = config.get_setting("filtro_defecto_" + web, item.channel)
@@ -378,7 +378,7 @@ def filtro(item):
def filtrado(item, values):
values_copy = values.copy()
web = "copiapop"
web = "kbagi"
if "diskokosmiko" in item.extra:
web = "diskokosmiko"
# Guarda el filtro para que sea el que se cargue por defecto
@@ -407,7 +407,7 @@ def download_thumb(filename, url):
lock = threading.Lock()
lock.acquire()
folder = filetools.join(config.get_data_path(), 'thumbs_copiapop')
folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
if not filetools.exists(folder):
filetools.mkdir(folder)
lock.release()
@@ -419,7 +419,7 @@ def download_thumb(filename, url):
def delete_cache(url):
folder = filetools.join(config.get_data_path(), 'thumbs_copiapop')
folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
filetools.rmdirtree(folder)
if config.is_xbmc():
import xbmc

View File

@@ -104,9 +104,10 @@ def peliculas(item):
new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = contentTitle , infoLabels={'year':year} )
if year:
tmdb.set_infoLabels_item(new_item)
#if year:
# tmdb.set_infoLabels_item(new_item)
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
try:
patron = '<a href="([^"]+)" ><span class="icon-chevron-right"></span></a></div>'
next_page = re.compile(patron,re.DOTALL).findall(data)
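
The per-item tmdb.set_infoLabels_item() call inside the loop is commented out in favour of one batched tmdb.set_infoLabels_itemlist() after it, a change several channels receive in this commit. The resulting shape, with illustrative names (one lookup pass per listing instead of one per title):

from core import tmdb
from core.item import Item

def listado(item, matches):
    itemlist = []
    for url, title, thumbnail, year in matches:
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title,
                             url=url, thumbnail=thumbnail, infoLabels={'year': year}))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)  # single batched pass
    return itemlist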

View File

@@ -339,20 +339,20 @@ def episodios(item):
infoLabels = item.infoLabels
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
logger.debug('data: %s'%data)
pattern = '<ul class="%s">(.*?)</ul>' % "pagination" # item.pattern
pagination = scrapertools.find_single_match(data, pattern)
if pagination:
pattern = '<li><a href="([^"]+)">Last<\/a>'
full_url = scrapertools.find_single_match(pagination, pattern)
url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)')
list_pages = []
for x in range(1, int(last_page) + 1):
list_pages.append("%s%s" % (url, x))
list_pages = [item.url]
for x in range(2, int(last_page) + 1):
response = httptools.downloadpage('%s%s'% (url,x))
if response.sucess:
list_pages.append("%s%s" % (url, x))
else:
list_pages = [item.url]
logger.debug ('pattern: %s'%pattern)
for index, page in enumerate(list_pages):
logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
@@ -424,7 +424,7 @@ def episodios(item):
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios"))
return itemlist
def search(item, texto):

View File

@@ -14,18 +14,19 @@ from core.item import Item
from platformcode import config, logger
host = 'http://www.ohpelis.com'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0 Chrome/58.0.3029.110',
'Referer': host}
def mainlist(item):
logger.info()
itemlist = []
data = httptools.downloadpage(host).data
patron = '<li class="cat-item cat-item-\d+"><a href="(.*?)" >(.*?)<\/a> <i>(\d+)<\/i>'
matches = scrapertools.find_multiple_matches(data, patron)
mcantidad = 0
for scrapedurl, scrapedtitle, cantidad in matches:
mcantidad += int(cantidad)
itemlist.append(
item.clone(title="Peliculas",
item.clone(title="Peliculas (%s)" %mcantidad,
action='movies_menu'
))
@@ -95,14 +96,14 @@ def list_all(item):
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedplot in matches:
title = scrapedtitle
plot = scrapedplot
thumbnail = scrapedthumbnail
url = scrapedurl
year = scrapedyear
new_item = (item.clone(title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fulltitle=title,
contentTitle=title,
infoLabels={'year': year}
))
if item.extra == 'serie':
@@ -114,7 +115,7 @@ def list_all(item):
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
tmdb.set_infoLabels(itemlist, True)
# Paginacion
next_page = scrapertools.find_single_match(data, '<link rel="next" href="(.*?) />')
if next_page:
@@ -162,7 +163,6 @@ def search_list(item):
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtype, scrapedyear, scrapedplot in matches:
title = scrapedtitle
plot = scrapedplot
thumbnail = scrapedthumbnail
url = scrapedurl
year = scrapedyear
@@ -170,7 +170,6 @@ def search_list(item):
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
infoLabels={'year': year})
if scrapedtype == 'movies':
new_item.action = 'findvideos'
@@ -275,11 +274,14 @@ def findvideos(item):
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.contentTitle = item.fulltitle
videoitem.infoLabels = item.infoLabels
if videoitem.server != 'youtube':
videoitem.title = item.title + ' (%s)' % videoitem.server
else:
videoitem.title = 'Trailer en %s' % videoitem.server
videoitem.action = 'play'
videoitem.server = ""
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
@@ -288,9 +290,9 @@ def findvideos(item):
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle,
))
tmdb.set_infoLabels(itemlist, True)
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
@@ -314,3 +316,8 @@ def newest(categoria):
return []
return itemlist
def play(item):
logger.info()
item.thumbnail = item.contentThumbnail
return [item]

View File

@@ -120,40 +120,51 @@ def peliculas(item):
if len(matches_next_page) > 0:
url_next_page = urlparse.urljoin(item.url, matches_next_page[0])
for scrapedthumbnail, scrapedtitle, rating, calidad, scrapedurl, year in matches:
if 'Proximamente' not in calidad:
for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches:
if 'Proximamente' not in quality:
scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace(
'Español Latino', '').strip()
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, calidad)
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, quality)
new_item = Item(channel=__channel__, action="findvideos", contentTitle=scrapedtitle,
infoLabels={'year': year, 'rating': rating}, thumbnail=scrapedthumbnail,
url=scrapedurl, next_page=next_page, quality=calidad, title=title)
if year:
tmdb.set_infoLabels_item(new_item, __modo_grafico__)
itemlist.append(new_item)
itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
infoLabels={"year":year, "rating":rating}, thumbnail=scrapedthumbnail,
url=scrapedurl, next_page=next_page, quality=quality, title=title))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if url_next_page:
itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
url=url_next_page, next_page=next_page, folder=True, text_blod=True,
thumbnail=get_thumb("next.png")))
for item in itemlist:
if item.infoLabels['plot'] == '':
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
item.fanart = scrapertools.find_single_match(data,
"<meta property='og:image' content='([^']+)' />").replace(
'w780', 'original')
item.plot = scrapertools.find_single_match(data, '<div itemprop="description" class="wp-content">.*?<p>(['
'^<]+)</p>')
item.plot = scrapertools.htmlclean(item.plot)
item.infoLabels['director'] = scrapertools.find_single_match(data,
'<div class="name"><a href="[^"]+">([^<]+)</a>')
item.infoLabels['rating'] = scrapertools.find_single_match(data, '<b id="repimdb"><strong>([^<]+)</strong>')
item.infoLabels['votes'] = scrapertools.find_single_match(data, '<b id="repimdb"><strong>['
'^<]+</strong>\s(.*?) votos</b>')
for no_plot in itemlist:
if no_plot.infoLabels['plot'] == '':
thumb_id = scrapertools.find_single_match(no_plot.thumbnail, '.*?\/\d{2}\/(.*?)-')
thumbnail = "/%s.jpg" % thumb_id
filtro_list = {"poster_path": thumbnail}
filtro_list = filtro_list.items()
no_plot.infoLabels={'filtro':filtro_list}
tmdb.set_infoLabels_item(no_plot, __modo_grafico__)
if no_plot.infoLabels['plot'] == '':
data = httptools.downloadpage(no_plot.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
no_plot.fanart = scrapertools.find_single_match(data,
"<meta property='og:image' content='([^']+)' />").replace(
'w780', 'original')
no_plot.plot = scrapertools.find_single_match(data, '<div itemprop="description" '
'class="wp-content">.*?<p>(['
'^<]+)</p>')
no_plot.plot = scrapertools.htmlclean(no_plot.plot)
no_plot.infoLabels['director'] = scrapertools.find_single_match(data,
'<div class="name"><a href="[^"]+">([^<]+)</a>')
no_plot.infoLabels['rating'] = scrapertools.find_single_match(data, '<b id="repimdb"><strong>(['
'^<]+)</strong>')
no_plot.infoLabels['votes'] = scrapertools.find_single_match(data, '<b id="repimdb"><strong>['
'^<]+</strong>\s(.*?) votos</b>')
return itemlist
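
When a listing entry has no plot, the loop above now derives a poster filename from the scraped thumbnail and hands it to TMDb as a 'filtro' pair, matching on poster_path rather than on title alone; an earlier hunk in this commit applies the same trick to another channel. Sketched under the assumption, which these hunks imply, that set_infoLabels_item treats 'filtro' pairs as match criteria:

import re
from core import tmdb

def hint_poster(it):
    # Derive the bare poster name from the thumbnail URL (the exact regex is per-site),
    # e.g. ".../uploads/2017/09/movie-poster-185x278.jpg" -> "/movie.jpg"
    match = re.search(r'/uploads/(?:\d+/\d+/)?(.*?)-', it.thumbnail)
    if match:
        filtro_list = {"poster_path": "/%s.jpg" % match.group(1)}.items()
        it.infoLabels = {'filtro': filtro_list}
    tmdb.set_infoLabels_item(it)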

View File

@@ -155,6 +155,8 @@ def findvideos(item):
url = scrapedurl
server = servertools.get_server_name(servidor)
title = "Enlace encontrado en %s" % (server)
if idioma == 'Ingles Subtitulado':
idioma = 'vose'
itemlist.append(Item(channel=item.channel, action="play", title=title, fulltitle=item.fulltitle, url=url,
thumbnail=scrapedthumbnail, language=idioma, quality=calidad, server=server))
if itemlist:

View File

@@ -76,14 +76,11 @@ def peliculas(item):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Extrae la marca de siguiente página
paginador = scrapertools.find_single_match(data, "<div class='paginado'>.*?lateral")
next_page = scrapertools.find_single_match(data, 'class="nextpostslink" rel="next" href="(.*?)">')
patron = "<li.*?<a class='current'>.*?href='([^']+)"
scrapedurl = scrapertools.find_single_match(paginador, patron)
if scrapedurl:
if next_page:
scrapedtitle = "!Pagina Siguiente ->"
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, folder=True))
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=next_page, folder=True))
return itemlist

View File

@@ -1,11 +1,11 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import logger, config
@@ -16,117 +16,54 @@ def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel, action="peliculas", title="Recientes", url=host))
itemlist.append(Item(channel=item.channel, action="PorFecha", title="Año de Lanzamiento", url=host))
itemlist.append(Item(channel=item.channel, action="Idiomas", title="Idiomas", url=host))
itemlist.append(Item(channel=item.channel, action="calidades", title="Por calidad", url=host))
itemlist.append(Item(channel=item.channel, action="generos", title="Por género", url=host))
itemlist.append(Item(channel = item.channel,
action = "filtro",
title = "Año de Lanzamiento",
category = "lanzamiento"
))
itemlist.append(Item(channel = item.channel,
action = "filtro",
title = "Idiomas",
category = "idioma"
))
itemlist.append(Item(channel = item.channel,
action = "filtro",
title = "Por calidad",
category = "calidades"
))
itemlist.append(Item(channel = item.channel,
action = "filtro",
title = "Por género",
category = "generos"
))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", url=host))
return itemlist
def PorFecha(item):
logger.info()
# Descarga la pagina
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<section class="lanzamiento">(.*?)</section>')
# Extrae las entradas (carpetas)
patron = '<a href="([^"]+).*?title="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
def filtro(item):
logger.info(item.category)
itemlist = []
patron1 = '<section class="%s">(.*?)</section>' %item.category
patron2 = '<a href="([^"]+).*?title="([^"]+)'
data = httptools.downloadpage(host).data
data = scrapertools.find_single_match(data, patron1)
matches = scrapertools.find_multiple_matches(data, patron2)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
thumbnail = ""
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, viewmode="movie"))
return itemlist
def Idiomas(item):
logger.info()
# Descarga la pagina
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<section class="idioma">(.*?)</section>')
# Extrae las entradas (carpetas)
patron = '<a href="([^"]+).*?title="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
thumbnail = ""
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, viewmode="movie"))
return itemlist
def calidades(item):
logger.info()
# Descarga la pagina
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<section class="calidades">(.*?)</section>')
# Extrae las entradas (carpetas)
patron = '<a href="([^"]+).*?title="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
thumbnail = ""
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, viewmode="movie"))
return itemlist
def generos(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<section class="generos">(.*?)</section>')
patron = '<a href="([^"]+).*?title="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
thumbnail = ""
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
if "Adulto" in title and config.get_setting("adult_mode") == 0:
if "Adulto" in scrapedtitle and config.get_setting("adult_mode") == 0:
continue
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, viewmode="movie"))
Item(channel=item.channel, action="peliculas", title=scrapedtitle.strip(), url=scrapedurl,
viewmode="movie"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "?s=" + texto
try:
# return buscar(item)
return peliculas(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
@@ -138,21 +75,20 @@ def search(item, texto):
def peliculas(item):
logger.info()
itemlist = []
# Descarga la pagina
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<img src="([^"]+)" alt="([^"]+).*?href="([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
year = scrapertools.find_single_match(scrapedtitle, "[0-9]{4}")
fulltitle = scrapedtitle.replace(scrapertools.find_single_match(scrapedtitle, '\([0-9]+\)' ), "")
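# Se quita el "(año)" del título para que la búsqueda posterior en TMDB use el título limpio.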
item.infoLabels['year'] = year
itemlist.append(item.clone(channel = item.channel,
action = "findvideos",
title = scrapedtitle,
url = scrapedurl,
@@ -160,7 +96,7 @@ def peliculas(item):
plot = "",
fulltitle = fulltitle
))
tmdb.set_infoLabels(itemlist, True)
next_page = scrapertools.find_single_match(data, 'rel="next" href="([^"]+)')
if next_page != "":
itemlist.append(
@@ -172,31 +108,30 @@ def peliculas(item):
def findvideos(item):
logger.info()
itemlist = []
encontrados = []
itemtemp = []
data = httptools.downloadpage(item.url).data
patron = 'hand" rel="([^"]+).*?title="(.*?)".*?<span>([^<]+)</span>.*?</span><span class="q">(.*?)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, server_name, language, quality in matches:
if scrapedurl in encontrados:
continue
encontrados.append(scrapedurl)
language = language.strip()
quality = quality.strip()
mq = "(" + quality + ")"
if "http" in quality:
quality = mq = ""
titulo = "%s (" + language + ") " + mq
itemlist.append(item.clone(channel=item.channel,
action = "play",
extra = "",
fulltitle = item.fulltitle,
title = "%s (" + language + ") (" + quality + ")",
thumbnail = item.thumbnail,
title = titulo,
url = scrapedurl,
folder = False,
language = language,
quality = quality
))
tmdb.set_infoLabels(itemlist, True)
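# El marcador "%s" de cada título se sustituye luego por el nombre del servidor en get_servers_itemlist.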
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if itemlist:
itemlist.append(Item(channel=item.channel))

View File

@@ -5,6 +5,7 @@ import re
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
@@ -31,8 +32,8 @@ def mainlist(item):
url= host + "/calidad/hd-real-720", viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, title=" Listado por género", action="porGenero", url= host))
itemlist.append(Item(channel=item.channel, title=" Buscar", action="search", url= host + "/?s="))
itemlist.append(Item(channel=item.channel, title=" Idioma", action="porIdioma", url= host))
itemlist.append(Item(channel=item.channel, title=" Buscar", action="search", url= host + "/?s="))
return itemlist
@@ -42,7 +43,7 @@ def porIdioma(item):
itemlist.append(Item(channel=item.channel, title="Castellano", action="agregadas",
url= host + "/idioma/espanol-castellano/", viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, title="VOS", action="agregadas", url= host + "/idioma/subtitulada/",
Item(channel=item.channel, title="VOSE", action="agregadas", url= host + "/idioma/subtitulada/",
viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Latino", action="agregadas",
url= host + "/idioma/espanol-latino/", viewmode="movie_with_plot"))
@@ -52,14 +53,10 @@ def porIdioma(item):
def porGenero(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'cat-item.*?href="([^"]+).*?>(.*?)<.*?span>([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for urlgen, genero, cantidad in matches:
cantidad = cantidad.replace(".", "")
titulo = genero + " (" + cantidad + ")"
@@ -73,10 +70,9 @@ def search(item, texto):
logger.info()
texto_post = texto.replace(" ", "+")
item.url = host + "/?s=" + texto_post
try:
return listaBuscar(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
@@ -87,12 +83,9 @@ def search(item, texto):
def agregadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\r|\t|\s{2}|&nbsp;|"', "", data)
patron = scrapertools.find_multiple_matches (data,'<divclass=col-mt-5 postsh>.*?Duración')
for element in patron:
info = scrapertools.find_single_match(element,
"calidad>(.*?)<.*?ahref=(.*?)>.*?'reflectMe' src=(.*?)\/>.*?<h2>(.*?)"
@@ -103,28 +96,22 @@ def agregadas(item):
title = info[3]
plot = info[4]
year = info[5].strip()
itemlist.append(Item(channel=item.channel,
action='findvideos',
contentType = "movie",
contentTitle = title,
fulltitle = title,
infoLabels={'year':year},
plot=plot,
quality=quality,
thumbnail=thumbnail,
title=title,
url=url
))
tmdb.set_infoLabels(itemlist, True)
# Paginación
next_page = scrapertools.find_single_match(data,'tima>.*?href=(.*?) ><i')
if next_page:
itemlist.append(Item(channel=item.channel, action="agregadas", title='Pagina Siguiente >>',
url=next_page.strip(),
viewmode="movie_with_plot"))
return itemlist
@@ -135,11 +122,9 @@ def listaBuscar(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n", " ", data)
logger.info("data=" + data)
patron = 'class="row"> <a.*?="([^"]+).*?src="([^"]+).*?title="([^"]+).*?class="text-list">(.*?)</p>'
matches = scrapertools.find_multiple_matches(data, patron)
for url, thumbnail, title, sinopsis in matches:
itemlist.append(Item(channel=item.channel, action="findvideos", title=title + " ", fulltitle=title, url=url,
@@ -150,17 +135,13 @@ def listaBuscar(item):
def findvideos(item):
logger.info()
itemlist = []
plot = item.plot
# Descarga la pagina
data = httptools.downloadpage(item.url).data
patron = 'cursor: hand" rel="(.*?)".*?class="optxt"><span>(.*?)<.*?width.*?class="q">(.*?)</span'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedidioma, scrapedcalidad in matches:
idioma = ""
title = "%s [" + scrapedcalidad + "][" + scrapedidioma +"]"
if "youtube" in scrapedurl:
scrapedurl += "&"
@@ -168,16 +149,17 @@ def findvideos(item):
language = scrapedidioma
if not ("omina.farlante1" in scrapedurl or "404" in scrapedurl):
itemlist.append(
Item(channel=item.channel, action="play", title=title, fulltitle=item.title, url=scrapedurl,
thumbnail=item.thumbnail, plot=plot, show=item.show, quality= quality, language=language, extra = item.thumbnail))
item.clone(channel=item.channel, action="play", title=title, fulltitle=item.title, url=scrapedurl,
quality= quality, language=language, extra = item.thumbnail))
tmdb.set_infoLabels(itemlist, True)
itemlist=servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Opción "Añadir esta película a la biblioteca de KODI"
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
filtro=True, action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
infoLabels={'title': item.fulltitle}, fulltitle=item.title,
extra="library"))
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
fulltitle=item.title
))
return itemlist

View File

@@ -240,7 +240,7 @@ def findvideos(item):
))
for videoitem in templist:
data = httptools.downloadpage(videoitem.url).data
urls_list = scrapertools.find_multiple_matches(data, '({"type":.*?})')
urls_list = scrapertools.find_multiple_matches(data, '{"reorder":1,"type":.*?}')
for element in urls_list:
json_data=jsontools.load(element)
@@ -253,6 +253,7 @@ def findvideos(item):
new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
'=%s&srt=%s' % (url, sub)
logger.debug('new_url: %s' % new_url)
data = httptools.downloadpage(new_url).data
data = re.sub(r'\\', "", data)

View File

@@ -176,27 +176,45 @@ def peliculas(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, quality, year, scrapedtitle, scrapedthumbnail in matches:
if '/ ' in scrapedtitle:
scrapedtitle = scrapedtitle.partition('/ ')[2]
title = scrapedtitle
contentTitle = title
url = scrapedurl
thumbnail = scrapedthumbnail
itemlist.append(Item(channel=item.channel,
action="findvideos",
title=title, url=url,
quality=quality,
thumbnail=thumbnail,
contentTitle=contentTitle,
infoLabels={"year": year},
text_color=color3
))
# for scrapedurl, calidad, year, scrapedtitle, scrapedthumbnail in matches:
# datas = httptools.downloadpage(scrapedurl).data
# datas = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", datas)
# # logger.info(datas)
# if '/ ' in scrapedtitle:
# scrapedtitle = scrapedtitle.partition('/ ')[2]
# contentTitle = scrapertools.find_single_match(datas, '<em class="pull-left">Titulo original: </em>([^<]+)</p>')
# contentTitle = scrapertools.decodeHtmlentities(contentTitle.strip())
# rating = scrapertools.find_single_match(datas, 'alt="Puntaje MPA IMDb" /></a><span>([^<]+)</span>')
# director = scrapertools.find_single_match(
# datas, '<div class="list-cast-info tableCell"><a href="[^"]+" rel="tag">([^<]+)</a></div>')
# title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle.strip(), calidad.upper())
#
# logger.debug('thumbnail: %s' % scrapedthumbnail)
# new_item = Item(channel=item.channel, action="findvideos", title=title, plot='', contentType='movie',
# url=scrapedurl, contentQuality=calidad, thumbnail=scrapedthumbnail,
# contentTitle=contentTitle, infoLabels={"year": year, 'rating': rating, 'director': director},
# text_color=color3)
# itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
paginacion = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
if paginacion:
@@ -267,13 +285,13 @@ def findvideos(item):
if 'drive' not in servidores and 'streamvips' not in servidores and 'mediastream' not in servidores:
if 'ultrastream' not in servidores:
server = servertools.get_server_from_url(scrapedurl)
quality = scrapertools.find_single_match(
datas, '<p class="hidden-xs hidden-sm">.*?class="magnet-download">([^<]+)p</a>')
title = "Ver en: [COLOR yellowgreen][{}][/COLOR] [COLOR yellow][{}][/COLOR]".format(servidores.capitalize(),
quality.upper())
itemlist.append(item.clone(action='play', title=title, url=scrapedurl, quality=item.quality,
server=server, language=lang.replace('Español ', ''),
text_color=color3, thumbnail=item.thumbnail))

View File

@@ -1,7 +1,7 @@
{
"id": "pordede",
"name": "Pordede",
"active": true,
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "pordede.png",
@@ -105,4 +105,4 @@
]
}
]
}

View File

@@ -1,63 +0,0 @@
{
"id": "pymovie",
"name": "pymovie",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://s27.postimg.org/hvmvz7vab/pymovie.png",
"banner": "https://s28.postimg.org/3k0wjnwul/pymovie_banner.png",
"version": 1,
"changes": [
{
"date": "25/05/2017",
"description": "cambios esteticos"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "04/01/2017",
"description": "Release."
}
],
"categories": [
"movie",
"tvshow",
"documentary"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,399 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
host = "http://www.pymovie.com.mx"
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
tgenero = {"comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
"drama": "https://s16.postimg.org/94sia332d/drama.png",
"accion": "https://s3.postimg.org/y6o9puflv/accion.png",
"aventura": "https://s10.postimg.org/6su40czih/aventura.png",
"romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
"animacion": "https://s13.postimg.org/5on877l87/animacion.png",
"ciencia ficcion": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
"terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
"musical": "https://s29.postimg.org/bbxmdh9c7/musical.png",
"deporte": "https://s13.postimg.org/xuxf5h06v/deporte.png",
"artes Marciales": "https://s24.postimg.org/w1aw45j5h/artesmarciales.png",
"intriga": "https://s27.postimg.org/v9og43u2b/intriga.png",
"infantil": "https://s23.postimg.org/g5rmazozv/infantil.png",
"mexicanas": "https://s3.postimg.org/p36ntnxfn/mexicana.png",
"espionaje": "https://s2.postimg.org/5hv64b989/espionaje.png",
"biografia": "https://s15.postimg.org/5lrpbx323/biografia.png"}
tcalidad = {'hd-1080': '[COLOR limegreen]HD-1080[/COLOR]', 'hd-720': '[COLOR limegreen]HD-720[/COLOR]',
'blueray': '[COLOR limegreen]BLUERAY[/COLOR]', 'dvd': '[COLOR limegreen]DVD[/COLOR]',
'cam': '[COLOR red]CAM[/COLOR]'}
tcalidad2 = {'hd-1080': 'https://s21.postimg.org/4h1s0t1wn/hd1080.png',
'hd-720': 'https://s12.postimg.org/lthu7v4q5/hd720.png', 'blueray': '',
'dvd': 'https://s1.postimg.org/m89hus1tb/dvd.png', 'cam': 'https://s11.postimg.org/ad4o5wpz7/cam.png'}
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Peliculas", action="menupeliculas",
thumbnail='https://s8.postimg.org/6wqwy2c2t/peliculas.png',
fanart='https://s8.postimg.org/6wqwy2c2t/peliculas.png', extra='peliculas/'))
itemlist.append(itemlist[-1].clone(title="Series", action="menuseries",
thumbnail='https://s27.postimg.org/iahczwgrn/series.png',
fanart='https://s27.postimg.org/iahczwgrn/series.png', extra='peliculas/'))
itemlist.append(itemlist[-1].clone(title="Documentales", action="menudocumental",
thumbnail='https://s16.postimg.org/7xjj4bmol/documental.png',
fanart='https://s16.postimg.org/7xjj4bmol/documental.png', extra='documental'))
return itemlist
def menupeliculas(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Ultimas", action="lista", url=host + '/Ordenar/Estreno/?page=1',
thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png',
fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png', extra='Estreno'))
itemlist.append(Item(channel=item.channel, title="Todas", action="lista", url=host + '?page=1',
thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', extra='todas'))
itemlist.append(Item(channel=item.channel, title="Generos", action="seccion", url=host,
thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', extra='generos'))
itemlist.append(
Item(channel=item.channel, title="Alfabetico", action="lista", url=host + '/Ordenar/Alfabetico/?page=1',
thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png', fanart='https://s17.postimg.org/fwi1y99en/a-z.png',
extra='Alfabetico'))
itemlist.append(Item(channel=item.channel, title="Calidad", action="seccion", url=host,
thumbnail='https://s13.postimg.org/6nzv8nlkn/calidad.png',
fanart='https://s13.postimg.org/6nzv8nlkn/calidad.png', extra='calidad'))
itemlist.append(
Item(channel=item.channel, title="Mas Vistas", action="lista", url=host + '/Ordenar/MasVistas/?page=1',
thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png',
fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png', extra='Estreno'))
itemlist.append(
Item(channel=item.channel, title="Mas Votadas", action="lista", url=host + '/Ordenar/MasVotos/?page=1',
thumbnail='https://s7.postimg.org/9kg1nthzf/votadas.png',
fanart='https://s7.postimg.org/9kg1nthzf/votadas.png', extra='Estreno'))
itemlist.append(
Item(channel=item.channel, title="Calificacion", action="lista", url=host + '/Ordenar/Calificacion/?page=1',
thumbnail='https://s18.postimg.org/mjqrl49h5/calificacion.png',
fanart='https://s18.postimg.org/mjqrl49h5/calificacion.png', extra='Estreno'))
return itemlist
def menuseries(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Ultimas", action="lista", url=host + "/Series-estreno/?page=1",
thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png',
fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png', extra='series'))
itemlist.append(Item(channel=item.channel, title="Generos", action="seccion", url=host,
thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', extra='series-generos'))
itemlist.append(
Item(channel=item.channel, title="Alfabetico", action="lista", url=host + '/Ordernar-Serie/Alfabetico/?page=1',
thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png', fanart='https://s17.postimg.org/fwi1y99en/a-z.png',
extra='series-alpha'))
itemlist.append(
Item(channel=item.channel, title="Mas Vistas", action="lista", url=host + '/Ordernar-Serie/MasVistas/?page=1',
thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png',
fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png', extra='series-masvistas'))
itemlist.append(
Item(channel=item.channel, title="Mas Votadas", action="lista", url=host + '/Ordernar-Serie/Masvotos/?page=1',
thumbnail='https://s7.postimg.org/9kg1nthzf/votadas.png',
fanart='https://s7.postimg.org/9kg1nthzf/votadas.png', extra='series-masvotadas'))
itemlist.append(Item(channel=item.channel, title="Recomendadas", action="lista",
url=host + '/Ordernar-Serie/Recomendadas/?page=1',
thumbnail='https://s12.postimg.org/s881laywd/recomendadas.png',
fanart='https://s12.postimg.org/s881laywd/recomendadas.png', extra='series-recomendadas'))
return itemlist
def menudocumental(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Todas", action="lista", url=host + "/Documentales/?page=1",
thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', extra='documental'))
itemlist.append(Item(channel=item.channel, title="Alfabetico", action="lista",
url=host + "/OrdenarDocumental/Alfabetico/?page=1",
thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png',
fanart='https://s17.postimg.org/fwi1y99en/a-z.png', extra='documental'))
itemlist.append(Item(channel=item.channel, title="Mas Vistas", action="lista",
url=host + "/OrdenarDocumental/MasVistas/?page=1",
thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png',
fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png', extra='documental'))
return itemlist
def lista(item):
logger.info()
if item.extra == 'series':
accion = 'episodiosxtemp'
elif 'series-' in item.extra:
accion = 'temporadas'
else:
accion = 'findvideos'
itemlist = []
data = httptools.downloadpage(item.url).data
if 'series' in item.extra or item.extra == 'documental':
patron = '<h2 itemprop="name" >([^<]+)<\/h2><a href="([^.]+)" title=".*?" ><img.*?src="([^"]+)".*?class=".*?boren2"\/([^<]+)'
else:
patron = '<h2 itemprop="name" >([^<]+)<\/h2><a href="([^.]+)" title=".*?" ><img.*?src="([^"]+)".*?class=".*?boren2".*?>([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl, scrapedthumbnail, scrapedcalidad in matches:
url = scrapertools.decodeHtmlentities(host + scrapedurl)
url = url.strip(' ')
scrapedcalidad = scrapedcalidad.strip(' ')
scrapedcalidad = scrapedcalidad.strip('p')
scrapedcalidad = scrapedcalidad.lower()
if 'series' in item.extra or item.extra == 'documental':
title = scrapertools.decodeHtmlentities(scrapedtitle)
else:
calidad = tcalidad[scrapedcalidad]
title = scrapertools.decodeHtmlentities(scrapedtitle) + ' (' + calidad + ') '
thumbnail = scrapedthumbnail
fanart = ''
plot = ''
itemlist.append(Item(channel=item.channel, action=accion, title=title, url=url, thumbnail=thumbnail, plot=plot,
fanart=fanart, contentSerieName=scrapedtitle, contentTitle=scrapedtitle, extra=item.extra))
# Paginacion
if itemlist != []:
actual_page_url = item.url
next_page = scrapertools.find_single_match(data, '<a href="\?page=([^"]+)" class="next">next &')
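# Se recorta la URL actual hasta el '=' para sustituir el número de página por el de la siguiente.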
while item.url[-1] != '=':
item.url = item.url[:-1]
next_page_url = item.url + next_page
if next_page != '':
itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page_url,
thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png', extra=item.extra))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
templist = []
data = httptools.downloadpage(item.url).data
patron = 'class="listatemporadas" ><a href="([^"]+)" title=".*?" ><img src="([^"]+)" width="80" height="100" title=".*?alt=".*?<h3>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = host + scrapedurl
title = scrapedtitle
thumbnail = scrapedthumbnail
plot = ''
fanart = ''
contentSeasonNumber = scrapedtitle.replace('Temporada ', '')
itemlist.append(Item(channel=item.channel, action="episodiosxtemp", title=title, fulltitle=item.title, url=url,
thumbnail=thumbnail, plot=plot, fanart=fanart, contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber))
if item.extra == 'temporadas':
for tempitem in itemlist:
templist += episodiosxtemp(tempitem)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += episodiosxtemp(tempitem)
return itemlist
def episodiosxtemp(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a href="\/VerCapitulo\/([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
ep = 1
for scrapedtitle in matches:
scrapedtitle = scrapedtitle.replace(item.contentSeasonNumber + 'x' + '0' + str(ep), '')
url = host + '/VerCapitulo/' + scrapedtitle.replace(' ', '-')
title = item.contentSeasonNumber + 'x' + str(ep) + ' ' + scrapedtitle.strip('/')
thumbnail = item.thumbnail
plot = ''
fanart = ''
contentEpisodeNumber = ep
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.title, url=url,
thumbnail=thumbnail, plot=plot, fanart=fanart, extra='series',
contentSerieName=item.contentSerieName, contentSeasonNumber=item.contentSeasonNumber,
contentEpisodeNumber=contentEpisodeNumber))
ep = ep + 1
return itemlist
def seccion(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<option class="opselect" value="([^"]+)".*?>([^<]+)<\/option>'
matches = re.compile(patron, re.DOTALL).findall(data)
if item.extra == 'generos':
oplista = tgenero
opdir = '/Categoria/'
elif item.extra == 'calidad':
oplista = tcalidad
opdir = '/Calidad/'
elif item.extra == 'series-generos':
oplista = tgenero
opdir = '/Categoria-Series/'
for scrapeddir, scrapedtitle in matches:
url = item.url + opdir + scrapeddir + '/?page=1'
title = scrapedtitle.upper()
if 'generos' in item.extra and scrapedtitle.lower() in oplista:
thumbnail = oplista[scrapedtitle.lower()]
fanart = oplista[scrapedtitle.lower()]
elif 'calidad' in item.extra and scrapedtitle.lower() in oplista:
thumbnail = tcalidad2[scrapedtitle.lower()]
fanart = tcalidad[scrapedtitle.lower()]
else:
thumbnail = ''
fanart = ''
if scrapedtitle.lower() in oplista:
itemlist.append(Item(channel=item.channel, action="lista", title=title, fulltitle=item.title, url=url,
thumbnail=thumbnail, fanart=fanart, extra=item.extra))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
audio = {'Latino': '[COLOR limegreen]LATINO[/COLOR]', 'Español': '[COLOR yellow]ESPAÑOL[/COLOR]',
'Ingles': '[COLOR red]ORIGINAL SUBTITULADO[/COLOR]', 'Latino-Ingles': 'DUAL'}
data = httptools.downloadpage(item.url).data
if item.extra != 'series':
patron = 'data-video="([^"]+)" class="reproductorVideo"><ul><li>([^<]+)<\/li><li>([^<]+)<\/li>'
tipotitle = item.contentTitle
elif item.extra == 'series':
tipotitle = str(item.contentSeasonNumber) + 'x' + str(item.contentEpisodeNumber) + ' ' + item.contentSerieName
patron = '<li class="enlaces-l"><a href="([^"]+)" target="_blank"><ul><li>([^<]+)<.*?>([^<]+)<.*?>Reproducir<'
matches = re.compile(patron, re.DOTALL).findall(data)
if item.extra != 'documental':
n = 0
for scrapedurl, scrapedcalidad, scrapedaudio in matches:
if 'series' in item.extra:
datab = httptools.downloadpage(host + scrapedurl).data
url = scrapertools.find_single_match(datab, 'class="reproductor"><iframe src="([^"]+)"')
logger.info(url + ' esta es la direccion')
else:
url = scrapedurl
title = tipotitle
idioma = audio[scrapedaudio]
itemlist.extend(servertools.find_video_items(data=url))
if n < len(itemlist):
itemlist[n].title = tipotitle + ' (' + idioma + ' ) ' + '(' + itemlist[n].server + ' )'
n = n + 1
else:
url = scrapertools.find_single_match(data, 'class="reproductor"><iframe src="([^"]+)"')
itemlist.extend(servertools.find_video_items(data=url))
for videoitem in itemlist:
if item.extra == 'documental':
videoitem.title = item.title + ' (' + videoitem.server + ')'
videoitem.channel = item.channel
videoitem.action = "play"
videoitem.folder = False
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'series':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
item.extra = 'Estrenos'
try:
if categoria == 'peliculas':
item.url = host + '/Ordenar/Estreno/?page=1'
elif categoria == 'infantiles':
item.url = host + '/Categoria/Animacion/?page=1'
elif categoria == 'documentales':
item.url = host + '/Documentales/?page=1'
item.extra = 'documental'
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
import re
from core import scrapertools
from core import servertools
@@ -10,58 +9,57 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
host = "http://www.repelis.tv"
# Main list manual
def mainlist(item):
logger.info()
itemlist = []
item.url = "http://www.repelis.tv/pag/1"
mifan = "http://www.psicocine.com/wp-content/uploads/2013/08/Bad_Robot_Logo.jpg"
itemlist.append(Item(channel=item.channel, action="menupelis", title="Peliculas", url="http://www.repelis.tv/pag/1",
itemlist.append(Item(channel=item.channel, action="menupelis", title="Peliculas", url= host + "/pag/1",
thumbnail="http://www.gaceta.es/sites/default/files/styles/668x300/public"
"/metro_goldwyn_mayer_1926-web.png?itok=-lRSR9ZC",
fanart=mifan))
itemlist.append(Item(channel=item.channel, action="menuestre", title="Estrenos",
url="http://www.repelis.tv/archivos/estrenos/pag/1",
url= host + "/archivos/estrenos/pag/1",
thumbnail="http://t0.gstatic.com/images?q=tbn"
":ANd9GcS4g68rmeLQFuX7iCrPwd00FI_OlINZXCYXEFrJHTZ0VSHefIIbaw",
fanart=mifan))
itemlist.append(
Item(channel=item.channel, action="menudesta", title="Destacadas", url="http://www.repelis.tv/pag/1",
Item(channel=item.channel, action="menudesta", title="Destacadas", url= host + "/pag/1",
thumbnail="http://img.irtve.es/v/1074982/", fanart=mifan))
itemlist.append(Item(channel=item.channel, action="menupelis", title="Proximos estrenos",
url="http://www.repelis.tv/archivos/proximos-estrenos/pag/1",
url= host + "/archivos/proximos-estrenos/pag/1",
thumbnail="https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcTpsRC"
"-GTYzCqhor2gIDfAB61XeymwgXWSVBHoRAKs2c5HAn29f&reload=on",
fanart=mifan))
itemlist.append(Item(channel=item.channel, action="menupelis", title="Todas las Peliculas",
url="http://www.repelis.tv/pag/1",
url= host + "/pag/1",
thumbnail="https://freaksociety.files.wordpress.com/2012/02/logos-cine.jpg", fanart=mifan))
if config.get_setting("adult_mode") != 0:
itemlist.append(Item(channel=item.channel, action="menupelis", title="Eroticas +18",
url="http://www.repelis.tv/genero/eroticas/pag/1",
url= host + "/genero/eroticas/pag/1",
thumbnail="http://www.topkamisetas.com/catalogo/images/TB0005.gif",
fanart="http://www.topkamisetas.com/catalogo/images/TB0005.gif", extra='adult'))
# Quito la busqueda por año si no esta enabled el adultmode, porque no hay manera de filtrar los enlaces
# eroticos72
itemlist.append(
Item(channel=item.channel, action="poranyo", title="Por Año", url="http://www.repelis.tv/anio/2016",
Item(channel=item.channel, action="poranyo", title="Por Año", url= host + "/anio/2016",
thumbnail="http://t3.gstatic.com/images?q=tbn:ANd9GcSkxiYXdBcI0cvBLsb_nNlz_dWXHRl2Q"
"-ER9dPnP1gNUudhrqlR",
fanart=mifan))
# Por categoria si que filtra la categoria de eroticos
itemlist.append(Item(channel=item.channel, action="porcateg", title="Por Categoria",
url="http://www.repelis.tv/genero/accion/pag/1",
url= host + "/genero/accion/pag/1",
thumbnail="http://www.logopro.it/blog/wp-content/uploads/2013/07/categoria-sigaretta"
"-elettronica.png",
fanart=mifan))
itemlist.append(
Item(channel=item.channel, action="search", title="Buscar...", url="http://www.repelis.tv/search/?s=",
Item(channel=item.channel, action="search", title="Buscar...", url= host + "/search/?s=",
thumbnail="http://thumbs.dreamstime.com/x/buscar-pistas-13159747.jpg", fanart=mifan))
return itemlist
@@ -70,9 +68,7 @@ def mainlist(item):
def menupelis(item):
logger.info(item.url)
itemlist = []
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
if item.extra == '':
@@ -85,8 +81,6 @@ def menupelis(item):
section = 'de %s'%item.extra
patronenlaces = '<h.>Películas %s<\/h.>.*?>(.*?)<\/section>'%section
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
for bloque_enlaces in matchesenlaces:
@@ -97,25 +91,18 @@ def menupelis(item):
matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
logger.info("He encontrado el segundo bloque")
logger.info("extra_info: %s" % extra_info)
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "");
url = scrapedurl
thumbnail = scrapedthumbnail
quality = scrapertools.find_single_match(extra_info, 'calidad.*?>Calidad (.*?)<')
year = scrapertools.find_single_match(extra_info, '"anio">(\d{4})<')
language = scrapertools.find_multiple_matches(extra_info, 'class="(latino|espanol|subtitulado)"')
# if language = 'ingles':
# language='vo'
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, language=language, quality=quality,
infoLabels={'year': year}))
tmdb.set_infoLabels(itemlist)
try:
next_page = scrapertools.get_match(data, '<span class="current">\d+</span><a href="([^"]+)"')
title = "[COLOR red][B]Pagina siguiente »[/B][/COLOR]"
@@ -139,9 +126,6 @@ def menudesta(item):
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
for bloque_enlaces in matchesenlaces:
# patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"'
patron = '<div class="poster-media-card">.*?'
patron += '<a href="(.*?)".*?title="(.*?)".*?'
patron += '<img src="(.*?)"'
@@ -150,11 +134,10 @@ def menudesta(item):
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "");
url = scrapedurl
thumbnail = scrapedthumbnail
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, fanart=thumbnail))
return itemlist
@@ -180,8 +163,8 @@ def menuestre(item):
for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "");
url = scrapedurl
thumbnail = scrapedthumbnail
quality = scrapertools.find_single_match(extra_info, 'calidad.*?>Calidad (.*?)<')
year = scrapertools.find_single_match(extra_info, '"anio">(\d{4})<')
language = scrapertools.find_single_match(extra_info, 'class="(latino|espanol|subtitulado)"')
@@ -190,10 +173,6 @@ def menuestre(item):
thumbnail=thumbnail, fanart=thumbnail, language=language, quality=quality,
infoLabels={'year': year}))
## Paginación
# <span class="current">2</span><a href="http://www.repelis.tv/page/3"
# Si falla no muestra ">> Página siguiente"
try:
next_page = scrapertools.get_match(data, '<span class="current">\d+</span><a href="([^"]+)"')
title = "[COLOR red][B]Pagina siguiente »[/B][/COLOR]"
@@ -205,48 +184,25 @@ def menuestre(item):
def findvideos(item):
logger.info(item.url)
itemlist = []
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patron = '<h2>Sinopsis</h2>.*?<p>(.*?)</p>.*?<div id="informacion".*?</h2>.*?<p>(.*?)</p>' # titulo
matches = scrapertools.find_multiple_matches(data, patron)
for sinopsis, title in matches:
title = "[COLOR white][B]" + title + "[/B][/COLOR]"
patron = '<div id="informacion".*?>(.*?)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedplot in matches:
splot = title + "\n\n"
plot = scrapedplot
plot = re.sub('<h2>', "[COLOR red][B]", plot)
plot = re.sub('</h2>', "[/B][/COLOR] : ", plot)
plot = re.sub('<p>', "[COLOR green]", plot)
plot = re.sub('</p>', "[/COLOR]\n", plot)
plot = re.sub('<[^>]+>', "", plot)
splot += plot + "\n[COLOR red][B] Sinopsis[/B][/COLOR]\n " + sinopsis
# datos de los enlaces
'''
<a rel="nofollow" href="(.*?)".*?<td><img.*?</td><td>(.*?)</td><td>(.*?)</td></tr>
">Vimple</td>
'''
patron = '<tbody>(.*?)</tbody>'
matchesx = scrapertools.find_multiple_matches(data, patron)
for bloq in matchesx:
patron = 'href="(.*?)".*?0 0">(.*?)</.*?<td>(.*?)</.*?<td>(.*?)<'
matches = scrapertools.find_multiple_matches(bloq, patron)
for scrapedurl, scrapedserver, scrapedlang, scrapedquality in matches:
url = scrapedurl
patronenlaces = '.*?://(.*?)/'
matchesenlaces = scrapertools.find_multiple_matches(scrapedurl, patronenlaces)
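# Se extrae el dominio de cada enlace y se normalizan alias de servidor (Vimple -> vimpleru, Ok -> okru).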
scrapedtitle = ""
if scrapedserver == 'Vimple':
scrapedserver = 'vimpleru'
@@ -254,13 +210,14 @@ def findvideos(item):
scrapedserver = 'okru'
server = servertools.get_server_name(scrapedserver)
for scrapedenlace in matchesenlaces:
scrapedtitle = title + " [COLOR white][ [/COLOR]" + "[COLOR green]" + scrapedquality + "[/COLOR]" + "[COLOR white] ][/COLOR]" + " [COLOR red] [" + scrapedlang + "][/COLOR] » " + scrapedserver
scrapedtitle = "[COLOR white][ [/COLOR][COLOR green]" + scrapedquality + "[/COLOR]" + "[COLOR white] ][/COLOR] [COLOR red] [" + scrapedlang + "][/COLOR] » " + scrapedserver
itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, extra=title, url=url,
fanart=item.thumbnail, thumbnail=item.thumbnail, plot=splot, language=scrapedlang,
quality=scrapedquality, server=server))
itemlist.append(item.clone(action="play", title=scrapedtitle, extra=title, url=url,
fanart=item.thumbnail, language=scrapedlang,
quality=scrapedquality, server = server))
tmdb.set_infoLabels(itemlist)
if itemlist:
itemlist.append(Item(channel=item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer",
text_color="magenta"))
# Opción "Añadir esta película a la biblioteca de KODI"
@@ -270,18 +227,15 @@ def findvideos(item):
fulltitle=item.fulltitle))
return itemlist
def play(item):
logger.info()
itemlist =[]
data = httptools.downloadpage(item.url).data
enc = scrapertools.find_multiple_matches(data, "Player\.decode\('(.*?)'\)")
dec=''
for cod in enc:
dec+=decode(cod)
url = scrapertools.find_single_match(dec, 'src="(.*?)"')
itemlist.append(item.clone(url=url))
return itemlist
@@ -290,7 +244,7 @@ def play(item):
def search(item, texto):
logger.info(item.url)
texto = texto.replace(" ", "+")
item.url = host + '/buscar/?s=%s' % (texto)
logger.info(item.url)
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
@@ -311,8 +265,8 @@ def search(item, texto):
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "")
url = item.url + scrapedurl
thumbnail = item.url + scrapedthumbnail
logger.info(url)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, fanart=thumbnail))
@@ -332,7 +286,7 @@ def poranyo(item):
for scrapedurl, scrapedtitle in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "")
url = item.url + scrapedurl
itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
fanart=item.fanart, extra='year'))
@@ -345,13 +299,12 @@ def porcateg(item):
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">'
itemlist = []
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "")
url = scrapedurl
logger.info(url)
# si no esta permitidas categoria adultos, la filtramos
extra = title
@@ -399,4 +352,4 @@ def decode(string):
output = output.decode('utf8')
return output

View File

@@ -119,14 +119,15 @@ def episodios(item):
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle, bloqueidiomas in matches:
title = scrapedtitle.strip() + " (" + extrae_idiomas(bloqueidiomas) + ")"
idiomas, language = extrae_idiomas(bloqueidiomas)
title = scrapedtitle.strip() + " (" + idiomas + ")"
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = ""
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
plot=plot, show=item.show, folder=True, language=language))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
@@ -142,18 +143,19 @@ def extrae_idiomas(bloqueidiomas):
patronidiomas = '([a-z0-9]+).png"'
idiomas = re.compile(patronidiomas, re.DOTALL).findall(bloqueidiomas)
textoidiomas = ""
language=[]
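# Además del texto para mostrar, se acumula la lista de idiomas para el filtrado por idioma.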
for idioma in idiomas:
if idioma == "1":
textoidiomas = textoidiomas + "Español" + "/"
if idioma == "2":
textoidiomas = textoidiomas + "Latino" + "/"
if idioma == "3":
textoidiomas = textoidiomas + "VOS" + "/"
textoidiomas = textoidiomas + "VOSE" + "/"
if idioma == "4":
textoidiomas = textoidiomas + "VO" + "/"
language.append(codigo_a_idioma(idioma))
textoidiomas = textoidiomas[:-1]
return textoidiomas, language
def codigo_a_idioma(codigo):
@@ -163,7 +165,7 @@ def codigo_a_idioma(codigo):
if codigo == "2":
idioma = "Latino"
if codigo == "3":
idioma = "VOS"
idioma = "VOSE"
if codigo == "4":
idioma = "VO"
@@ -195,14 +197,15 @@ def findvideos(item):
for idioma, servername, scrapedurl in matches:
title = "Mirror en " + servername + " (" + codigo_a_idioma(idioma) + ")"
language = codigo_a_idioma(idioma)
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = ""
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="play", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
plot=plot, folder=False, language=language))
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist

View File

@@ -108,7 +108,7 @@ def lista(item):
actual_page_url = item.url
next_page = scrapertools.find_single_match(data, '<div class=pag_b><a href=(.*?) >Siguiente<\/a><\/div>')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=item.url + next_page,
itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=host + next_page,
thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'))
return itemlist

View File

@@ -144,7 +144,7 @@ def lista_gen(item):
context1=[renumbertools.context(item), autoplay.context]
itemlist.append(
Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
show=scrapedtitle, context=context1, language=scrapedlang))
tmdb.set_infoLabels(itemlist)
# Paginacion

View File

@@ -171,12 +171,13 @@ def findvideos(item):
matches = re.compile(pattern, re.S).findall(data)
for url, server, language in matches:
title = "[%s] - [%s]" % (language, server)
url = host + url
server = re.sub('(\..*)', '', server)
logger.debug("url %s" % url)
itemlist.append(Item(channel=item.channel, action="play", title=title, fulltitle=item.fulltitle, url=url,
thumbnail=item.thumbnail, language=language, server=server))
return itemlist
@@ -191,5 +192,6 @@ def play(item):
for video_item in itemlist:
video_item.title = "%s [%s]" % (item.fulltitle, item.lang)
video_item.thumbnail = item.thumbnail
video_item.language = item.language
return itemlist

View File

@@ -0,0 +1,25 @@
{
"id": "tiotorrent",
"name": "TioTorrent",
"active": true,
"adult": false,
"language": ["cast","lat"],
"thumbnail": "https://s1.postimg.org/29eths1fi7/tiotorrent.png",
"banner": "https://s1.postimg.org/9gkc73lxb3/tiotorrent-banner.png",
"version": 1,
"categories": [
"movie",
"tvshow",
"torrent"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,285 @@
# -*- coding: utf-8 -*-
# -*- Channel TioTorrent -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channelselector import get_thumb
from platformcode import logger
from platformcode import config
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
host = 'http://www.tiotorrent.com/'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Peliculas",
action="movie_list",
thumbnail=get_thumb("channels_movie.png")
))
itemlist.append(item.clone(title="Series",
action="series_list",
thumbnail=get_thumb("channels_tvshow.png")
))
return itemlist
def movie_list(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Estrenos",
action="lista",
url=host + 'estrenos-de-cine',
extra='movie'
))
itemlist.append(item.clone(title="Todas",
action="lista",
url=host + 'peliculas',
extra='movie'
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + 'peliculas/?pTit=',
thumbnail=get_thumb("search.png"),
extra='movie'
))
return itemlist
def series_list(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Todas",
action="lista",
url=host + 'series',
extra='serie'
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + 'series/?pTit=',
thumbnail=get_thumb("search.png"),
extra='serie'
))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
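# Se normaliza el HTML (se quitan comillas, saltos de línea y espacios extra) para simplificar los patrones.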
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def lista(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.extra == 'movie':
patron = "<div class=moviesbox.*?><a href=(.*?)>.*?image:url\('(.*?)'\)>.*?<b>.*?>(.*?)<"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
contentTitle = scrapedtitle.decode('latin1').encode('utf8')
title = contentTitle
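# La ruta del póster se pasa como filtro en infoLabels para que TMDB identifique la ficha cuyo póster coincide.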
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w396", "")
filtro_list = {"poster_path": filtro_thumb}
filtro_list = filtro_list.items()
itemlist.append(item.clone(action='findvideos',
title=title, url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
infoLabels={'filtro': filtro_list},
extra=item.extra
))
else:
patron = "<div class=moviesbox.*?>.*?episode>(.*?)x(.*?)<.*?href=(.*?)>.*?image:url\('(.*?)'.*?href.*?>(.*?)<"
matches = re.compile(patron, re.DOTALL).findall(data)
for season, episode, scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
contentSerieName = scrapedtitle
title = '%s' % contentSerieName
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w396", "")
filtro_list = {"poster_path": filtro_thumb}
filtro_list = filtro_list.items()
contentSeason=season
contentEpisode=episode
itemlist.append(item.clone(action='seasons',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=contentSerieName,
contentSeason=contentSeason,
contentEpisode=contentEpisode,
infoLabels={'filtro': filtro_list},
extra=item.extra
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb =True)
#Paginacion
if itemlist !=[]:
actual_page_url = item.url
next_page = scrapertools.find_single_match(data,'<span class=pagination_next><a href=(.*?)>')
if next_page !='':
itemlist.append(item.clone(action = "lista",
title = 'Siguiente >>>',
url = next_page,
thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'
))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return lista(item)
else:
return []
def seasons(item):
logger.info()
itemlist=[]
infoLabels = item.infoLabels
data=get_source(item.url)
patron ='href=javascript:showSeasson\(.*?\); id=.*?>Temporada (.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for season in matches:
title='Temporada %s' % season
infoLabels['season'] = season
itemlist.append(Item(channel=item.channel,
title= title,
url=item.url,
action='episodesxseasons',
contentSeasonNumber=season,
contentSerieName=item.contentSerieName,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist = itemlist[::-1]
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="all_episodes", contentSerieName=item.contentSerieName))
return itemlist
def all_episodes(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron = "<div class=corner-episode>%sx(.\d+)<\/div><a href=(.*?)>.*?" % item.contentSeasonNumber
patron += "image:url\('(.*?)'.*?href.*?>(%s)<" % item.contentSerieName
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels=item.infoLabels
for episode, scrapedurl, scrapedthumbnail, scrapedtitle in matches:
contentEpisodeNumber=episode
season = item.contentSeasonNumber
url=scrapedurl
thumbnail=scrapedthumbnail
infoLabels['episode']=episode
title = '%sx%s - %s' % (season, episode, item.contentSerieName)
itemlist.append(Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=item.contentSerieName,
contentEpisodeNumber=contentEpisodeNumber,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist[::-1]
def findvideos(item):
logger.info()
itemlist=[]
data = get_source(item.url)
patron = "<a class=dload.*? target=_blank>.*?<\/a><i>(.*?)<\/i>.*?<a href=.*?showDownload\((.*?)\);"
matches = re.compile(patron, re.DOTALL).findall(data)
for quality, extra_info in matches:
extra_info= extra_info.replace("'",'')
extra_info= extra_info.split(',')
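# Los argumentos de showDownload(...) contienen la URL del torrent:
# índice 1 para películas e índice 2 para episodios de serie.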
title = '%s [%s]' % (item.contentTitle, quality)
url = extra_info[1]
if item.extra == 'movie':
url = extra_info[1]
else:
url = extra_info[2]
server = 'torrent'
itemlist.append(Item(channel=item.channel,
title=title,
contentTitle= item.title,
url=url,
action='play',
quality=quality,
server=server,
thumbnail = item.infoLabels['thumbnail'],
infoLabels=item.infoLabels
))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
def newest(category):
logger.info()
item = Item()
itemlist = []
try:
if category == 'peliculas':
item.url = host + 'estrenos-de-cine'
item.extra='movie'
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist

View File

@@ -13,7 +13,7 @@ from platformcode import config, logger
host = 'http://verpeliculasnuevas.com'
IDIOMAS = {'latino': 'Latino', 'castellano': 'Español', 'sub': 'VOSE'}
list_language = IDIOMAS.values()
taudio = {'latino': '[COLOR limegreen]LATINO[/COLOR]',

View File

@@ -1,4 +1,7 @@
# -*- coding: utf-8 -*-
# -*- Channel TioTorrent -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re

View File

@@ -202,7 +202,7 @@ def filterchannels(category, view="thumb_"):
def get_thumb(thumb_name, view="thumb_"):
icon_pack_name = config.get_setting('icon_set', default="default")
if icon_pack_name == "default":
resource_path = os.path.join(config.get_runtime_path(), "resources", "media", "themes")
else:

View File

@@ -223,7 +223,6 @@ def save_tvshow(item, episodelist):
return 0, 0, -1 # Salimos sin guardar
scraper_return = scraper.find_and_set_infoLabels(item)
# Llegados a este punto podemos tener:
# scraper_return = True: Un item con infoLabels con la información actualizada de la serie
# scraper_return = False: Un item sin información de la peli (se ha dado a cancelar en la ventana)
@@ -238,6 +237,8 @@ def save_tvshow(item, episodelist):
if config.get_setting("original_title_folder", "videolibrary") == 1 and item.infoLabels['originaltitle']:
base_name = item.infoLabels['originaltitle']
elif item.infoLabels['tvshowtitle']:
base_name = item.infoLabels['tvshowtitle']
elif item.infoLabels['title']:
base_name = item.infoLabels['title']
else:
@@ -566,7 +567,6 @@ def add_tvshow(item, channel=None):
# Obtiene el listado de episodios
itemlist = getattr(channel, item.action)(item)
insertados, sobreescritos, fallidos, path = save_tvshow(item, itemlist)
if not insertados and not sobreescritos and not fallidos:

View File

@@ -5,7 +5,7 @@ from threading import Timer
import xbmc
import xbmcaddon
import xbmcgui
from channelselector import get_thumb
from platformcode import config
@@ -82,27 +82,13 @@ def set_key():
MAIN_MENU = {
"news": {"label": "Novedades",
"icon": filetools.join(config.get_runtime_path(), "resources", "media", "general", "default",
"thumb_news.png"), "order": 0},
"channels": {"label": "Canales",
"icon": filetools.join(config.get_runtime_path(), "resources", "media", "general", "default",
"thumb_channels.png"), "order": 1},
"search": {"label": "Buscador",
"icon": filetools.join(config.get_runtime_path(), "resources", "media", "general", "default",
"thumb_search.png"), "order": 2},
"favorites": {"label": "Favoritos",
"icon": filetools.join(config.get_runtime_path(), "resources", "media", "general", "default",
"thumb_favorites.png"), "order": 3},
"videolibrary": {"label": "Videoteca",
"icon": filetools.join(config.get_runtime_path(), "resources", "media", "general", "default",
"thumb_videolibrary.png"), "order": 4},
"downloads": {"label": "Descargas",
"icon": filetools.join(config.get_runtime_path(), "resources", "media", "general", "default",
"thumb_downloads.png"), "order": 5},
"settings": {"label": "Configuración",
"icon": filetools.join(config.get_runtime_path(), "resources", "media", "general", "default",
"thumb_setting_0.png"), "order": 6},
"news": {"label": "Novedades", "icon": get_thumb("news.png"), "order": 0},
"channels": {"label": "Canales", "icon": get_thumb("channels.png"), "order": 1},
"search": {"label": "Buscador", "icon": get_thumb("search.png"), "order": 2},
"favorites": {"label": "Favoritos", "icon": get_thumb("favorites.png"), "order": 3},
"videolibrary": {"label": "Videoteca", "icon": get_thumb("videolibrary.png"), "order": 4},
"downloads": {"label": "Descargas", "icon": get_thumb("downloads.png"), "order": 5},
"settings": {"label": "Configuración", "icon": get_thumb("setting_0.png"), "order": 6}
}
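Replacing seven hand-assembled filetools.join(...) paths with get_thumb() shrinks MAIN_MENU and routes every menu icon through channelselector, so the launcher follows the icon_set setting instead of being pinned to resources/media/general/default. A hedged sketch of what the delegation buys; only the signature comes from the get_thumb hunk above, the path layout is illustrative:

    import os

    def get_thumb(thumb_name, view="thumb_"):
        # Stands in for config.get_setting('icon_set', default="default").
        icon_pack_name = "default"
        resource_path = os.path.join("resources", "media", "themes")
        return os.path.join(resource_path, view + thumb_name)

    print(get_thumb("news.png"))  # resources/media/themes/thumb_news.png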

View File

@@ -39,7 +39,7 @@
<setting id="videolibrary_kodi" type="bool" label="Autoconfigurar videoteca de XBMC/Kodi para contenido de Alfa" enable="lt(-1,2)+eq(0,false)" default="false"/>
</category>
<category label="Opciones Visuales">
<setting id="icon_set" type="labelenum" label="Set de iconos" lvalues="default|dark" default="default"/>
<setting id="icon_set" type="labelenum" label="Set de iconos" values="default|dark" default="default"/>
</category>
<category label="Otros">
<setting label="Info de películas/series en menú contextual" type="lsep"/>

View File

@@ -10,7 +10,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "https://www.bitporno.com/e/([A-z0-9]+)",
"pattern": "https://www.bitporno.com/(?:e|embed)/([A-z0-9]+)",
"url": "https://www.bitporno.com/e/\\1"
},
{
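Widening /e/ to (?:e|embed) lets the dispatcher catch both URL shapes the site emits, while the \1 back-reference still canonicalizes them to the /e/ player page. A quick standalone check of the new pattern (sample URLs invented):

    import re

    PATTERN = r"https://www.bitporno.com/(?:e|embed)/([A-z0-9]+)"
    for sample in ("https://www.bitporno.com/e/ABC123",
                   "https://www.bitporno.com/embed/ABC123"):
        # Both forms normalize to the same /e/ URL, as the "url" template does.
        print(re.sub(PATTERN, r"https://www.bitporno.com/e/\1", sample))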

View File

@@ -0,0 +1,49 @@
{
"active": true,
"changes": [
{
"date": "09/10/2017",
"description": "Versión inicial"
}
],
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "cloudsany.com/i/([A-z0-9]+)",
"url": "https://cloudsany.com/i/\\1"
}
]
},
"free": true,
"id": "cloudsany",
"name": "cloudsany",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s1.postimg.org/6wixo35myn/cloudsany1.png",
"version": 1
}

View File

@@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa addon - KODI Plugin
# Conector para cloudsany
# https://github.com/alfa-addon
# ------------------------------------------------------------
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "no longer exists" in data:
return False, "[Cloudsany] El fichero ha sido borrado"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
data = scrapertools.find_single_match(data, 'p,a,c,k,e.*?</script>')
unpack = jsunpack.unpack(data)
logger.info("Intel11 %s" %unpack)
video_urls = []
videourl = scrapertools.find_single_match(unpack, 'config={file:"([^"]+)')
video_urls.append([".MP4 [Cloudsany]", videourl])
return video_urls
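The connector follows the usual packed-JS recipe: grab the eval(function(p,a,c,k,e,d)...) blob, expand it with jsunpack, then regex the player config out of the readable source (the "Intel11" logger line reads like leftover debugging). A hedged sketch of the final step, assuming the unpacked payload carries the jwplayer-style config= the regex expects; the sample string is fabricated:

    import re

    unpacked = 'jwplayer("vplayer").setup(config={file:"https://example.com/v.mp4",image:"x"});'
    videourl = re.search(r'config={file:"([^"]+)', unpacked).group(1)
    print(videourl)  # https://example.com/v.mp4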

View File

@@ -48,5 +48,6 @@
"visible": false
}
],
"thumbnail": "https://s1.postimg.org/9e6doboo2n/cloudy1.png",
"version": 1
}

View File

@@ -45,7 +45,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
headers['Accept'] = "*/*"
headers['Host'] = "www.flashx.tv"
coding_url = 'https://www.flashx.tv/flashx.php?fxfx=7'
coding_url = 'https://www.flashx.tv/flashx.php?f=x&fxfx=6'
headers['X-Requested-With'] = 'XMLHttpRequest'
httptools.downloadpage(coding_url, headers=headers)

View File

@@ -7,12 +7,12 @@ from core import scrapertools
from lib import jsunpack
from platformcode import logger
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0'}
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0'}
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, headers=headers).data
data = httptools.downloadpage(page_url, add_referer = True).data
if "File was deleted" in data or "Not Found" in data or "File was locked by administrator" in data:
return False, "[Gamovideo] El archivo no existe o ha sido borrado"
@@ -24,7 +24,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, headers=headers).data
data = httptools.downloadpage(page_url, add_referer = True, headers=headers).data
packer = scrapertools.find_single_match(data,
"<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
@@ -32,7 +32,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = jsunpack.unpack(packer)
data = re.sub(r'\n|\t|\s+', '', data)
host = scrapertools.find_single_match(data, '\[\{image:"(http://[^/]+/)')
mediaurl = scrapertools.find_single_match(data, ',\{file:"([^"]+)"')
if not mediaurl.startswith(host):
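Two changes land here: the page is now fetched with add_referer=True so httptools supplies a Referer header (embeds on some hosts refuse referer-less requests), and the User-Agent rolls back to a Firefox/40 string. The trailing context hints at the rest of the routine: the image: host absolutizes a relative file: URL. A hedged guess at that normalization, with invented sample values:

    # Sketch only: the real continuation after startswith() is not shown above.
    host = "http://gamovideo.example/"   # captured from [{image:"http://.../
    mediaurl = "v/abc123.mp4"            # captured from ,{file:"..."
    if not mediaurl.startswith(host):
        mediaurl = host + mediaurl
    print(mediaurl)  # http://gamovideo.example/v/abc123.mp4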

View File

@@ -1,49 +1,43 @@
{
"active": true,
"changes": [
{
"date": "16/02/2017",
"description": "Primera versión"
}
],
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "((?:copiapop.com|diskokosmiko.mx)/[^\\s'\"]+)",
"url": "http://\\1"
}
]
},
"free": true,
"id": "copiapop",
"name": "copiapop",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
"version": 1
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "((?:kbagi.com|diskokosmiko.mx)/[^\\s'\"]+)",
"url": "http://\\1"
}
]
},
"free": true,
"id": "kbagi",
"name": "kbagi",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
"version": 1
}

View File

@@ -8,15 +8,15 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
if "copiapop.com" in page_url:
from channels import copiapop
logueado, error_message = copiapop.login("copiapop.com")
if "kbagi.com" in page_url:
from channels import kbagi
logueado, error_message = kbagi.login("kbagi.com")
if not logueado:
return False, error_message
data = httptools.downloadpage(page_url).data
if ("File was deleted" or "Not Found" or "File was locked by administrator") in data:
return False, "[Copiapop] El archivo no existe o ha sido borrado"
return False, "[kbagi] El archivo no existe o ha sido borrado"
return True, ""
@@ -26,8 +26,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls = []
data = httptools.downloadpage(page_url).data
host = "http://copiapop.com"
host_string = "copiapop"
host = "http://kbagi.com"
host_string = "kbagi"
if "diskokosmiko.mx" in page_url:
host = "http://diskokosmiko.mx"
host_string = "diskokosmiko"

View File

@@ -10,7 +10,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(rapidgator.net/file/.*?(?:\\.html))",
"pattern": "(rapidgator.net/file/\\w+(?:\\.html|))",
"url": "http://\\1"
}
]
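The old .*?(?:\.html) both required the .html suffix and could lazily swallow junk before it; the new \w+(?:\.html|) pins the file id to word characters and makes the suffix optional, so bare file links resolve too. A quick check with an invented id:

    import re

    PATTERN = r"(rapidgator.net/file/\w+(?:\.html|))"
    for sample in ("http://rapidgator.net/file/a1b2c3d4.html",
                   "http://rapidgator.net/file/a1b2c3d4"):
        print("http://" + re.search(PATTERN, sample).group(1))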
@@ -50,4 +50,4 @@
],
"thumbnail": "server_rapidgator.png",
"version": 1
}
}

View File

@@ -68,5 +68,6 @@
"visible": false
}
],
"thumbnail": "https://s1.postimg.org/912d5vxmv3/streamplay1.png",
"version": 1
}
}

View File

@@ -20,10 +20,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("url=" + page_url)
data = httptools.downloadpage(page_url).data
key = scrapertools.find_single_match(data, "var mpri_Key\s*=\s*'([^']+)'")
data_vt = httptools.downloadpage("http://vidup.me/jwv/%s" % key).data
vt = scrapertools.find_single_match(data_vt, 'direct\|([^\|]+)\|')
vt = scrapertools.find_single_match(data_vt, 'file\|(.*?)\|direct')
# Extrae la URL
video_urls = []
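The jwv response is a p,a,c,k,e,d wordlist, so what matters is the token's neighbors rather than its absolute position: the old expression captured whatever followed direct|, the new one captures the span between file| and |direct, tracking a reordering on vidup's side. Against a fabricated sample of the new layout:

    import re

    data_vt = "player|sources|file|hx4z8kq2|direct|download"  # fabricated
    vt = re.search(r'file\|(.*?)\|direct', data_vt).group(1)
    print(vt)  # hx4z8kq2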

View File

@@ -52,5 +52,6 @@
"visible": false
}
],
"thumbnail": "https://s1.postimg.org/597or3a31b/vidzi1.png",
"version": 1
}
}

View File

@@ -58,5 +58,6 @@
"visible": false
}
],
"thumbnail" : "https://s1.postimg.org/4wje61el4f/yourupload1.png",
"version": 1
}
}