Merge remote-tracking branch 'alfa-addon/master'

Committed by unknown on 2017-09-04 11:24:38 -03:00
23 changed files with 1015 additions and 321 deletions

View File

@@ -677,16 +677,17 @@ class platform(Platformtools):
if not "label" in c: continue
# Obtenemos el valor
if not c["id"] in dict_values:
if not callback:
c["value"] = config.get_setting(c["id"], **kwargs)
if "id" in c:
if not c["id"] in dict_values:
if not callback:
c["value"] = config.get_setting(c["id"], **kwargs)
else:
c["value"] = c["default"]
dict_values[c["id"]] = c["value"]
else:
c["value"] = c["default"]
dict_values[c["id"]] = c["value"]
else:
c["value"] = dict_values[c["id"]]
c["value"] = dict_values[c["id"]]
# Translation
if c['label'].startswith('@') and unicode(c['label'][1:]).isnumeric():
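The new guard matters because a channel's settings JSON may define purely decorative controls that carry a label but no id; a minimal sketch with hypothetical controls, using the default value as a stand-in for config.get_setting():

    controls = [{"type": "label", "label": "Opciones"},                # no "id": used to raise KeyError
                {"id": "quality", "label": "Calidad", "default": "720p"}]
    dict_values = {}
    for c in controls:
        if not "label" in c:
            continue
        if "id" in c:                           # the guard added above
            if not c["id"] in dict_values:
                c["value"] = c["default"]       # stand-in for config.get_setting(c["id"])
                dict_values[c["id"]] = c["value"]
            else:
                c["value"] = dict_values[c["id"]]
    print dict_values                           # -> {'quality': '720p'}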

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="1.9.2" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="1.9.4" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -18,14 +18,14 @@
<screenshot>resources/media/general/ss/4.jpg</screenshot>
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
-[COLOR green][B]Arreglos[/B][/COLOR]
-[I]- pelis24
-- vidoza
-- cinetux
-- peliculasrey
-- newpct1
-- animeflv
-- fixes internos[/I]
+[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
+[I]- newpct1
+- gnula.mobi
+- divxtotal
+- tvseriesdk
+- maxipelis
+- fix internos[/I]
+[COLOR green]Gracias a [COLOR yellow]paeznet[/COLOR] por su colaboración en esta versión[/COLOR]
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

plugin.video.alfa/channels/divxtotal.py Executable file → Normal file
View File

@@ -232,7 +232,6 @@ def findtemporadas(item):
th.start()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-    if len(item.extra.split("|")):
    if len(item.extra.split("|")) >= 4:
        fanart = item.extra.split("|")[2]
@@ -266,7 +265,7 @@ def findtemporadas(item):
fanart_extra = item.fanart
fanart_info = item.fanart
-    bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada (\d+) </a>(.*?)</table>')
+    bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada.*?(\d+).*?<\/a>(.*?)<\/table>')
for temporada, bloque_epis in bloque_episodios:
item.infoLabels = item.InfoLabels
item.infoLabels['season'] = temporada
@@ -299,9 +298,8 @@ def epis(item):
itemlist = []
if item.extra == "serie_add":
item.url = item.datalibrary
     patron = scrapertools.find_multiple_matches(item.url,
-                                                '<td><img src=".*?images/(.*?)\.png.*?<a href="([^"]+)" title="">.*?(\d+x\d+).*?td>')
+                                                '<td><img src=".*?images\/(.*?)\.png".*?href="([^"]+)" title="">.*?(\d+x\d+).*?td>')
for idioma, url, epi in patron:
episodio = scrapertools.find_single_match(epi, '\d+x(\d+)')
item.infoLabels['episode'] = episodio

View File

@@ -0,0 +1,30 @@
{
"id": "gmobi",
"name": "gmobi",
"active": true,
"adult": false,
"language": "es",
"version": 1,
"thumbnail": "http://gnula.mobi/wp-content/uploads/2016/08/Untitled-6.png",
"banner": "",
"changes": [
{
"date": "25/08/2017",
"description": "Nuevo canal"
}
],
"categories": [
"movie",
"adult"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -0,0 +1,95 @@
# -*- coding: iso-8859-1 -*-
#------------------------------------------------------------
# Alfa
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import httptools
from core import tmdb
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Novedades" , action="peliculas", url="http://gnula.mobi/"))
itemlist.append(item.clone(title="Castellano" , action="peliculas",
url="http://www.gnula.mobi/tag/esp)anol/"))
itemlist.append(item.clone(title="Latino" , action="peliculas", url="http://gnula.mobi/tag/latino/"))
itemlist.append(item.clone(title="VOSE" , action="peliculas", url="http://gnula.mobi/tag/subtitulada/"))
itemlist.append(item.clone(title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://gnula.mobi/?s=%s" % texto
try:
return sub_search(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="row">.*?<a href="([^"]+)" title="([^"]+)">.*?<img src="(.*?)" title'
matches = scrapertools.find_multiple_matches(data, patron)
for url,name,img in matches:
itemlist.append(item.clone(title=name, url=url, action="findvideos", show=name, thumbnail=img))
    paginacion = scrapertools.find_single_match(data, '<a href="([^"]+)" ><i class="glyphicon '
                                                      'glyphicon-chevron-right" aria-hidden="true"></i>')
    if paginacion:
        itemlist.append(item.clone(action="sub_search", title="Next page >>", url=paginacion))
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="col-mt-5 postsh">.*?href="(.*?)" title="(.*?)".*?under-title">(.*?)<.*?src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl, scrapedyear, scrapedtitle, scrapedthumbnail in matches:
url = scrapedurl
title = scrapedtitle
year = scrapertools.find_single_match(scrapedyear, r'.*?\((\d{4})\)')
thumbnail = scrapedthumbnail
        new_item = Item(channel=item.channel, action="findvideos", title=title, contentTitle=title, url=url,
                        thumbnail=thumbnail, infoLabels={'year': year})
if year:
tmdb.set_infoLabels_item(new_item)
itemlist.append(new_item)
next_page_url = scrapertools.find_single_match(data,'<link rel="next" href="(.*?)"\/>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append(item.clone(action="peliculas", title="Siguiente >>", text_color="yellow",
url=next_page_url))
return itemlist
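peliculas() pages through results via the document's rel="next" hint joined against the current URL; a minimal sketch of that idiom (the sample markup is hypothetical):

    import re
    import urlparse  # Python 2, as in the channel code

    def next_url(current_url, html):
        m = re.search(r'<link rel="next" href="(.*?)"/>', html)
        return urlparse.urljoin(current_url, m.group(1)) if m else ""

    print next_url("http://gnula.mobi/page/1/", '<link rel="next" href="/page/2/"/>')
    # -> http://gnula.mobi/page/2/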

View File

@@ -0,0 +1,30 @@
{
"id": "maxipelis",
"name": "Maxipelis",
"active": true,
"adult": false,
"language": "es",
"version": 1,
"thumbnail": "http://www.maxipelis.net/wp-content/uploads/2016/12/applogo.png",
"banner": "",
"changes": [
{
"date": "25/08/2017",
"description": "Nuevo canal"
}
],
"categories": [
"movie",
"adult"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,149 @@
# -*- coding: iso-8859-1 -*-
#------------------------------------------------------------
# Alfa
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://www.maxipelis.net'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Novedades" , action="peliculas", url=host + "/pelicula"))
itemlist.append(Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return sub_search(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="thumbnail animation-2"> <a href="([^"]+)"> <img src="([^"]+)" alt="(.*?)" />.*?'
patron +='<div class="contenido"><p>(.*?)</p>'
matches = scrapertools.find_multiple_matches(data, patron)
for url,img,name,plot in matches:
itemlist.append(item.clone(channel=item.channel, action="findvideos", title=name, url=url, plot=plot,
thumbnail=img))
paginacion = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)" ><span class="'
'icon-chevron-right"></span>')
if paginacion:
itemlist.append(Item(channel=item.channel, action="sub_search", title="Next page >>" , url=paginacion))
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li class="cat-item"><a href="([^"]+)".*?>(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=host + scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot))
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="poster">.*?src="(.*?)" alt="(.*?)">.*?'
patron += '"quality">(.*?)<.*?href="(.*?)".*?<span>(\d{4}).*?"texto">(.*?)<.*?'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedquality, scrapedurl, scrapedyear, scrapedplot in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
contentTitle = scrapedtitle
quality = scrapedquality
year = scrapedyear
plot = scrapedplot
if quality == "" or year=="" :
title = contentTitle
else:
title = contentTitle + " (" + year + ") " + "[COLOR red]" + quality + "[/COLOR]"
new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = contentTitle , infoLabels={'year':year} )
if year:
tmdb.set_infoLabels_item(new_item)
itemlist.append(new_item)
try:
patron = '<a href="([^"]+)" ><span class="icon-chevron-right"></span></a></div>'
next_page = re.compile(patron,re.DOTALL).findall(data)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Siguiente >>" , text_color="yellow",
url=next_page[0]))
except: pass
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<tr><td> <a class="link_a" href="([^"]+)".*?<td> (.*?)</td><td> (.*?)</td><td> (.*?)</td>'
matches = scrapertools.find_multiple_matches(data, patron)
for url, server, idioma, calidad in matches:
title = server + " [" + idioma + "] [" + calidad + "]"
itemlist.append(item.clone(action="play", title=title, fulltitle = item.title, url=url))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos' :
itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Agregar esta pelicula a la Videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library", extra="findvideos",
contentTitle = item.contentTitle))
return itemlist
def play(item):
logger.info()
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist
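The title assembly in peliculas() is plain concatenation plus a Kodi color tag; a hedged sketch with hypothetical values:

    contentTitle, year, quality = "Titanic", "1997", "HD 1080p"   # hypothetical values
    if quality == "" or year == "":
        title = contentTitle
    else:
        title = contentTitle + " (" + year + ") " + "[COLOR red]" + quality + "[/COLOR]"
    print title   # -> Titanic (1997) [COLOR red]HD 1080p[/COLOR]  (Kodi renders the tag as red text)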

View File

@@ -8,7 +8,9 @@ from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
host = 'http://newpct1.com/'
def mainlist(item):
logger.info()
@@ -17,13 +19,15 @@ def mainlist(item):
thumb_pelis=get_thumb("channels_movie.png")
thumb_series=get_thumb("channels_tvshow.png")
thumb_search = get_thumb("search.png")
itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url="http://www.newpct1.com/",
itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
extra="peliculas", thumbnail=thumb_pelis ))
itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url="http://www.newpct1.com/", extra="series",
itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
thumbnail=thumb_series))
# itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
itemlist.append(
Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search))
return itemlist
@@ -96,7 +100,7 @@ def alfabeto(item):
title = scrapedtitle.upper()
url = scrapedurl
itemlist.append(Item(channel=item.channel, action="completo", title=title, url=url, extra=item.extra))
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))
return itemlist
@@ -105,13 +109,23 @@ def listado(item):
logger.info()
# logger.info("[newpct1.py] listado url=" + item.url)
itemlist = []
url_next_page =''
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
#logger.debug(data)
logger.debug('item.modo: %s'%item.modo)
logger.debug('item.extra: %s'%item.extra)
patron = '<ul class="' + item.extra + '">(.*?)</ul>'
logger.debug("patron=" + patron)
fichas = scrapertools.get_match(data, patron)
if item.modo != 'next' or item.modo =='':
logger.debug('item.title: %s'% item.title)
patron = '<ul class="' + item.extra + '">(.*?)</ul>'
logger.debug("patron=" + patron)
fichas = scrapertools.get_match(data, patron)
page_extra = item.extra
else:
fichas = data
page_extra = item.extra
# <li><a href="http://www.newpct1.com/pelicula/x-men-dias-del-futuro-pasado/ts-screener/" title="Descargar XMen Dias Del Futuro gratis"><img src="http://www.newpct1.com/pictures/f/58066_x-men-dias-del-futuro--blurayrip-ac3-5.1.jpg" width="130" height="180" alt="Descargar XMen Dias Del Futuro gratis"><h2>XMen Dias Del Futuro </h2><span>BluRayRip AC3 5.1</span></a></li>
patron = '<li><a href="([^"]+).*?' # url
@@ -120,6 +134,25 @@ def listado(item):
patron += '<span>([^<]*)</span>' # calidad
matches = re.compile(patron, re.DOTALL).findall(fichas)
logger.debug('item.next_page: %s'%item.next_page)
    # Pagination
    if item.next_page != 'b':
        if len(matches) > 30:
            url_next_page = item.url
            matches = matches[:30]
            next_page = 'b'
            modo = 'continue'
    else:
        matches = matches[30:]
        next_page = 'a'
        patron_next_page = '<a href="([^"]+)">Next<\/a>'
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        modo = 'continue'
        if len(matches_next_page) > 0:
            url_next_page = matches_next_page[0]
            modo = 'next'
for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
url = scrapedurl
@@ -127,33 +160,17 @@ def listado(item):
thumbnail = scrapedthumbnail
action = "findvideos"
extra = ""
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
if "1.com/series" in url:
action = "completo"
action = "episodios"
extra = "serie"
title = scrapertools.find_single_match(title, '([^-]+)')
title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "",
1).strip()
# logger.info("[newpct1.py] titulo="+title)
'''
if len(title)>3:
url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=%22' + title.replace(" ","%20") + '%22'
else:
url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=' + title
if "1.com/series-hd" in url:
extra="serie-hd"
url = url_i + '&categoryID=&categoryIDR=1469&calidad=' + calidad.replace(" ","+") #DTV+720p+AC3+5.1
elif "1.com/series-vo" in url:
extra="serie-vo"
url = url_i + '&categoryID=&categoryIDR=775&calidad=' + calidad.replace(" ","+") #HDTV+720p+AC3+5.1
elif "1.com/series/" in url:
extra="serie-tv"
url = url_i + '&categoryID=&categoryIDR=767&calidad=' + calidad.replace(" ","+")
url += '&idioma=&ordenar=Nombre&inon=Descendente'
'''
else:
title = title.replace("Descargar", "", 1).strip()
if title.endswith("gratis"): title = title[:-7]
@@ -164,9 +181,10 @@ def listado(item):
context = ""
context_title = scrapertools.find_single_match(url, "http://(?:www.)?newpct1.com/(.*?)/(.*?)/")
#logger.debug('context_title[0]: %s' % context_title[0])
if context_title:
try:
context = context_title[0].replace("pelicula", "movie").replace("descargar", "movie").replace("series",
context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
"tvshow")
context_title = context_title[1].replace("-", " ")
if re.search('\d{4}', context_title[-4:]):
@@ -176,22 +194,126 @@ def listado(item):
except:
context_title = show
        logger.debug('context title: %s' % context_title)
logger.debug('year: %s' % year)
-        itemlist.append(
-            Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, extra=extra, show=show,
-                 contentTitle=context_title, contentType=context, context=["buscar_trailer"]))
+        logger.debug('context: %s' % context)
+        if not 'array' in title:
+            new_item = Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
+                            extra=extra,
+                            show=context_title, contentTitle=context_title, contentType=context,
+                            context=["buscar_trailer"], infoLabels={'year': year})
+            if year:
+                tmdb.set_infoLabels_item(new_item, seekTmdb=True)
+            itemlist.append(new_item)
if "pagination" in data:
patron = '<ul class="pagination">(.*?)</ul>'
paginacion = scrapertools.get_match(data, patron)
if "Next" in paginacion:
url_next_page = scrapertools.get_match(paginacion, '<a href="(http[^>]+)>Next</a>')[:-1].replace(" ", "%20")
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url_next_page,
extra=item.extra))
if url_next_page:
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
url=url_next_page, next_page=next_page, folder=True,
text_color='yellow', text_bold=True, modo = modo, plot = extra,
extra = page_extra))
# if "pagination" in data:
# patron = '<ul class="pagination">(.*?)</ul>'
# paginacion = scrapertools.get_match(data, patron)
#
# if "Next" in paginacion:
# url_next_page = scrapertools.get_match(paginacion, '<a href="(http[^>]+)>Next</a>')[:-1].replace(" ", "%20")
# itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url_next_page,
# extra=item.extra))
# logger.info("[newpct1.py] listado items:" + str(len(itemlist)))
return itemlist
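The 30-result split that listado() drives with item.next_page ('a'/'b') can be seen in isolation; split_page below is a hypothetical reduction that treats a plain list as the regex matches (the real code also records url_next_page and follows the site's Next link in the 'b' state):

    def split_page(matches, next_page_state):
        # First visit to a page: serve the first 30 hits and flag the remainder.
        if next_page_state != 'b' and len(matches) > 30:
            return matches[:30], 'b'
        # Second visit: serve the rest; the caller then follows the site's Next link.
        return matches[30:], 'a'

    first, state = split_page(range(45), 'a')   # -> 30 items, state 'b'
    rest, state = split_page(range(45), state)  # -> remaining 15 items, state 'a'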
def listado2(item):
logger.info()
itemlist = []
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
logger.debug(data)
list_chars = [["ñ", "ñ"]]
for el in list_chars:
data = re.sub(r"%s" % el[0], el[1], data)
try:
# logger.debug("data %s " % data)
get, post = scrapertools.find_single_match(data, '<ul class="pagination">.*?<a class="current" href.*?'
'<a\s*href="([^"]+)"(?:\s*onClick=".*?\'([^"]+)\'.*?")')
except:
post = False
if post:
# logger.debug("post %s" % post)
# logger.debug("item.post %s" % item.post)
if "pg" in item.post:
item.post = re.sub(r"pg=(\d+)", "pg=%s" % post, item.post)
# logger.debug("item.post %s" % item.post)
else:
item.post += "&pg=%s" % post
# logger.debug("item.post %s" % item.post)
# logger.debug("data %s " % next_page)
pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
data = scrapertools.get_match(data, pattern)
# logger.debug("data %s " % data)
pattern = '<li><a href="(?P<url>[^"]+)".*?<img src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'
matches = re.compile(pattern, re.DOTALL).findall(data)
for url, thumb, title in matches:
# fix encoding for title
title = scrapertools.htmlclean(title)
title = title.replace("�", "ñ")
# logger.debug("\n\nu %s " % url)
# logger.debug("\nb %s " % thumb)
# logger.debug("\nt %s " % title)
        # title is the clean way, but it doesn't work if the title is too long, so we have to use title_to_fix
# title_fix = False
# if title.endswith(".."):
# title = title_to_fix
# title_fix = True
        # do not list anything that is not a video
if "/juego/" in url or "/varios/" in url:
continue
if ".com/series" in url:
# title = scrapertools.find_single_match(title, '([^-]+)')
# title = title.replace("Ver online", "", 1).replace("Ver en linea", "", 1). \
# replace("Descarga Serie HD", "", 1).strip()
show = title
# if quality:
# title = "%s [%s]" % (title, quality)
itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
context=["buscar_trailer"], show=show))
else:
# title = title.replace("Descargar", "", 1).strip()
# if title.endswith("gratis"):
# title = title[:-6].strip()
# if quality:
# title = "%s [%s]" % (title, quality)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
context=["buscar_trailer"]))
if post:
itemlist.append(item.clone(channel=item.channel, action="listado2", title=">> Página siguiente",
thumbnail=get_thumb("next.png")))
return itemlist
def completo(item):
logger.info()
@@ -202,6 +324,7 @@ def completo(item):
item_extra = item.extra
item_show = item.show
item_title = item.title
infoLabels = item.infoLabels
    # Read the entries
if item_extra.startswith("serie"):
@@ -225,14 +348,14 @@ def completo(item):
fanart = oTvdb.get_graphics_by_serieId(serieID)
if len(fanart)>0:
item.fanart = fanart[0]'''
-            try:
-                from core.tmdb import Tmdb
-                oTmdb = Tmdb(texto_buscado=item.show, tipo="tv", idioma_busqueda="es")
-                item.fanart = oTmdb.get_backdrop()
-                item.plot = oTmdb.get_sinopsis()
-                print item.plot
-            except:
-                pass
# try:
# from core.tmdb import Tmdb
# oTmdb = Tmdb(texto_buscado=item.show, tipo="tv", idioma_busqueda="es")
# item.fanart = oTmdb.get_backdrop()
# item.plot = oTmdb.get_sinopsis()
# print item.plot
# except:
# pass
else:
item_title = item.show
@@ -281,109 +404,6 @@ def completo(item):
return itemlist
def get_episodios(item):
logger.info("url=" + item.url)
itemlist = []
data = re.sub(r'\n|\r|\t|\s{2}|<!--.*?-->|<i class="icon[^>]+"></i>', "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
logger.debug("data=" + data)
patron = '<ul class="buscar-list">(.*?)</ul>'
# logger.info("[newpct1.py] patron=" + patron)
fichas = scrapertools.get_match(data, patron)
# logger.info("[newpct1.py] matches=" + str(len(fichas)))
# <li><a href="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"><img src="http://www.newpct1.com/pictures/c/minis/1880_forever.jpg" alt="Serie Forever 1x01"></a> <div class="info"> <a href="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"><h2 style="padding:0;">Serie <strong style="color:red;background:none;">Forever - Temporada 1 </strong> - Temporada<span style="color:red;background:none;">[ 1 ]</span>Capitulo<span style="color:red;background:none;">[ 01 ]</span><span style="color:red;background:none;padding:0px;">Espa<70>ol Castellano</span> Calidad <span style="color:red;background:none;">[ HDTV ]</span></h2></a> <span>27-10-2014</span> <span>450 MB</span> <span class="color"><ahref="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"> Descargar</a> </div></li>
# logger.info("[newpct1.py] get_episodios: " + fichas)
patron = '<li[^>]*><a href="([^"]+).*?' # url
patron += '<img src="([^"]+)".*?' # thumbnail
patron += '<h2 style="padding(.*?)/h2>' # titulo, idioma y calidad
matches = re.compile(patron, re.DOTALL).findall(fichas)
# logger.info("[newpct1.py] get_episodios matches: " + str(len(matches)))
for scrapedurl, scrapedthumbnail, scrapedinfo in matches:
try:
url = scrapedurl
if '</span>' in scrapedinfo:
# logger.info("[newpct1.py] get_episodios: scrapedinfo="+scrapedinfo)
try:
# <h2 style="padding:0;">Serie <strong style="color:red;background:none;">The Big Bang Theory - Temporada 6 </strong> - Temporada<span style="color:red;background:none;">[ 6 ]</span>Capitulo<span style="color:red;background:none;">[ 03 ]</span><span style="color:red;background:none;padding:0px;">Español Castellano</span> Calidad <span style="color:red;background:none;">[ HDTV ]</span></h2>
patron = '<span style=".*?">\[\s*(.*?)\]</span>.*?' # temporada
patron += '<span style=".*?">\[\s*(.*?)\].*?' # capitulo
patron += ';([^/]+)' # idioma
info_extra = re.compile(patron, re.DOTALL).findall(scrapedinfo)
(temporada, capitulo, idioma) = info_extra[0]
except:
# <h2 style="padding:0;">Serie <strong style="color:red;background:none;">The Affair Temporada 3 Capitulo 5</strong> - <span style="color:red;background:none;padding:0px;">Español Castellano</span> Calidad <span style="color:red;background:none;">[ HDTV ]</span></h2>
patron = '<strong style=".*?">([^<]+).*?' # temporada y capitulo
patron += '<span style=".*?">([^<]+)'
info_extra = re.compile(patron, re.DOTALL).findall(scrapedinfo)
(temporada_capitulo, idioma) = info_extra[0]
if re.search(r'(?i)Capitulos', temporada_capitulo):
temporada = scrapertools.find_single_match(temporada_capitulo, 'Temp.*?\s*([\d]+)')
cap1, cap2 = scrapertools.find_single_match(temporada_capitulo, 'Cap.*?\s*(\d+).*?(\d+)')
capitulo = ""
else:
temporada, capitulo = scrapertools.get_season_and_episode(temporada_capitulo).split('x')
# logger.info("[newpct1.py] get_episodios: temporada=" + temporada)
# logger.info("[newpct1.py] get_episodios: capitulo=" + capitulo)
logger.debug("idioma=" + idioma)
if '">' in idioma:
idioma = " [" + scrapertools.find_single_match(idioma, '">([^<]+)').strip() + "]"
elif '&nbsp' in idioma:
idioma = " [" + scrapertools.find_single_match(idioma, '&nbsp;([^<]+)').strip() + "]"
'''else:
idioma=""'''
if capitulo:
title = item.title + " (" + temporada.strip() + "x" + capitulo.strip() + ") " + idioma
else:
title = item.title + " (Del %sx%s al %sx%s) %s" % (temporada, cap1, temporada, cap2, idioma)
else:
# <h2 style="padding:0;">The Big Bang Theory - Temporada 6 [HDTV][Cap.602][Español Castellano]</h2>
# <h2 style="padding:0;">The Beast - Temporada 1 [HDTV] [Capítulo 13] [Español]</h2
# <h2 style="padding:0;">The Beast - Temp.1 [DVD-DVB][Cap.103][Spanish]</h2>
try:
temp, cap = scrapertools.get_season_and_episode(scrapedinfo).split('x')
except:
                # Format as seasonXepisode
patron = re.compile('Cap.*?\s*([\d]+)', re.IGNORECASE)
info_extra = patron.search(scrapedinfo)
if len(str(info_extra.group(1))) >= 3:
cap = info_extra.group(1)[-2:]
temp = info_extra.group(1)[:-2]
else:
cap = info_extra.group(1)
patron = 'Temp.*?\s*([\d]+)'
temp = re.compile(patron, re.IGNORECASE).search(scrapedinfo).group(1)
title = item.title + " (" + temp + 'x' + cap + ")"
# logger.info("[newpct1.py] get_episodios: fanart= " +item.fanart)
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail,
show=item.show, fanart=item.fanart))
except:
logger.error("ERROR al añadir un episodio")
if "pagination" in data:
patron = '<ul class="pagination">(.*?)</ul>'
paginacion = scrapertools.get_match(data, patron)
# logger.info("[newpct1.py] get_episodios: paginacion= " + paginacion)
if "Next" in paginacion:
url_next_page = "http" + scrapertools.get_match(paginacion, '<a href="http([^>]+)>Next</a>')[:-1]
url_next_page = url_next_page.replace(" ", "%20")
# logger.info("[newpct1.py] get_episodios: url_next_page= " + url_next_page)
itemlist.append(
Item(channel=item.channel, action="get_episodios", title=">> Página siguiente", url=url_next_page))
return itemlist
def buscar_en_subcategoria(titulo, categoria):
data = httptools.downloadpage("http://www.newpct1.com/pct1/library/include/ajax/get_subcategory.php",
post="categoryIDR=" + categoria).data
@@ -491,6 +511,124 @@ def findvideos(item):
return itemlist
# def episodios(item):
# # Necesario para las actualizaciones automaticas
# infoLabels= item.infoLabels
# infoLabels['show']=item.show
# return completo(Item(item.clone(url=item.url, extra="serie_add", infoLabels=infoLabels)))
def episodios(item):
-    # Needed for the automatic updates
-    return completo(Item(channel=item.channel, url=item.url, show=item.show, extra="serie_add"))
+    logger.info()
itemlist = []
infoLabels = item.infoLabels
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
# logger.debug("data %s " % data)
pattern = '<ul class="%s">(.*?)</ul>' % "pagination" # item.pattern
pagination = scrapertools.find_single_match(data, pattern)
# logger.debug("pagination %s" % pagination)
if pagination:
pattern = '<li><a href="([^"]+)">Last<\/a>'
full_url = scrapertools.find_single_match(pagination, pattern)
url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)')
list_pages = []
for x in range(1, int(last_page) + 1):
list_pages.append("%s%s" % (url, x))
# logger.debug("data %s%s" % (url, x))
# logger.debug("list_pages %s" % list_pages)
else:
list_pages = [item.url]
for index, page in enumerate(list_pages):
logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list" # item.pattern
data = scrapertools.get_match(data, pattern)
# logger.debug("data %s " % data)
pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
matches = re.compile(pattern, re.DOTALL).findall(data)
# logger.debug("data %s " % matches)
for url, thumb, info in matches:
# logger.debug("info %s" % info)
if "<span" in info: # new style
pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)" \
"(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]+>" \
"[\[]\s*(?P<quality>.*?)\s*[\]]</span>"
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]
if match["episode2"]:
multi = True
title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
str(match["episode2"]).zfill(2), match["lang"],
match["quality"])
else:
multi = False
title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
match["lang"], match["quality"])
else: # old style
pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
"(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]
# logger.debug("data %s" % match)
str_lang = ""
if match["lang"] is not None:
str_lang = "[%s]" % match["lang"]
if match["season2"] and match["episode2"]:
multi = True
if match["season"] == match["season2"]:
title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"],
match["episode2"], str_lang, match["quality"])
else:
title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"],
match["season2"], match["episode2"], str_lang,
match["quality"])
else:
title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang,
match["quality"])
multi = False
season = match['season']
episode = match['episode']
infoLabels['season']= season
infoLabels['episode'] = episode
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
quality=item.quality, multi=multi, contentSeason=season,
contentEpisodeNumber=episode, infoLabels = infoLabels))
# order list
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
if len(itemlist) > 1:
return sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
return itemlist
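The "old style" branch above parses titles with named groups; run against a hypothetical double-episode title, the groupdict comes out as follows:

    import re
    pattern = ("\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})"
               "(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?")
    info = "Breaking Bad - Temporada 5 [HDTV][Cap.509_510][Español Castellano]"
    match = [m.groupdict() for m in re.compile(pattern).finditer(info)][0]
    # quality='HDTV', season='5', episode='09', season2='5', episode2='10',
    # lang='Español Castellano'  ->  a title like "Show (5x09-10) [Español Castellano][HDTV]"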
def search(item, texto):
logger.info("search:" + texto)
# texto = texto.replace(" ", "+")
try:
item.post = "q=%s" % texto
item.pattern = "buscar-list"
itemlist = listado2(item)
return itemlist
    # Catch the exception so that a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for recent videos on several channels
# ------------------------------------------------------------
@@ -41,63 +41,51 @@ def mainlist(item):
    if list_canales['peliculas']:
        thumbnail = get_thumb("channels_movie.png")
        new_item = Item(channel=item.channel, action="novedades", extra="peliculas", title="Películas",
                        thumbnail=thumbnail)
-        new_item.context = [{"title": "Canales incluidos en: %s" % new_item.title,
-                             "extra": new_item.extra,
-                             "action": "setting_channel",
-                             "channel": new_item.channel}]
-        new_item.category = "Novedades en %s" % new_item.extra
-        itemlist.append(new_item)
+        set_category_context(new_item)
+        itemlist.append(new_item)
    if list_canales['infantiles']:
        thumbnail = get_thumb("channels_children.png")
        new_item = Item(channel=item.channel, action="novedades", extra="infantiles", title="Para niños",
                        thumbnail=thumbnail)
-        new_item.context = [{"title": "Canales incluidos en: %s" % new_item.title,
-                             "extra": new_item.extra,
-                             "action": "setting_channel",
-                             "channel": new_item.channel}]
-        new_item.category = "Novedades en %s" % new_item.extra
-        itemlist.append(new_item)
+        set_category_context(new_item)
+        itemlist.append(new_item)
    if list_canales['series']:
        thumbnail = get_thumb("channels_tvshow.png")
        new_item = Item(channel=item.channel, action="novedades", extra="series", title="Episodios de series",
                        thumbnail=thumbnail)
-        new_item.context = [{"title": "Canales incluidos en: %s" % new_item.title,
-                             "extra": new_item.extra,
-                             "action": "setting_channel",
-                             "channel": new_item.channel}]
-        new_item.category = "Novedades en %s" % new_item.extra
-        itemlist.append(new_item)
+        set_category_context(new_item)
+        itemlist.append(new_item)
    if list_canales['anime']:
        thumbnail = get_thumb("channels_anime.png")
        new_item = Item(channel=item.channel, action="novedades", extra="anime", title="Episodios de anime",
                        thumbnail=thumbnail)
-        new_item.context = [{"title": "Canales incluidos en: %s" % new_item.title,
-                             "extra": new_item.extra,
-                             "action": "setting_channel",
-                             "channel": new_item.channel}]
-        new_item.category = "Novedades en %s" % new_item.extra
-        itemlist.append(new_item)
+        set_category_context(new_item)
+        itemlist.append(new_item)
    if list_canales['documentales']:
        thumbnail = get_thumb("channels_documentary.png")
        new_item = Item(channel=item.channel, action="novedades", extra="documentales", title="Documentales",
                        thumbnail=thumbnail)
-        new_item.context = [{"title": "Canales incluidos en: %s" % new_item.title,
-                             "extra": new_item.extra,
-                             "action": "setting_channel",
-                             "channel": new_item.channel}]
-        new_item.category = "Novedades en %s" % new_item.extra
-        itemlist.append(new_item)
+        set_category_context(new_item)
+        itemlist.append(new_item)
    return itemlist
def set_category_context(item):
    item.context = [{"title": "Canales incluidos en: %s" % item.title,
                     "extra": item.extra,
                     "action": "setting_channel",
                     "channel": item.channel}]
    item.category = "Novedades en %s" % item.extra
def get_channels_list():
logger.info()

View File

@@ -9,18 +9,18 @@ from core import servertools
from core.item import Item
from platformcode import logger, config
host = "http://www.peliculasrey.com/"
def mainlist(item):
logger.info()
itemlist = []
-    itemlist.append(
-        Item(channel=item.channel, action="PorFecha", title="Año de Lanzamiento", url="http://www.peliculasrey.com"))
-    itemlist.append(Item(channel=item.channel, action="Idiomas", title="Idiomas", url="http://www.peliculasrey.com"))
-    itemlist.append(
-        Item(channel=item.channel, action="calidades", title="Por calidad", url="http://www.peliculasrey.com"))
-    itemlist.append(Item(channel=item.channel, action="generos", title="Por género", url="http://www.peliculasrey.com"))
-    itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", url="http://www.peliculasrey.com"))
+    itemlist.append(Item(channel=item.channel, action="peliculas", title="Recientes", url=host))
+    itemlist.append(Item(channel=item.channel, action="PorFecha", title="Año de Lanzamiento", url=host))
+    itemlist.append(Item(channel=item.channel, action="Idiomas", title="Idiomas", url=host))
+    itemlist.append(Item(channel=item.channel, action="calidades", title="Por calidad", url=host))
+    itemlist.append(Item(channel=item.channel, action="generos", title="Por género", url=host))
+    itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", url=host))
return itemlist
@@ -31,7 +31,6 @@ def PorFecha(item):
    # Download the page
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<section class="lanzamiento">(.*?)</section>')
-    logger.info("data=" + data)
    # Extract the entries (folders)
patron = '<a href="([^"]+).*?title="([^"]+)'
@@ -43,7 +42,6 @@ def PorFecha(item):
thumbnail = ""
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, viewmode="movie"))
@@ -57,7 +55,6 @@ def Idiomas(item):
    # Download the page
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<section class="idioma">(.*?)</section>')
-    logger.info("data=" + data)
    # Extract the entries (folders)
patron = '<a href="([^"]+).*?title="([^"]+)'
@@ -69,7 +66,6 @@ def Idiomas(item):
thumbnail = ""
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, viewmode="movie"))
@@ -83,7 +79,6 @@ def calidades(item):
    # Download the page
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<section class="calidades">(.*?)</section>')
-    logger.info("data=" + data)
    # Extract the entries (folders)
patron = '<a href="([^"]+).*?title="([^"]+)'
@@ -95,7 +90,6 @@ def calidades(item):
thumbnail = ""
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, viewmode="movie"))
@@ -106,12 +100,8 @@ def calidades(item):
def generos(item):
logger.info()
    # Download the page
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<section class="generos">(.*?)</section>')
-    logger.info("data=" + data)
    # Extract the entries (folders)
patron = '<a href="([^"]+).*?title="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
@@ -121,7 +111,6 @@ def generos(item):
thumbnail = ""
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if "Adulto" in title and config.get_setting("adult_mode") == 0:
continue
itemlist.append(
@@ -134,7 +123,7 @@ def generos(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://www.peliculasrey.com/?s=" + texto
item.url = host + "?s=" + texto
try:
# return buscar(item)
@@ -152,7 +141,6 @@ def peliculas(item):
    # Download the page
    data = httptools.downloadpage(item.url).data
-    logger.info("data=" + data)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
tabla_pelis = scrapertools.find_single_match(data,
@@ -163,13 +151,11 @@ def peliculas(item):
itemlist = []
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot="", fulltitle=scrapedtitle))
next_page = scrapertools.find_single_match(data, 'rel="next" href="([^"]+)')
if next_page != "":
# itemlist.append( Item(channel=item.channel, action="peliculas" , title=">> Página siguiente" , url=item.url+next_page, folder=True, viewmode="movie"))
itemlist.append(
Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=next_page, folder=True,
viewmode="movie"))
@@ -180,40 +166,46 @@ def peliculas(item):
def findvideos(item):
logger.info()
    # Download the page
    data = httptools.downloadpage(item.url).data
    # logger.info("data="+data)
    # Extract the entries (folders)
patron = 'hand" rel="([^"]+).*?title="(.*?)".*?<span>([^<]+)</span>.*?</span><span class="q">(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
itemtemp = []
for scrapedurl, nombre_servidor, idioma, calidad in matches:
idioma = idioma.strip()
calidad = calidad.strip()
title = "Ver en " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad + ")"
url = scrapedurl
thumbnail = ""
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
folder=False))
return itemlist
def play(item):
logger.info("url=" + item.url)
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
if "youapihd" in nombre_servidor.lower():
nombre_servidor = "gvideo"
if "pelismundo" in scrapedurl:
data = httptools.downloadpage(scrapedurl, add_referer = True).data
patron = 'sources.*?}],'
bloque = scrapertools.find_single_match(data, patron)
patron = 'file.*?"([^"]+)".*?label:"([^"]+)"'
match = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl1, scrapedlabel1 in match:
itemtemp.append([scrapedlabel1, scrapedurl1])
itemtemp.sort(key=lambda it: int(it[0].replace("p", "")))
for videoitem in itemtemp:
itemlist.append(Item(channel = item.channel,
action = "play",
extra = "hdvids",
fulltitle = item.title,
server = "directo",
thumbnail = item.thumbnail,
title = "Ver en " + nombre_servidor + " (" + idioma + ") (Calidad " + videoitem[0] + ")",
url = videoitem[1]
))
else:
itemlist.append(Item(channel=item.channel,
action = "play",
extra = "",
fulltitle = item.title,
server = "",
title = "Ver en " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad + ")",
thumbnail = item.thumbnail,
url = scrapedurl,
folder = False
))
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
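The pelismundo branch orders its sources by numeric quality before emitting items; the sort key is simply the label minus its trailing "p" (hypothetical labels):

    itemtemp = [["1080p", "url_a"], ["480p", "url_b"], ["720p", "url_c"]]
    itemtemp.sort(key=lambda it: int(it[0].replace("p", "")))
    print [q for q, _ in itemtemp]   # -> ['480p', '720p', '1080p']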

plugin.video.alfa/channels/pelismagnet.py Executable file → Normal file
View File

@@ -84,7 +84,7 @@ def menu_genero(item):
itemlist = []
-    response = httptools.downloadpage("https://kproxy.com/")
+    httptools.downloadpage("https://kproxy.com/")
url = "https://kproxy.com/doproxy.jsp"
post = "page=%s&x=34&y=14" % urllib.quote(host + "/principal")
response = httptools.downloadpage(url, post, follow_redirects=False).data
@@ -108,7 +108,7 @@ def series(item):
logger.info()
itemlist = []
-    response = httptools.downloadpage("https://kproxy.com/")
+    httptools.downloadpage("https://kproxy.com/")
url = "https://kproxy.com/doproxy.jsp"
post = "page=%s&x=34&y=14" % urllib.quote(item.url)
response = httptools.downloadpage(url, post, follow_redirects=False).data
@@ -126,17 +126,17 @@ def series(item):
punt = i.get("puntuacio", "")
valoracion = ""
if punt and not 0:
valoracion = " (Val: {punt})".format(punt=punt)
valoracion = " (Val: %s)" % punt
title = "{nombre}{val}".format(nombre=i.get("nom", ""), val=valoracion)
url = "{url}?id={id}".format(url=api_temp, id=i.get("id", ""))
title = "%s%s" % (i.get("nom", ""), valoracion)
url = "%s?id=%s" % (api_temp, i.get("id", ""))
thumbnail = ""
fanart = ""
if i.get("posterurl", ""):
thumbnail = "http://image.tmdb.org/t/p/w342{file}".format(file=i.get("posterurl", ""))
thumbnail = "http://image.tmdb.org/t/p/w342%s" % i.get("posterurl", "")
if i.get("backurl", ""):
fanart = "http://image.tmdb.org/t/p/w1280{file}".format(file=i.get("backurl", ""))
fanart = "http://image.tmdb.org/t/p/w1280%s" % i.get("backurl", "")
plot = i.get("info", "")
if plot is None:
@@ -165,7 +165,7 @@ def episodios(item):
logger.info()
itemlist = []
-    response = httptools.downloadpage("https://kproxy.com/")
+    httptools.downloadpage("https://kproxy.com/")
url = "https://kproxy.com/doproxy.jsp"
post = "page=%s&x=34&y=14" % urllib.quote(item.url)
response = httptools.downloadpage(url, post, follow_redirects=False).data
@@ -173,48 +173,61 @@ def episodios(item):
data = httptools.downloadpage(url).data
data = jsontools.load(data)
for i in data.get("temporadas", []):
titulo = "{temporada} ({total} Episodios)".format(temporada=i.get("nomtemporada", ""),
total=len(i.get("capituls", "0")))
itemlist.append(Item(channel=item.channel, action="episodios", title=titulo, url=item.url,
server="torrent", fanart=item.fanart, thumbnail=item.thumbnail, plot=data.get("info", ""),
folder=False))
dict_episodes = dict()
for i in data.get("temporadas", []):
for j in i.get("capituls", []):
numero = j.get("infocapitul", "")
if not numero:
numero = "{temp}x{cap}".format(temp=i.get("numerotemporada", ""), cap=j.get("numerocapitul", ""))
numero = j.get("infocapitul", "%sx%s" % (i.get("numerotemporada", 0), j.get("numerocapitul", 0)))
titulo = j.get("nomcapitul", "")
if not titulo:
titulo = "Capítulo {num}".format(num=j.get("numerocapitul", ""))
if numero not in dict_episodes:
dict_episodes[numero] = {}
dict_episodes[numero]["title"] = j.get("nomcapitul", "Episodio %s" % j.get("numerocapitul", ""))
calidad = ""
if j.get("links", {}).get("calitat", ""):
calidad = " [{calidad}]".format(calidad=j.get("links", {}).get("calitat", ""))
season = i.get("numerotemporada", 0)
if type(season) == str:
season = 0
dict_episodes[numero]["season"] = season
title = " {numero} {titulo}{calidad}".format(numero=numero, titulo=titulo, calidad=calidad)
episode = j.get("numerocapitul", 0)
if type(episode) == str:
episode = 0
dict_episodes[numero]["episode"] = episode
if j.get("links", {}).get("magnet"):
dict_episodes[numero]["url"] = [j.get("links", {}).get("magnet")]
dict_episodes[numero]["quality"] = [j.get("links", {}).get("calitat", "")]
dict_episodes[numero]["plot"] = j.get("overviewcapitul", "")
if j.get("links", {}).get("magnet", ""):
url = j.get("links", {}).get("magnet", "")
else:
return [Item(channel=item.channel, title='No hay enlace magnet disponible para este capitulo')]
if dict_episodes[numero]["title"] == "":
dict_episodes[numero]["title"] = j.get("nomcapitul", "Episodio %s" % j.get("numerocapitul", ""))
plot = i.get("overviewcapitul", "")
if plot is None:
plot = ""
if j.get("links", {}).get("magnet"):
dict_episodes[numero]["url"].append(j.get("links", {}).get("magnet"))
dict_episodes[numero]["quality"].append(j.get("links", {}).get("calitat", ""))
infoLabels = item.infoLabels
if plot:
infoLabels["plot"] = plot
infoLabels["season"] = i.get("numerotemporada")
infoLabels["episode"] = j.get("numerocapitul")
itemlist.append(
Item(channel=item.channel, action="play", title=title, url=url, server="torrent", infoLabels=infoLabels,
thumbnail=item.thumbnail, fanart=item.fanart, show=item.show, contentTitle=item.contentTitle,
contentSeason=i.get("numerotemporada"), contentEpisodeNumber=j.get("numerocapitul")))
if dict_episodes[numero]["plot"] == "":
dict_episodes[numero]["plot"] = j.get("overviewcapitul", "")
# logger.debug("\n\n\n dict_episodes: %s " % dict_episodes)
for key, value in dict_episodes.items():
list_no_duplicate = list(set(value["quality"]))
title = "%s %s [%s]" % (key, value["title"], "][".join(list_no_duplicate))
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, server="torrent",
thumbnail=item.thumbnail, fanart=item.fanart, show=item.show, data=value,
contentTitle=item.contentTitle, contentSeason=value["season"],
contentEpisodeNumber=value["episode"]))
# order list
if len(itemlist) > 1:
itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
return itemlist
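The dict_episodes pass above folds several magnet links for the same episode key into one entry and collapses duplicate quality tags; a toy version with hypothetical data:

    dict_episodes = {}
    for numero, magnet, calidad in [("1x01", "magnet:?xt=a", "720p"),
                                    ("1x01", "magnet:?xt=b", "1080p"),
                                    ("1x01", "magnet:?xt=c", "720p")]:
        ep = dict_episodes.setdefault(numero, {"url": [], "quality": []})
        ep["url"].append(magnet)
        ep["quality"].append(calidad)
    for key, value in dict_episodes.items():
        print "%s [%s]" % (key, "][".join(set(value["quality"])))
        # e.g. 1x01 [720p][1080p]  (set() drops duplicates; its order is not guaranteed)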
@@ -224,7 +237,7 @@ def pelis(item):
itemlist = []
-    response = httptools.downloadpage("https://kproxy.com/")
+    httptools.downloadpage("https://kproxy.com/")
url = "https://kproxy.com/doproxy.jsp"
post = "page=%s&x=34&y=14" % urllib.quote(item.url)
response = httptools.downloadpage(url, post, follow_redirects=False).data
@@ -242,34 +255,35 @@ def pelis(item):
valoracion = ""
if punt and not 0:
valoracion = " (Val: {punt})".format(punt=punt)
valoracion = " (Val: %s)" % punt
if i.get("magnets", {}).get("M1080", {}).get("magnet", ""):
url = i.get("magnets", {}).get("M1080", {}).get("magnet", "")
calidad = "[{calidad}]".format(calidad=i.get("magnets", {}).get("M1080", {}).get("quality", ""))
calidad = "[%s]" % i.get("magnets", {}).get("M1080", {}).get("quality", "")
else:
url = i.get("magnets", {}).get("M720", {}).get("magnet", "")
calidad = "[{calidad}]".format(calidad=i.get("magnets", {}).get("M720", {}).get("quality", ""))
calidad = "[%s]" % (i.get("magnets", {}).get("M720", {}).get("quality", ""))
if not url:
continue
title = "{nombre} {calidad}{val}".format(nombre=i.get("nom", ""), val=valoracion, calidad=calidad)
title = "%s %s%s" % (i.get("nom", ""), valoracion, calidad)
thumbnail = ""
fanart = ""
if i.get("posterurl", ""):
thumbnail = "http://image.tmdb.org/t/p/w342{file}".format(file=i.get("posterurl", ""))
thumbnail = "http://image.tmdb.org/t/p/w342%s" % i.get("posterurl", "")
if i.get("backurl", ""):
fanart = "http://image.tmdb.org/t/p/w1280{file}".format(file=i.get("backurl", ""))
fanart = "http://image.tmdb.org/t/p/w1280%s" % i.get("backurl", "")
plot = i.get("info", "")
if plot is None:
plot = ""
infoLabels = {'plot': plot, 'year': i.get("year"), 'tmdb_id': i.get("id")}
itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, server="torrent",
thumbnail=thumbnail, fanart=fanart, infoLabels=infoLabels, contentTitle=i.get("nom")))
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, server="torrent",
contentType="movie", thumbnail=thumbnail, fanart=fanart, infoLabels=infoLabels,
contentTitle=i.get("nom")))
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
@@ -298,3 +312,22 @@ def search(item, texto):
for line in sys.exc_info():
logger.error("%s" % line)
return []
def findvideos(item):
    logger.info()
    itemlist = []
    if item.contentType == "movie":
        item.title = "Enlace Torrent"
        item.action = "play"
        itemlist.append(item)
    else:
        data = item.data
        for index, url in enumerate(data["url"]):
            title = "Enlace torrent [%s]" % data["quality"][index]
            itemlist.append(item.clone(action="play", title=title, url=url))
    return itemlist

View File

@@ -542,7 +542,7 @@ try:
elif item.action == "play" and not self.item.windowed:
for window in window_select:
window.close()
-                    retorna = platformtools.play_video(item)
+                    retorna = platformtools.play_video(item, force_direct=True)
if not retorna:
while True:
xbmc.sleep(1000)

View File

@@ -4,7 +4,7 @@
# -*- By the Alfa Develop Group -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
@@ -66,7 +66,7 @@ def list_all(item):
plot=plot,
                                 contentSerieName=contentSerieName
))
-    itemlist = get_thumb(templist)
+    itemlist = serie_thumb(templist)
# Paginación
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, next_page=next_page, i=item.i))
@@ -110,7 +110,7 @@ def episodios(item):
return itemlist
-def get_thumb(itemlist):
+def serie_thumb(itemlist):
logger.info()
for item in itemlist:
data = get_source(item.url)
@@ -135,7 +135,7 @@ def search_list(item):
next_page = scrapertools.find_single_match(data, '<link rel=next href=(.*?) />')
if next_page:
itemlist.append(Item(channel=item.channel, action="search_list", title='>> Pagina Siguiente', url=next_page,
thumbnail=config.get_thumb("thumb_next.png")))
thumbnail = get_thumb('thumb_next.png')))
return itemlist

View File

@@ -0,0 +1,29 @@
{
"id": "yespornplease",
"name": "YesPornPlease",
"active": true,
"adult": true,
"language": "es",
"thumbnail": "yespornplease.png",
"banner": "yespornplease.png",
"version": 1,
"changes": [
{
"date": "27/08/2017",
"description": "Canal creado"
}
],
"categories": [
"adult"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,100 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core.item import Item
from platformcode import logger
from urlparse import urljoin
from core import servertools
HOST="http://yespornplease.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="links", title="Novedades", url=HOST))
itemlist.append(item.clone(action="categories", title="Categorías", url=urljoin(HOST, "categories")))
itemlist.append(item.clone(action="search", title="Buscar", url=urljoin(HOST, "search")))
return itemlist
def search(item, texto):
logger.info("texto = %s" %(texto))
item.url = urljoin(HOST, "search&q=" + texto)
try:
return links(item)
    # Catch the exception so that a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categories(item):
logger.info()
data = httptools.downloadpage(item.url).data
result = []
categories = re.findall("href=[\"'](?P<url>/search[^\"']+).*?>(?P<name>[^<>]+)</div>.*?badge[^>]+>(?P<counter>\d+)", data, re.DOTALL | re.MULTILINE)
for url, name, counter in categories:
result.append(item.clone(action = "links", title = "%s (%s videos)" % (name, counter), url = urljoin(item.url, url)))
return result
def get_page(url):
page = re.search("p=(\d+)", url)
if page:
return int(page.group(1))
return 1
def get_page_url(url, page):
logger.debug("URL: %s to page %d" % (url, page))
resultURL = re.sub("([&\?]p=)(?:\d+)", "\g<1>%d" % page, url)
if resultURL == url:
resultURL += ("&" if "?" in url else "?") + "p=%d" % (page)
logger.debug("Result: %s" % (resultURL))
return resultURL
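Hedged usage of the two pagination helpers above (URLs are illustrative, not live endpoints):

    print get_page("http://yespornplease.com/search?q=demo")             # -> 1 (no p= parameter)
    print get_page("http://yespornplease.com/search?q=demo&p=3")         # -> 3
    print get_page_url("http://yespornplease.com/search?q=demo", 2)      # appends &p=2
    print get_page_url("http://yespornplease.com/search?q=demo&p=2", 3)  # rewrites p=2 -> p=3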
def links(item):
logger.info()
data = httptools.downloadpage(item.url).data
reExpr = "<img\s+src=['\"](?P<img>[^'\"]+)[^>]+title[^'\"]*['\"](?P<title>[^\"]+)[^>]+id[^'\"]*['\"](?P<id>[^'\"]+)[^>]*>(?:[^<]*<[^>]+>(?P<quality>[^<]+)<)?[^<]*<[^>]*duration[^>]*>(?P<duration>[^<]+)"
reResults = re.findall(reExpr, data, re.MULTILINE | re.DOTALL)
result = []
for img, title, vID, quality, duration in reResults:
logger.info("[link] %(title)s [%(quality)s] [%(duration)s]: %(vid)s (%(img)s" % ({"title": title, "duration": duration, "vid": vID, "img": img, "quality": quality if quality else "--"}))
formattedQuality = ""
if quality:
formattedQuality += " [%s]" % (quality)
titleFormatted = "%(title)s%(quality)s [%(duration)s]" % ({"title": title, "quality": formattedQuality, "duration": duration})
result.append(item.clone(action = "play", title = titleFormatted, url = urljoin(item.url, "/view/%s" % (vID)), thumbnail = urljoin(item.url, img), vID = vID))
# Has pagination
paginationOccurences = data.count('class="prevnext"')
if paginationOccurences:
page = get_page(item.url)
logger.info("Page " + str(page) + " Ocurrences: " + str(paginationOccurences))
if page > 1:
result.append(item.clone(action = "links", title = "<< Anterior", url = get_page_url(item.url, page - 1)))
if paginationOccurences > 1 or page == 1:
result.append(item.clone(action = "links", title = "Siguiente >>", url = get_page_url(item.url, page + 1)))
return result
def play(item):
logger.info(item)
embededURL = urljoin(item.url, "/e/%s/width-650/height-400/autoplay-0/" % (item.vID))
itemlist = servertools.find_video_items(item.clone(url = embededURL))
return itemlist

View File

@@ -452,7 +452,7 @@ def is_playing():
return xbmc.Player().isPlaying()
-def play_video(item, strm=False):
+def play_video(item, strm=False, force_direct=False):
logger.info()
# logger.debug(item.tostring('\n'))
@@ -503,7 +503,17 @@ def play_video(item, strm=False):
xlistitem.setProperty('inputstream.adaptive.manifest_type', 'mpd')
-    # launch the player
-    set_player(item, xlistitem, mediaurl, view, strm)
+    # launch the player
+    if force_direct:  # playback started from a window rather than from the addon root
+        # add the listitem to a playlist
+        playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
+        playlist.clear()
+        playlist.add(mediaurl, xlistitem)
+        # start playback
+        xbmc_player = xbmc.Player()
+        xbmc_player.play(playlist, xlistitem)
+    else:
+        set_player(item, xlistitem, mediaurl, view, strm)
def stop_video():

Binary file not shown (image added, 6.3 KiB).

Binary file not shown (image added, 5.4 KiB).

Binary file not shown (image added, 19 KiB).

View File

@@ -48,5 +48,6 @@
"visible": false
}
     ],
+    "thumbnail": "https://s11.postimg.org/giobzkprz/logo-google1.png",
     "version": 1
 }

View File

@@ -11,7 +11,7 @@ def test_video_exists(page_url):
data = httptools.downloadpage(page_url).data
if "<title>watch </title>" in data.lower():
        return False, "[kingvid] El archivo no existe o ha sido borrado"
return True, ""
@@ -19,7 +19,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
-    data = httptools.downloadpage(page_url).data
+    data = httptools.downloadpage(page_url, add_referer = True).data
match = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
data = jsunpack.unpack(match)

View File

@@ -0,0 +1,50 @@
{
"active": true,
"changes": [
{
"date": "27/08/2017",
"description": "Versión incial"
}
],
"find_videos": {
"patterns": [
{
"pattern": "(http://vshare.io/v/[\\w]+[^\"']*)[\"']",
"url": "\\1"
}
]
},
"free": true,
"id": [
"vshare"
],
"name": "vshare",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "server_vshare.png",
"version": 1
}

View File

@@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
from lib import jsunpack
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
if httptools.downloadpage(page_url).code != 200:
return False, "El archivo no existe en vShare o ha sido borrado."
else:
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url = " + page_url)
data = httptools.downloadpage(page_url).data
flowplayer = re.search("url: [\"']([^\"']+)", data)
if flowplayer:
return [["FLV", flowplayer.group(1)]]
jsUnpack = jsunpack.unpack(data)
logger.debug(jsUnpack)
video_urls = []
fields = re.search("\[([^\]]+).*?parseInt\(value\)-(\d+)", jsUnpack)
if fields:
logger.debug("Values: " + fields.group(1))
logger.debug("Substract: " + fields.group(2))
substract = int(fields.group(2))
arrayResult = [chr(int(value) - substract) for value in fields.group(1).split(",")]
strResult = "".join(arrayResult)
logger.debug(strResult)
videoSources = re.findall("<source[\s]+src=[\"'](?P<url>[^\"']+)[^>]+label=[\"'](?P<label>[^\"']+)", strResult)
for url, label in videoSources:
logger.debug("[" + label + "] " + url)
video_urls.append([label, url])
return video_urls
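A toy run of the character-shift decoding above, with a hypothetical payload; the real pages embed the player markup as comma-separated character codes offset by the constant captured from the parseInt(value)-N expression:

    encoded = "107,104,111,111,114"   # hypothetical "[...]" payload from the page
    substract = 3                     # the N captured by the regex above
    print "".join([chr(int(value) - substract) for value in encoded.split(",")])
    # -> hello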