Merge remote-tracking branch 'alfa-addon/master'

Author: unknown
Date: 2019-01-11 08:52:54 -03:00
69 changed files with 2134 additions and 590 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.7.19" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.7.21" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -10,8 +10,8 @@
<extension point="xbmc.addon.metadata">
<summary lang="es">Navega con Kodi por páginas web.</summary>
<assets>
-<icon>logo-n.jpg</icon>
-<fanart>fanart-xmas.jpg</fanart>
+<icon>logo-cumple.png</icon>
+<fanart>fanart1.jpg</fanart>
<screenshot>resources/media/themes/ss/1.jpg</screenshot>
<screenshot>resources/media/themes/ss/2.jpg</screenshot>
<screenshot>resources/media/themes/ss/3.jpg</screenshot>
@@ -19,16 +19,12 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
¤ allcalidad ¤ asialiveaction ¤ repelis
¤ DoramasMP4 ¤ CanalPelis ¤ Vi2
¤ PeliculonHD ¤ PeliculasHD ¤ UltraPeliculasHD
¤ Newpct1 ¤ maxipelis24 ¤ repelis.live
¤ cuevana2 ¤ cuevana2espanol
[COLOR green][B]Novedades[/B][/COLOR]
¤ pack +18
¤ Agradecimientos a @mrgaturus, @GeorgeRamga y @chivmalev por colaborar con ésta versión
¤ Grantorrent ¤ Zonatorrent ¤ pack +18
¤ erotik ¤ pelis24 ¤ cineasiaenlinea
¤ pelisgratis ¤ repelis ¤ descargacineclasico
¤ Goovie ¤ PelisFox ¤ PelisPlus
¤ TvSeriesDk ¤ UqLoad ¤ Vi2
¤ gnula.biz
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

View File

@@ -50,11 +50,13 @@ def catalogo(item):
scrapedplot = ""
scrapedurl = host + scrapedurl
title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]"
itemlist.append( Item(channel=item.channel, action="peliculas", title=title , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=title , url=scrapedurl ,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" ,
text_color="blue", url=next_page_url , folder=True) )
return itemlist
@@ -62,12 +64,17 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="c-thumb">.*?<a href="([^"]+)".*?<img src="([^"]+)".*?<div class="c-thumb--overlay c-thumb--overlay-title">([^"]+)</div>.*?<span>(.*?)</span>'
patron = '<a class="categories-list__link" href="([^"]+)">.*?'
patron += '<span class="categories-list__name cat-icon" data-title="([^"]+)">.*?'
patron += '<span class="categories-list__badge">(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
-for scrapedurl,scrapedthumbnail,scrapedtitle,num in matches:
+for scrapedurl,scrapedtitle,num in matches:
+url = urlparse.urljoin(item.url,scrapedurl)
+scrapedthumbnail = ""
scrapedplot = ""
title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]"
itemlist.append( Item(channel=item.channel, action="peliculas", title=title , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=title , url=url ,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
@@ -76,7 +83,9 @@ def peliculas(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = 'data-video-id="\d+">.*?<a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)".*?<span class="thumb__duration">(.*?)</span>'
patron = 'data-video-id="\d+">.*?<a href="([^"]+)".*?'
patron += '<img src="([^"]+)" alt="([^"]+)".*?'
patron += '<span class="thumb__duration">(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,time in matches:
contentTitle = scrapedtitle
@@ -84,11 +93,13 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail,
plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" ,
text_color="blue", url=next_page_url , folder=True) )
return itemlist
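Nearly every hunk in this commit converges on the same pagination idiom: extract the next-page href with one regex, resolve it against the page being scraped with urlparse.urljoin, and append a blue "Página Siguiente >>" item that re-invokes the listing action. A minimal, self-contained sketch of that idiom (Python 2, like the codebase; the HTML fixture and the find_single_match stand-in are assumptions that mirror what scrapertools appears to provide):

import re
import urlparse

SAMPLE_HTML = '<a class=" btn btn--size--l btn--next" href="/videos?page=2" title="Next Page">Next</a>'
page_url = "https://example.com/videos?page=1"

def find_single_match(data, patron):
    # Assumed contract of scrapertools.find_single_match: first capture
    # group of the first match, or "" when nothing matches.
    match = re.search(patron, data, re.DOTALL)
    return match.group(1) if match else ""

next_page_url = find_single_match(SAMPLE_HTML,
                                  '<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"')
if next_page_url != "":
    # Relative hrefs are resolved against the URL of the page being scraped.
    next_page_url = urlparse.urljoin(page_url, next_page_url)
    print next_page_url  # https://example.com/videos?page=2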

View File

@@ -19,7 +19,7 @@ def mainlist(item):
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/wall-date-1.html"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="peliculas", url=host + "/wall-note-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas valorados" , action="peliculas", url=host + "/wall-note-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url=host + "/wall-main-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas largos" , action="peliculas", url=host + "/wall-time-1.html"))
@@ -52,7 +52,8 @@ def categorias(item):
scrapedthumbnail = ""
scrapedurl = scrapedurl.replace(".html", "_date.html")
scrapedurl = host +"/" + scrapedurl
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
@@ -60,6 +61,7 @@ def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
patron = '<div class="thumb-main-titre"><a href="([^"]+)".*?'
patron += 'title="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
@@ -71,11 +73,13 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = title, infoLabels={'year':year} ))
next_page = scrapertools.find_single_match(data, '<span class="text16">\d+</span> <a href="..([^"]+)"')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page ) )
return itemlist

View File

@@ -165,7 +165,7 @@ def findvideos(item):
if contentTitle != "":
item.contentTitle = contentTitle
bloque = scrapertools.find_single_match(data, '(?s)<div class="bottomPlayer">(.*?)<script>')
-match = scrapertools.find_multiple_matches(bloque, '(?is)data-Url="([^"]+).*?data-postId="([^"]+)')
+match = scrapertools.find_multiple_matches(bloque, '(?is)data-Url="([^"]+).*?data-postId="([^"]*)')
for dataurl, datapostid in match:
page_url = host + "wp-admin/admin-ajax.php"
post = "action=get_more_top_news&postID=%s&dataurl=%s" %(datapostid, dataurl)
@@ -178,7 +178,7 @@ def findvideos(item):
if "youtube" in url:
titulo = "Ver trailer: %s"
text_color = "yellow"
if "ad.js" in url or "script" in url or "jstags.js" in url:
if "ad.js" in url or "script" in url or "jstags.js" in url or not datapostid:
continue
elif "vimeo" in url:
url += "|" + "http://www.allcalidad.com"

View File

@@ -10,14 +10,13 @@ from core import httptools
from core import tmdb
from core import jsontools
-host = 'http://www.alsoporn.com/en'
+host = 'http://www.alsoporn.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host +"/g/All/new/1"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/g/All/top/1"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/"))
itemlist.append( Item(channel=item.channel, title="Top" , action="peliculas", url=host + "/g/All/top/1"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -41,12 +40,14 @@ def catalogo(item):
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<h3>CLIPS</h3>(.*?)<h3>FILM</h3>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li><a href="([^"]+)" title="">.*?<span class="videos-count">([^"]+)</span><span class="title">([^"]+)</span>'
patron = '<li><a href="([^"]+)" title="">.*?'
patron += '<span class="videos-count">([^"]+)</span><span class="title">([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,cantidad,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
@@ -60,9 +61,9 @@ def categorias(item):
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
scrapedurl = scrapedurl.replace("top", "new")
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
@@ -82,23 +83,26 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)" target="_self"><span class="alsoporn_page">NEXT</span></a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url , folder=True) )
return itemlist
-def findvideos(item):
+def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
scrapedurl = scrapertools.find_single_match(data,'<iframe frameborder=0 scrolling="no" src=\'([^\']+)\'')
data = scrapertools.cachePage(scrapedurl)
scrapedurl1 = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
scrapedurl1 = scrapedurl1.replace("//www.playercdn.com/ec/i2.php?", "https://www.synergytube.xyz/ec/i2.php?")
scrapedurl1 = scrapedurl1.replace("//www.playercdn.com/ec/i2.php?", "https://www.trinitytube.xyz/ec/i2.php?")
data = scrapertools.cachePage(scrapedurl1)
scrapedurl2 = scrapertools.find_single_match(data,'<source src="(.*?)" type=\'video/mp4\'>')
scrapedurl2 = scrapertools.find_single_match(data,'<source src="(.*?)"')
itemlist.append(item.clone(action="play", title=item.title, fulltitle = item.title, url=scrapedurl2))
return itemlist

View File

@@ -19,7 +19,7 @@ def mainlist(item):
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/más-visto/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/mejor-valorado/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url="https://www.analdin.com/categories/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categorías/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -48,11 +48,13 @@ def catalogo(item):
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
next_page_url = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist
@@ -69,14 +71,15 @@ def categorias(item):
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="popup-video-link" href="([^"]+)".*?'
patron += 'thumb="([^"]+)".*?'
@@ -89,11 +92,13 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist
@@ -108,15 +113,3 @@ def play(item):
itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
return itemlist
-# def play(item):
-# logger.info()
-# itemlist = []
-# data = scrapertools.cachePage(item.url)
-# patron = 'video_url: \'([^\']+)\''
-# matches = scrapertools.find_multiple_matches(data, patron)
-# for scrapedurl in matches:
-# title = scrapedurl
-# itemlist.append(item.clone(action="play", title=title, fulltitle = item.title, url=scrapedurl))
-# return itemlist

View File

@@ -90,10 +90,13 @@ def mainlist(item):
return itemlist
-def get_source(url):
+def get_source(url, referer=None):
logger.info()
-data = httptools.downloadpage(url).data
-data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}|"|\(|\)', "", data)
+if referer is None:
+data = httptools.downloadpage(url).data
+else:
+data = httptools.downloadpage(url, headers={'Referer':referer}).data
+data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
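The reworked get_source() above threads an optional Referer through httptools.downloadpage, presumably because some hosts refuse to serve player pages to referer-less requests. A standard-library sketch of the same pattern (Python 2; the URLs are placeholders, not endpoints from this channel):

import urllib2

def get_source(url, referer=None):
    request = urllib2.Request(url)
    if referer is not None:
        # Make the request look like it came from the site's own pages.
        request.add_header("Referer", referer)
    return urllib2.urlopen(request).read()

data = get_source("https://example.com/player/123", referer="https://example.com/")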
@@ -107,10 +110,11 @@ def lista(item):
post = {'tipo': 'episodios', '_token': 'rAqVX74O9HVHFFigST3M9lMa5VL7seIO7fT8PBkl'}
post = urllib.urlencode(post)
data = get_source(item.url)
-patron = 'class=anime><div class=cover style=background-image: url(.*?)>.*?<a href=(.*?)><h2>(.*?)<\/h2><\/a><\/div>'
+patron = 'class="anime"><a href="([^"]+)">'
+patron +='<div class="cover" style="background-image: url\((.*?)\)">.*?<h2>([^<]+)<\/h2>'
matches = re.compile(patron, re.DOTALL).findall(data)
-for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
+for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
thumbnail = host + scrapedthumbnail
title = scrapedtitle
@@ -124,13 +128,13 @@ def lista(item):
# Paginacion
next_page = scrapertools.find_single_match(data,
-'<a href=([^ ]+) rel=next>&raquo;</a>')
+'<a href="([^"]+)" data-ci-pagination-page="\d+" rel="next"')
next_page_url = scrapertools.decodeHtmlentities(next_page)
if next_page_url != "":
itemlist.append(Item(channel=item.channel,
action="lista",
title=">> Página siguiente",
-url=host+next_page_url,
+url=next_page_url,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
@@ -158,7 +162,7 @@ def generos(item):
itemlist = []
data = get_source(item.url)
-patron = '<li class=><a href=https:\/\/www\.animeshd\.tv\/genero\/(.*?)>(.*?)<\/a><\/li>'
+patron = '<a href="https:\/\/www\.animeshd\.tv\/genero\/([^"]+)">([^<]+)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
@@ -180,8 +184,8 @@ def episodios(item):
itemlist = []
data = get_source(item.url)
-patron = '<li id=epi-.*? class=list-group-item.*?><a href=(.*?) class=badge.*?width=25 title=(.*?)>.*?<\/span>(' \
-'.*?) (\d+)<\/li>'
+patron = '<li id="epi-.*? class="list-group-item.*?"><a href="([^"]+)".*?'
+patron += 'class="badge".*?width="25" title="([^"]+)">.*?<\/span>(.*?) (\d+)<\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels

View File

@@ -12,6 +12,7 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
host = "http://www.asialiveaction.com"
+IDIOMAS = {'Japones': 'Japones'}
@@ -66,7 +67,7 @@ def search_results(item):
patron +=">(\d{4})</a>.*?<h6>([^<]+)<a href='([^']+)"
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedtype, scrapedthumbnail, scrapedyear, scrapedtitle ,scrapedurl in matches:
title="%s [%s]" % (scrapedtitle,scrapedyear)
title="%s [%s]" % (scrapedtitle, scrapedyear)
new_item= Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail)
if scrapedtype.strip() == 'Serie':
new_item.contentSerieName = scrapedtitle
@@ -191,6 +192,8 @@ def findvideos(item):
if "spotify" in url:
continue
data = httptools.downloadpage(url).data
+language = scrapertools.find_single_match(data, '(?:ɥɔɐәlq|lɐʇәɯllnɟ) (\w+)')
+if not language: language = "VOS"
bloque = scrapertools.find_single_match(data, "description articleBody(.*)/div")
urls = scrapertools.find_multiple_matches(bloque, "iframe src='([^']+)")
if urls:
@@ -199,17 +202,17 @@ def findvideos(item):
if "luis" in url1:
data = httptools.downloadpage(url1).data
url1 = scrapertools.find_single_match(data, 'file: "([^"]+)')
itemlist.append(item.clone(action = "play", title = "Ver en %s", url = url1))
itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url1))
else:
# cuando es descarga
bloque = bloque.replace('"',"'")
urls = scrapertools.find_multiple_matches(bloque, "href='([^']+)")
for url2 in urls:
itemlist.append(item.clone(action = "play", title = "Ver en %s", url = url2))
itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url2))
if "data-video" in bloque:
-urls = scrapertools.find_multiple_matches(bloque, 'data-video="([^"]+)')
+urls = scrapertools.find_multiple_matches(bloque, "data-video='([^']+)")
for url2 in urls:
itemlist.append(item.clone(action = "play", title = "Ver en %s", url = "https://tinyurl.com/%s" %url2 ))
itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = "https://tinyurl.com/%s" %url2 ))
for item1 in itemlist:
if "tinyurl" in item1.url:
item1.url = httptools.downloadpage(item1.url, follow_redirects=False, only_headers=True).headers.get("location", "")
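The tinyurl links collected above are resolved by requesting them with follow_redirects=False, only_headers=True and reading the Location header. A hedged standard-library equivalent of that trick (Python 2; assumes the shortener answers a HEAD request with a 3xx redirect):

import httplib
import urlparse

def resolve_redirect(url):
    # Ask for the URL without following the redirect, then read Location.
    parts = urlparse.urlsplit(url)
    if parts.scheme == "https":
        conn = httplib.HTTPSConnection(parts.netloc)
    else:
        conn = httplib.HTTPConnection(parts.netloc)
    path = parts.path or "/"
    if parts.query:
        path += "?" + parts.query
    conn.request("HEAD", path)
    return conn.getresponse().getheader("location", "")

# e.g. resolve_redirect("https://tinyurl.com/<id>") -> the real video URL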

View File

@@ -42,23 +42,28 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" class="th">.*?<img src="([^"]+)".*?<span>([^"]+)</span>\s*(\d+) movies.*?</strong>'
patron = '<a href="([^"]+)" class="th">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<span>([^"]+)</span>\s*(\d+) movies.*?</strong>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedthumbnail = "http:" + scrapedthumbnail
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/latest/"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class=".*?video_block"><a href="([^"]+)".*?<img src="([^"]+)".*?alt="([^"]+)".*?<span class="time">([^"]+)</span>'
patron = '<div class=".*?video_block"><a href="([^"]+)".*?'
patron += '<img src="([^"]+)".*?alt="([^"]+)".*?'
patron += '<span class="time">([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
url = urlparse.urljoin(item.url,scrapedurl)
@@ -66,21 +71,23 @@ def peliculas(item):
thumbnail = "https:" + scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next" title="Next">Next</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist
def play(item):
logger.info()
itemlist = []
-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data
patron = '<source src="([^"]+)" type=\'video/mp4\' title="HQ" />'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl, folder=True))
itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl))
return itemlist

View File

@@ -13,6 +13,7 @@ from core import jsontools
host = 'https://www.camwhoresbay.com'
+# EN CATALOGO Y BUSQUEDA LA PAGINACION FUNCIONA CON UN AJAX
def mainlist(item):
logger.info()
@@ -20,7 +21,6 @@ def mainlist(item):
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="peliculas", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -51,7 +51,8 @@ def categorias(item):
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
@@ -70,11 +71,13 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = title, infoLabels={'year':year} ))
next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page ) )
return itemlist
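The comment added at the top of this diff notes that pagination for the catalog and search listings is driven by an AJAX call rather than by plain next-page links. A sketch of how such an endpoint is typically requested (Python 2, standard library only; the parameter names are hypothetical, not captured from the site):

import urllib
import urllib2

def ajax_page(url, block_id, page):
    # Hypothetical form fields; the real ones would come from the site's JS.
    post = urllib.urlencode({"block_id": block_id, "from": page})
    request = urllib2.Request(url, post)  # supplying data makes this a POST
    # Many sites only return the HTML fragment when this header is present.
    request.add_header("X-Requested-With", "XMLHttpRequest")
    return urllib2.urlopen(request).read()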

View File

@@ -0,0 +1,61 @@
{
"id": "cineasiaenlinea",
"name": "CineAsiaEnLinea",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/5KOU8uy.png?3",
"banner": "cineasiaenlinea.png",
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en búsqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -0,0 +1,177 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = "http://www.cineasiaenlinea.com/"
__channel__='cineasiaenlinea'
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
# Configuracion del canal
__perfil__ = int(config.get_setting('perfil', 'cineasiaenlinea'))
# Fijar perfil de color
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
if __perfil__ - 1 >= 0:
color1, color2, color3 = perfil[__perfil__ - 1]
else:
color1 = color2 = color3 = ""
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="peliculas", title="Novedades", url=host + "archivos/peliculas",
thumbnail=get_thumb('newest', auto=True), text_color=color1,))
itemlist.append(item.clone(action="peliculas", title="Estrenos", url=host + "archivos/estrenos",
thumbnail=get_thumb('premieres', auto=True), text_color=color1))
itemlist.append(item.clone(action="indices", title="Por géneros", url=host,
thumbnail=get_thumb('genres', auto=True), text_color=color1))
itemlist.append(item.clone(action="indices", title="Por país", url=host, text_color=color1,
thumbnail=get_thumb('country', auto=True)))
itemlist.append(item.clone(action="indices", title="Por año", url=host, text_color=color1,
thumbnail=get_thumb('year', auto=True)))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3,
thumbnail=get_thumb('search', auto=True)))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
item.url = "%s?s=%s" % (host, texto.replace(" ", "+"))
try:
return peliculas(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host + "archivos/peliculas"
elif categoria == 'terror':
item.url = host + "genero/terror"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()
itemlist = []
item.text_color = color2
# Descarga la página
data = httptools.downloadpage(item.url).data
patron = '<h3><a href="([^"]+)">([^<]+)<.*?src="([^"]+)".*?<a rel="tag">([^<]+)<' \
'.*?<a rel="tag">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, year, calidad in matches:
title = re.sub(r' \((\d+)\)', '', scrapedtitle)
scrapedtitle += " [%s]" % calidad
infolab = {'year': year}
itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, infoLabels=infolab,
contentTitle=title, contentType="movie", quality=calidad))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)"')
if next_page:
itemlist.append(item.clone(title=">> Página Siguiente", url=next_page))
return itemlist
def indices(item):
logger.info()
itemlist = []
# Descarga la página
data = httptools.downloadpage(item.url).data
logger.info(data)
if "géneros" in item.title:
bloque = scrapertools.find_single_match(data, '(?i)<h4>Peliculas por genero</h4>(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)".*?>([^<]+)<')
elif "año" in item.title:
bloque = scrapertools.find_single_match(data, '(?i)<h4>Peliculas por Año</h4>(.*?)</select>')
matches = scrapertools.find_multiple_matches(bloque, '<option value="([^"]+)">([^<]+)<')
else:
bloque = scrapertools.find_single_match(data, '(?i)<h4>Peliculas por Pais</h4>(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)".*?>([^<]+)<')
for scrapedurl, scrapedtitle in matches:
if "año" in item.title:
scrapedurl = "%sfecha-estreno/%s" % (host, scrapedurl)
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=item.thumbnail, text_color=color3))
return itemlist
def findvideos(item):
logger.info()
data = httptools.downloadpage(item.url).data
item.infoLabels["plot"] = scrapertools.find_single_match(data, '(?i)<h2>SINOPSIS.*?<p>(.*?)</p>')
item.infoLabels["trailer"] = scrapertools.find_single_match(data, 'src="(http://www.youtube.com/embed/[^"]+)"')
itemlist = servertools.find_video_items(item=item, data=data)
for it in itemlist:
it.thumbnail = item.thumbnail
it.text_color = color2
itemlist.append(item.clone(action="add_pelicula_to_library", title="Añadir película a la videoteca"))
if item.infoLabels["trailer"]:
folder = True
if config.is_xbmc():
folder = False
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Ver Trailer", folder=folder,
contextual=not folder))
return itemlist
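The new channel stores "perfil" as a list index in its JSON (0 = "Sin color", then the three colour profiles) and maps it onto the colour table with perfil[__perfil__ - 1]. A worked check of that mapping, using only values shown above:

lvalues = ["Sin color", "Perfil 3", "Perfil 2", "Perfil 1"]  # from the channel JSON
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
for setting in range(len(lvalues)):
    if setting - 1 >= 0:
        color1, color2, color3 = perfil[setting - 1]
    else:
        color1 = color2 = color3 = ""  # "Sin color" leaves the colours empty
    print setting, lvalues[setting], (color1, color2, color3)
# The JSON default of 3 ("Perfil 1") therefore selects perfil[2].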

View File

@@ -38,14 +38,13 @@ def genero(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(host).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron_generos = '<ul id="menu-submenu" class=""><li id="menu-item-.+?"(.+)<\/li><\/ul>'
data_generos = scrapertools.find_single_match(data, patron_generos)
patron = 'class="menu-item menu-item-type-taxonomy menu-item-object-category menu-item-.*?"><a href="(.*?)">(.*?)<\/a><\/li>'
matches = scrapertools.find_multiple_matches(data_generos, patron)
patron = 'level-0.*?value="([^"]+)"'
patron += '>([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
-if scrapedtitle != 'Próximas Películas':
-itemlist.append(item.clone(action='lista', title=scrapedtitle, url=scrapedurl))
+if 'Próximas Películas' in scrapedtitle:
+continue
+itemlist.append(item.clone(action='lista', title=scrapedtitle, cat=scrapedurl))
return itemlist
@@ -90,13 +89,18 @@ def proximas(item):
def lista(item):
logger.info()
itemlist = []
-data = httptools.downloadpage(item.url).data
+if not item.cat:
+data = httptools.downloadpage(item.url).data
+else:
+url = httptools.downloadpage("%s?cat=%s" %(host, item.cat), follow_redirects=False, only_headers=True).headers.get("location", "")
+data = httptools.downloadpage(url).data
+bloque = scrapertools.find_single_match(data, """class="item_1 items.*?id="paginador">""")
patron = 'class="item">.*?' # Todos los items de peliculas (en esta web) empiezan con esto
patron += '<a href="([^"]+).*?' # scrapedurl
patron += '<img src="([^"]+).*?' # scrapedthumbnail
patron += 'alt="([^"]+).*?' # scrapedtitle
patron += '<div class="fixyear">(.*?)</span></div><' # scrapedfixyear
-matches = scrapertools.find_multiple_matches(data, patron)
+matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedfixyear in matches:
patron = '<span class="year">([^<]+)' # scrapedyear
scrapedyear = scrapertools.find_single_match(scrapedfixyear, patron)
@@ -113,14 +117,8 @@ def lista(item):
item.clone(title=title, url=scrapedurl, action="findvideos", extra=scrapedtitle,
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie", context=["buscar_trailer"]))
tmdb.set_infoLabels(itemlist)
-scrapertools.printMatches(itemlist)
# Paginacion
patron_genero = '<h1>([^"]+)<\/h1>'
genero = scrapertools.find_single_match(data, patron_genero)
if genero == "Romance" or genero == "Drama":
patron = "<a rel='nofollow' class=previouspostslink' href='([^']+)'>Siguiente "
else:
patron = "<span class='current'>.+?href='(.+?)'>"
patron = 'rel="next" href="([^"]+)'
next_page_url = scrapertools.find_single_match(data, patron)
if next_page_url != "":
item.url = next_page_url

View File

@@ -11,7 +11,7 @@ from core.item import Item
from platformcode import config, platformtools, logger
from channelselector import get_thumb
host = "http://www.clasicofilm.com/"
host = "http://www.classicofilm.com/"
# Configuracion del canal
__modo_grafico__ = config.get_setting('modo_grafico', 'clasicofilm')
__perfil__ = config.get_setting('perfil', 'clasicofilm')
@@ -34,8 +34,8 @@ def mainlist(item):
itemlist.append(item.clone(action="peliculas", title=" Novedades",
url = host + "feeds/posts/summary?start-index=1&max-results=20&alt=json-in-script&callback=finddatepost",
thumbnail=get_thumb('newest', auto=True), text_color=color1))
itemlist.append(item.clone(action="generos", title=" Por géneros", url=host,
thumbnail=get_thumb('genres', auto=True), text_color=color1))
#itemlist.append(item.clone(action="generos", title=" Por géneros", url=host,
# thumbnail=get_thumb('genres', auto=True), text_color=color1))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3,
thumbnail=get_thumb('search', auto=True)))
@@ -92,12 +92,16 @@ def peliculas(item):
data = scrapertools.find_single_match(data, 'finddatepost\((\{.*?\]\}\})\);')
data = jsontools.load(data)["feed"]
for entry in data["entry"]:
bb=jsontools.dump(entry["author"])
aa=scrapertools.find_single_match(bb, '(?s)src": "([^"]+)')
if "Enviar comentarios" in entry: continue
for link in entry["link"]:
if link["rel"] == "alternate":
title = link["title"]
url = link["href"]
break
thumbnail = entry["media$thumbnail"]["url"].replace("s72-c/", "")
thumbnail = "https:" + bb
thumbnail = thumbnail.replace("s72-c/", "") #"" #entry["media$thumbnail"]["url"].replace("s72-c/", "")
try:
title_split = re.split(r"\s*\((\d)", title, 1)
year = title_split[1] + scrapertools.find_single_match(title_split[2], '(\d{3})\)')

View File

@@ -47,11 +47,13 @@ def catalogo(item):
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/movies"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist
@@ -66,14 +68,15 @@ def categorias(item):
scrapedplot = ""
scrapedtitle = scrapedtitle
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<img class=".*?" src="([^"]+)".*?<div class="tr">(.*?)</div>.*?<a href="([^"]+)\s*" class="vttl.*?">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
@@ -83,25 +86,26 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist
def play(item):
logger.info()
itemlist = []
-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data
patron = '"url"\:"(.*?)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
scrapedurl = scrapedurl.replace("\/", "/")
title = scrapedurl
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo"))
return itemlist
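The play() above pulls "url" values out of JSON embedded in the page with a regex and then unescapes the "\/" sequences by hand with replace(). json.loads performs both steps at once; a small sketch with a made-up fragment:

import json

fragment = '{"url":"https:\\/\\/cdn.example.com\\/video.mp4"}'
print json.loads(fragment)["url"]  # https://cdn.example.com/video.mp4

The regex-plus-replace route the channel takes simply avoids parsing the whole document when only one key is needed.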

View File

@@ -7,6 +7,8 @@ from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
+from core import httptools
host ='http://www.coomelonitas.com'
@@ -34,21 +36,22 @@ def search(item, texto):
def categorias(item):
-itemlist = []
-data = scrapertools.cache_page(item.url)
-patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a>'
-matches = re.compile(patron,re.DOTALL).findall(data)
-for scrapedurl,scrapedtitle in matches:
-scrapedplot = ""
-scrapedthumbnail = ""
-itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
-return itemlist
+itemlist = []
+data = httptools.downloadpage(item.url).data
+patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a>'
+matches = re.compile(patron,re.DOTALL).findall(data)
+for scrapedurl,scrapedtitle in matches:
+scrapedplot = ""
+scrapedthumbnail = ""
+itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
+thumbnail=scrapedthumbnail, plot=scrapedplot) )
+return itemlist
def peliculas(item):
logger.info()
itemlist = []
-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data
patron = '<div class="all"(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for match in matches:
@@ -56,9 +59,11 @@ def peliculas(item):
url = scrapertools.find_single_match(match,'<a href="([^"]+)"')
plot = scrapertools.find_single_match(match,'<p class="summary">(.*?)</p>')
thumbnail = scrapertools.find_single_match(match,'<img src="([^"]+)"')
itemlist.append( Item(channel=item.channel, action="findvideos", title=title , fulltitle=title, url=url , thumbnail=thumbnail , plot=plot , viewmode="movie", folder=True) )
itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, viewmode="movie") )
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)" class="siguiente">')
if next_page_url!="":
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist

View File

@@ -198,7 +198,7 @@ def findvideos(item):
for link in scrapertools.find_multiple_matches(data, pattern):
#php.*?=(\w+)&
#url=(.*?)&
-if 'player4' in link:
+if 'player' in link:
logger.info("CUEVANA LINK %s" % link)
if r'%2Fopenload%2F' in link:
link = scrapertools.find_single_match(link, 'h%3D(\w+)')

View File

@@ -160,17 +160,15 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'data-type="(tv).*?'
patron += 'data-post="([^"]+).*?'
patron += 'data-nume="([^"]+).*?'
patron += 'server">([^<]+).*?'
patron = 'player-option-\d+.*?'
patron += 'data-sv="([^"]+).*?'
patron += 'data-user="([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)
headers = {"X-Requested-With":"XMLHttpRequest"}
-for scrapedtype, scrapedpost, scrapednume, scrapedserver in matches:
-post = "action=doo_player_ajax&type=%s&post=%s&nume=%s" %(scrapedtype, scrapedpost, scrapednume)
-data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", headers=headers, post=post).data
-url1 = scrapertools.find_single_match(data1, "src='([^']+)")
-url1 = devuelve_enlace(url1)
+for scrapedserver, scrapeduser in matches:
+data1 = httptools.downloadpage("https://space.danimados.space/gilberto.php?id=%s&sv=mp4" %scrapeduser).data
+url = base64.b64decode(scrapertools.find_single_match(data1, 'hashUser = "([^"]+)'))
+url1 = devuelve_enlace(url)
if "drive.google" in url1:
url1 = url1.replace("view","preview")
if url1:
@@ -192,11 +190,11 @@ def play(item):
def devuelve_enlace(url1):
if 'danimados' in url1:
-url = 'https:'+url1.replace('stream/', 'stream_iframe/')
-id = scrapertools.find_single_match(url, 'iframe/(.*)')
-url = url.replace(id, base64.b64encode(id))
+url = 'https:' + url1
new_data = httptools.downloadpage(url).data
new_data = new_data.replace('"',"'")
url1 = scrapertools.find_single_match(new_data, "iframe src='([^']+)")
+new_data = httptools.downloadpage(url1).data
+url = scrapertools.find_single_match(new_data, "sources:\s*\[\{file:\s*'([^']+)")
if "zkstream" in url or "cloudup" in url:
url1 = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
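The new findvideos() decodes the stream location from a base64-encoded JavaScript variable (hashUser). A minimal sketch of that decoding step; the sample value is fabricated, not captured from the site:

import base64
import re

data1 = 'var hashUser = "aHR0cHM6Ly9leGFtcGxlLmNvbS92aWRlby5tcDQ=";'
encoded = re.search(r'hashUser = "([^"]+)', data1).group(1)
print base64.b64decode(encoded)  # https://example.com/video.mp4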

View File

@@ -16,10 +16,11 @@ host = 'http://www.elreyx.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/peliculasporno.html"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/peliculasporno.html"))
itemlist.append( Item(channel=item.channel, title="Escenas" , action="escenas", url=host + "/index.html"))
itemlist.append( Item(channel=item.channel, title="Productora" , action="productora", url=host + "/index.html"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/peliculasporno.html"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -27,7 +28,7 @@ def mainlist(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://elreyx.com/search-%s" % texto + ".html"
item.url = host + "/search-%s" % texto + ".html"
try:
return escenas(item)
except:
@@ -44,10 +45,12 @@ def productora(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^<]+)" title="View Category ([^<]+)">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
-scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="escenas", title=scrapedtitle , url="https:" + scrapedurl , thumbnail="https:" + scrapedthumbnail , plot=scrapedplot , folder=True) )
thumbnail="https:" + scrapedthumbnail
url="https:" + scrapedurl
itemlist.append( Item(channel=item.channel, action="escenas", title=scrapedtitle, url=url, thumbnail=thumbnail,
plot=scrapedplot) )
return itemlist
@@ -58,73 +61,64 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<td><a href="([^<]+)" title="Movies ([^<]+)">.*?</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
-scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
url="https:" + scrapedurl
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url="https:" + scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def escenas(item):
logger.info()
itemlist = []
-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data
patron = '<div class="notice_image">.*?<a title="([^"]+)" href="([^"]+)">.*?<img src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
-scrapertools.printMatches(matches)
for scrapedtitle,scrapedurl,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url="https:" + scrapedurl , thumbnail="https:" + scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a href=\'([^\']+)\' title=\'Pagina \d+\'><span class="visible-xs-inline">Siguiente</span> &raquo;</a>')
next_page_url = "http://www.elreyx.com/"+str(next_page_url)
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="escenas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
url="https:" + scrapedurl
thumbnail="https:" + scrapedthumbnail
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url, thumbnail=thumbnail,
plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li class="float-xs-right"><a href=\'([^\']+)\' title=\'Pagina \d+\'>')
if next_page == "":
next_page = scrapertools.find_single_match(data,'<li><a href=\'([^\']+)\' title=\'Pagina \d+\'>&raquo;</a>')
if next_page!= "":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="escenas", title="Página Siguiente >>", text_color="blue",
url=next_page) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data
patron = '<div class="captura"><a title="([^"]+)" href="([^"]+)".*?><img src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
-scrapertools.printMatches(matches)
for scrapedtitle,scrapedurl,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url="https:" + scrapedurl , thumbnail="https:" + scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<li><a href=\'([^\']+)\' title=\'Pagina \d+\'>&raquo;</a>')
next_page_url = "http://www.elreyx.com/"+str(next_page_url)
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
-def findvideos(item):
-logger.info()
-itemlist = []
-data = httptools.downloadpage(item.url).data
-data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
-patron = '<iframe src="(.*?)"'
-matches = re.compile(patron,re.DOTALL).findall(data)
-scrapertools.printMatches(matches)
-for scrapedurl in matches:
-scrapedplot = ""
-itemlist.append(item.clone(channel=item.channel, action="play", title=scrapedurl , url=scrapedurl , plot="" , folder=True) )
-patron = '<IFRAME SRC="(.*?)"'
-matches = re.compile(patron,re.DOTALL).findall(data)
-scrapertools.printMatches(matches)
-for scrapedurl in matches:
-scrapedplot = ""
-itemlist.append(item.clone(channel=item.channel, action="play", title=scrapedurl , url=scrapedurl , plot="" , folder=True) )
url="https:" + scrapedurl
thumbnail="https:" + scrapedthumbnail
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url, thumbnail=thumbnail,
plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li><a href=\'([^\']+)\' title=\'Pagina \d+\'>&raquo;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page) )
return itemlist
def play(item):
logger.info()
-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, '<IFRAME SRC="(.*?)"')
if url == "":
url = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
data = httptools.downloadpage(url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
@@ -132,3 +126,4 @@ def play(item):
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist
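A recurring detail in this channel's rewrite: the site's hrefs and thumbnails are protocol-relative ("//host/path"), so the code now builds url and thumbnail by prepending "https:". The normalisation in isolation (made-up href):

scrapedurl = "//www.example.com/pelicula-123.html"
url = "https:" + scrapedurl if scrapedurl.startswith("//") else scrapedurl
print url  # prints https://www.example.com/pelicula-123.html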

View File

@@ -35,22 +35,6 @@ def search(item, texto):
return []
-def catalogo(item):
-logger.info()
-itemlist = []
-data = httptools.downloadpage(item.url).data
-data = scrapertools.get_match(data,'<h3>CLIPS</h3>(.*?)<h3>FILM</h3>')
-data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-patron = '<a class=""\s+title="([^"]+)"\s+href="([^"]+)">'
-matches = re.compile(patron,re.DOTALL).findall(data)
-for scrapedtitle,scrapedurl in matches:
-scrapedplot = ""
-scrapedthumbnail = ""
-scrapedurl = host + scrapedurl
-if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
-itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
-return itemlist
def categorias(item):
logger.info()
itemlist = []
@@ -61,7 +45,8 @@ def categorias(item):
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
@@ -77,25 +62,16 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page_url) )
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
next_page = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page) )
return itemlist
-def findvideos(item):
-logger.info()
-itemlist = []
-data = httptools.downloadpage(item.url).data
-data = scrapertools.get_match(data,'<div id="wrapper" class="ortala">(.*?)<div class="butonlar">')
-patron = '<iframe\s+src="([^"]+)"'
-matches = scrapertools.find_multiple_matches(data, patron)
-for scrapedurl in matches:
-itemlist.append( Item(action="play", title=scrapedurl, fulltitle = item.title, url=scrapedurl))
-return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)

View File

@@ -40,11 +40,11 @@ def categorias(item):
data = httptools.downloadpage(item.url).data
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)".*?>([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
-scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
@@ -55,17 +55,16 @@ def peliculas(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="movie-poster"><a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
-scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
plot = ""
-contentTitle = scrapedtitle
url = urlparse.urljoin(item.url,scrapedurl)
-year = ""
-itemlist.append( Item(channel=item.channel, action="play" , title=scrapedtitle , url=url, thumbnail=scrapedthumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
+itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url, thumbnail=scrapedthumbnail,
+plot=plot, contentTitle = scrapedtitle) )
next_page = scrapertools.find_single_match(data, '<div class="naviright"><a href="([^"]+)">Siguiente &raquo;</a>')
if next_page:
next_page = urlparse.urljoin(item.url, next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ))
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page ))
return itemlist
@@ -76,10 +75,7 @@ def play(item):
url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
url = urlparse.urljoin(item.url, url)
data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
if url == "":
url = scrapertools.find_single_match(data, 'window.location="([^"]+)"')
itemlist = servertools.find_video_items(data=url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle

View File

@@ -101,7 +101,7 @@ def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
item.url = scrapertools.find_single_match(data, 'Playerholder.*?src="([^"]+)"')
item.url = scrapertools.find_single_match(data, '(?i)Playerholder.*?src="([^"]+)"')
if "tubst.net" in item.url:
url = scrapertools.find_single_match(data, 'itemprop="embedURL" content="([^"]+)')
data = httptools.downloadpage(url).data

View File

@@ -43,41 +43,47 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="item"><a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)">.*?<div class="right">([^"]+)</div>'
patron = '<div class="item"><a href="([^"]+)" title="([^"]+)">.*?'
patron += '<img src="([^"]+)">.*?'
patron += '<div class="right">([^"]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle.replace("movies", "") + " (" + cantidad + ")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li class="masonry-item item ".*?<a href="([^"]+)" class="kt_imgrc popfire" title="([^"]+)" >.*?<img src="([^"]+)"'
patron = '<li class="masonry-item item ".*?'
patron += '<a href="([^"]+)" class="kt_imgrc popfire" title="([^"]+)" >.*?'
patron += '<img src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
title = scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle) )
next_page_url = scrapertools.find_single_match(data,'<li itemprop="url" class="current">.*?<a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<source id="video_source_1" src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl in matches:

View File

@@ -42,26 +42,31 @@ def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<a href="http://freepornstreams.org/freepornst/stout.php">Top Sites</a>(.*?)</aside>')
data = scrapertools.get_match(data,'>Top Sites</a>(.*?)</aside>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li id="menu-item-\d+".*?u=([^"]+)">(.*?)</a>'
patron = '<li id="menu-item-\d+".*?<a href="([^"]+)">([^"]+)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li><a href="([^"]+)" rel="nofollow">(.*?)</a>'
data = scrapertools.get_match(data,'Top Tags(.*?)</ul>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
scrapedurl = scrapedurl.replace("http://freepornstreams.org/freepornst/stout.php?s=100,75,65:*&#038;u=", "")
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
@@ -70,24 +75,27 @@ def peliculas(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<article id="post-\d+".*?<a href="([^"]+)" rel="bookmark">(.*?)</a>.*?<img src="([^"]+)"'
patron = '<article id="post-\d+".*?'
patron += '<a href="([^"]+)" rel="bookmark">(.*?)</a>.*?'
patron += '<img src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
title = scrapedtitle
thumbnail = scrapedthumbnail.replace("jpg#", "jpg")
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, fulltitle=title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
plot=plot, fulltitle=title) )
next_page_url = scrapertools.find_single_match(data, '<div class="nav-previous"><a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.fulltitle
@@ -95,4 +103,4 @@ def play(item):
videoitem.thumbnail = item.thumbnail
videochannel=item.channel
return itemlist

View File

@@ -0,0 +1,173 @@
{
"id": "gnula_biz",
"name": "Gnula.Biz",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "gnula_biz.png",
"banner": "gnula_biz.png",
"categories": [
"movie",
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE",
"VO"
]
},
{
"id": "save_last_search",
"type": "bool",
"label": "Guardar última búsqueda",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Incluir en Novedades - Series",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1",
"Ninguno"
]
},
{
"id": "menu_info",
"type": "bool",
"label": "Mostrar menú intermedio película/episodio",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "last_page",
"type": "bool",
"label": "Ocultar opción elegir página en películas (Kodi)",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "filtro_defecto_peliculas",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_peliculas1",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_peliculas2",
"type": "label",
"enabled": true,
"visible": false
},
{
"pers_peliculas3": {
"type": "label",
"enabled": true,
"visible": false
}
},
{
"id": "filtro_defecto_series",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_series1",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_series2",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_series3",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "last_search",
"type": "text",
"enabled": true,
"visible": false
}
]
}

View File

@@ -0,0 +1,790 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
IDIOMAS = {'Latino': 'LAT', 'Castellano':'CAST', 'Vo':'VO', 'Vose': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'powvideo', 'rapidvideo', 'streamango', 'streamcloud', 'flashx', 'gamovideo', 'streamplay']
__modo_grafico__ = config.get_setting('modo_grafico', 'gnula_biz')
__perfil__ = int(config.get_setting('perfil', "gnula_biz"))
__menu_info__ = config.get_setting('menu_info', 'gnula_biz')
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'gnula_biz')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'gnula_biz')
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
host = "http://gnula.biz"
def mainlist(item):
logger.info()
item.text_color = color1
itemlist = []
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(item.clone(action="seccion_peliculas", title="Películas", fanart="http://i.imgur.com/PjJaW8o.png",
url=host + "/catalogue?type=peliculas", thumbnail=get_thumb('movies', auto=True)))
# Series section
itemlist.append(item.clone(action="seccion_series", title="Series",
url=host + "/ultimos-capitulos", fanart="http://i.imgur.com/9loVksV.png",
thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(item.clone(action="peliculas", title="Documentales", fanart="http://i.imgur.com/Q7fsFI6.png",
url=host + "/catalogue?type=peliculas&genre=documental",
thumbnail=get_thumb('documentaries', auto=True)))
if config.get_setting("adult_mode") != 0:
itemlist.append(item.clone(action="peliculas", title="Sección Adultos +18",
url=host + "/catalogue?type=adultos",
fanart="http://i.imgur.com/kIvE1Zh.png", thumbnail=get_thumb('adults', auto=True)))
itemlist.append(item.clone(title="Buscar...", action="local_search", thumbnail=get_thumb('search', auto=True)))
itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search?q=%s" % texto
try:
return busqueda(item)
# Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def local_search(item):
logger.info()
text = ""
if config.get_setting("save_last_search", item.channel):
text = config.get_setting("last_search", item.channel)
from platformcode import platformtools
texto = platformtools.dialog_input(default=text, heading="Buscar en Gnula.Biz")
if texto is None:
return
if config.get_setting("save_last_search", item.channel):
config.set_setting("last_search", texto, item.channel)
return search(item, texto)
def busqueda(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="poster-media-card">(.*?)(?:<li class="search-results-item media-item">|<footer>)'
bloque = scrapertools.find_multiple_matches(data, patron)
for match in bloque:
patron = 'href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?' \
'<p class="search-results-main-info">.*?del año (\d+).*?' \
'p class.*?>(.*?)<'
matches = scrapertools.find_multiple_matches(match, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, year, plot in matches:
scrapedtitle = scrapedtitle.capitalize()
item.infoLabels["year"] = year
plot = scrapertools.htmlclean(plot)
new_item = Item(channel=item.channel, thumbnail= scrapedthumbnail, plot=plot)
if "/serie/" in scrapedurl:
new_item.show = scrapedtitle
new_item.contentType = 'tvshow'
scrapedurl += "/episodios"
title = " [Serie]"
new_item.action = 'episodios'
elif "/pelicula/" in scrapedurl:
new_item.action = "findvideos"
filter_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
new_item.contentType = "movie"
new_item.extra='media'
new_item.contentTitle= scrapedtitle
new_item.infoLabels['filtro'] = filter_list
else:
continue
new_item.title = scrapedtitle + " (" + year + ")"
new_item.url = scrapedurl
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Más resultados')
if next_page != "":
next_page = urlparse.urljoin(host, next_page)
itemlist.append(Item(channel=item.channel, action="busqueda", title=">> Siguiente", url=next_page,
thumbnail=item.thumbnail, text_color=color3))
return itemlist
def filtro(item):
logger.info()
list_controls = []
valores = {}
strings = {}
# Use the default/saved values or those of the custom filter
if not item.values:
valores_guardados = config.get_setting("filtro_defecto_" + item.extra, item.channel)
else:
valores_guardados = item.values
item.values = ""
if valores_guardados:
dict_values = valores_guardados
else:
dict_values = None
if dict_values:
dict_values["filtro_per"] = 0
excluidos = ['País', 'Películas', 'Series', 'Destacar']
data = httptools.downloadpage(item.url).data
matches = scrapertools.find_multiple_matches(data, '<div class="dropdown-sub[^>]+>(\S+)(.*?)</ul>')
i = 0
for filtro_title, values in matches:
if filtro_title in excluidos: continue
filtro_title = filtro_title.replace("Tendencia", "Ordenar por")
id = filtro_title.replace("Género", "genero").replace("Año", "year").replace(" ", "_").lower()
list_controls.append({'id': id, 'label': filtro_title, 'enabled': True,
'type': 'list', 'default': 0, 'visible': True})
valores[id] = []
valores[id].append('')
strings[filtro_title] = []
list_controls[i]['lvalues'] = []
if filtro_title == "Ordenar por":
list_controls[i]['lvalues'].append('Más recientes')
strings[filtro_title].append('Más recientes')
else:
list_controls[i]['lvalues'].append('Cualquiera')
strings[filtro_title].append('Cualquiera')
patron = '<li>.*?(?:genre|release|quality|language|order)=([^"]+)">([^<]+)<'
matches_v = scrapertools.find_multiple_matches(values, patron)
for value, key in matches_v:
if value == "action-adventure": continue
list_controls[i]['lvalues'].append(key)
valores[id].append(value)
strings[filtro_title].append(key)
i += 1
item.valores = valores
item.strings = strings
if "Filtro Personalizado" in item.title:
return filtrado(item, valores_guardados)
list_controls.append({'id': 'espacio', 'label': '', 'enabled': False,
'type': 'label', 'default': '', 'visible': True})
list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True,
'type': 'bool', 'default': False, 'visible': True})
list_controls.append({'id': 'filtro_per', 'label': 'Guardar filtro en acceso directo...', 'enabled': True,
'type': 'list', 'default': 0, 'visible': True, 'lvalues': ['No guardar', 'Filtro 1',
'Filtro 2', 'Filtro 3']})
list_controls.append({'id': 'remove', 'label': 'Eliminar filtro personalizado...', 'enabled': True,
'type': 'list', 'default': 0, 'visible': True, 'lvalues': ['No eliminar', 'Filtro 1',
'Filtro 2', 'Filtro 3']})
from platformcode import platformtools
return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values,
caption="Filtra los resultados", item=item, callback='filtrado')
def filtrado(item, values):
values_copy = values.copy()
# Save the filter so it becomes the one loaded by default
if "save" in values and values["save"]:
values_copy.pop("remove")
values_copy.pop("filtro_per")
values_copy.pop("save")
config.set_setting("filtro_defecto_" + item.extra, values_copy, item.channel)
# Delete the chosen custom filter
if "remove" in values and values["remove"] != 0:
config.set_setting("pers_" + item.extra + str(values["remove"]), "", item.channel)
values_copy = values.copy()
# Save the filter as a custom shortcut
if "filtro_per" in values and values["filtro_per"] != 0:
index = item.extra + str(values["filtro_per"])
values_copy.pop("filtro_per")
values_copy.pop("save")
values_copy.pop("remove")
config.set_setting("pers_" + index, values_copy, item.channel)
genero = item.valores["genero"][values["genero"]]
year = item.valores["year"][values["year"]]
calidad = item.valores["calidad"][values["calidad"]]
idioma = item.valores["idioma"][values["idioma"]]
order = item.valores["ordenar_por"][values["ordenar_por"]]
strings = []
for key, value in dict(item.strings).items():
key2 = key.replace("Género", "genero").replace("Año", "year").replace(" ", "_").lower()
strings.append(key + ": " + value[values[key2]])
item.valores = "Filtro: " + ", ".join(sorted(strings))
item.strings = ""
item.url = host + "/catalogue?type=%s&genre=%s&release=%s&quality=%s&language=%s&order=%s" % \
(item.extra, genero, year, calidad, idioma, order)
return globals()[item.extra](item)
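# Sketch of the persistence round-trip the three blocks above implement, using
# only keys that occur in this file ("filtro_defecto_*", "pers_*"); _slot_demo
# is a hypothetical helper, not part of the channel flow:
def _slot_demo(item, values_copy):
    # Save the chosen values into shortcut slot 1 of the movies section...
    config.set_setting("pers_peliculas1", values_copy, item.channel)
    # ...and read them back, as seccion_peliculas() does below when it rebuilds
    # the "Filtro Personalizado" menu entries and feeds them to filtro().
    return config.get_setting("pers_peliculas1", item.channel)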
def seccion_peliculas(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="peliculas", title="Novedades", fanart="http://i.imgur.com/PjJaW8o.png",
url=host + "/catalogue?type=peliculas", thumbnail=get_thumb('newest', auto=True)))
itemlist.append(item.clone(action="peliculas", title="Estrenos",
url=host + "/estrenos-de-cine", fanart="http://i.imgur.com/PjJaW8o.png",
thumbnail=get_thumb('premieres', auto=True)))
itemlist.append(item.clone(action="filtro", title="Filtrar películas", extra="peliculas",
url=host + "/catalogue?type=peliculas",
fanart="http://i.imgur.com/PjJaW8o.png"))
# Custom filters for movies
for i in range(1, 4):
filtros = config.get_setting("pers_peliculas" + str(i), item.channel)
if filtros:
title = "Filtro Personalizado " + str(i)
new_item = item.clone()
new_item.values = filtros
itemlist.append(new_item.clone(action="filtro", title=title, fanart="http://i.imgur.com/PjJaW8o.png",
url=host + "/catalogue?type=peliculas", extra="peliculas"))
itemlist.append(item.clone(action="mapa", title="Mapa de películas", extra="peliculas",
url=host + "/mapa-de-peliculas",
fanart="http://i.imgur.com/PjJaW8o.png"))
return itemlist
def seccion_series(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="ultimos", title="Últimos capítulos",
url=host + "/ultimos-capitulos", fanart="http://i.imgur.com/9loVksV.png",
thumbnail=get_thumb('new episodes', auto=True)))
itemlist.append(item.clone(action="series", title="Series recientes",
url=host + "/catalogue?type=series",
fanart="http://i.imgur.com/9loVksV.png", thumbnail=get_thumb('recents', auto=True)))
itemlist.append(item.clone(action="filtro", title="Filtrar series", extra="series",
url=host + "/catalogue?type=series",
fanart="http://i.imgur.com/9loVksV.png"))
# Custom filters for series
for i in range(1, 4):
filtros = config.get_setting("pers_series" + str(i), item.channel)
if filtros:
title = " Filtro Personalizado " + str(i)
new_item = item.clone()
new_item.values = filtros
itemlist.append(new_item.clone(action="filtro", title=title, fanart="http://i.imgur.com/9loVksV.png",
url=host + "/catalogue?type=series", extra="series"))
itemlist.append(item.clone(action="mapa", title="Mapa de series", extra="series",
url=host + "/mapa-de-series",
fanart="http://i.imgur.com/9loVksV.png"))
return itemlist
def mapa(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li class="sitemap-initial"> <a class="initial-link" href="([^"]+)">(.*?)</a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
scrapedurl = host + scrapedurl
scrapedtitle = scrapedtitle.capitalize()
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action=item.extra, extra="mapa"))
return itemlist
def peliculas(item):
logger.info()
itemlist = []
if "valores" in item and item.valores:
itemlist.append(item.clone(action="", title=item.valores, text_color=color4))
action = "findvideos"
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_multiple_matches(data,
'<div class="media-card "(.*?)<div class="hidden-info">')
for match in bloque:
if item.extra == "mapa":
patron = '.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
matches = scrapertools.find_multiple_matches(match, patron)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(host, scrapedurl)
filter_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
itemlist.append(Item(channel=item.channel, action=action, title=scrapedtitle, url=url, extra="media",
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle,
text_color=color2, contentType="movie", infoLabels={'filtro':filter_list}))
else:
patron = '<div class="audio-info">(.*?)<div (class="quality.*?)src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
matches = scrapertools.find_multiple_matches(match, patron)
for idiomas, calidad, scrapedthumbnail, scrapedurl, scrapedtitle in matches:
calidad = scrapertools.find_single_match(calidad, '<div class="quality-info".*?>([^<]+)</div>')
if calidad:
calidad = calidad.capitalize().replace("Hd", "HD")
audios = []
if "medium-es" in idiomas: audios.append('CAST')
if "medium-vs" in idiomas: audios.append('VOSE')
if "medium-la" in idiomas: audios.append('LAT')
if "medium-en" in idiomas or 'medium-"' in idiomas:
audios.append('VO')
title = "%s [%s]" % (scrapedtitle, "/".join(audios))
if calidad:
title += " (%s)" % calidad
url = urlparse.urljoin(host, scrapedurl)
filter_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, extra="media",
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle,
text_color=color2, contentType="movie", quality=calidad, language=audios,
infoLabels={'filtro':filter_list}))
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if next_page != "" and item.title != "":
itemlist.append(Item(channel=item.channel, action="peliculas", title=">> Siguiente", url=next_page,
thumbnail=item.thumbnail, extra=item.extra, text_color=color3))
if not config.get_setting("last_page", item.channel) and config.is_xbmc():
itemlist.append(Item(channel=item.channel, action="select_page", title="Ir a página...", url=next_page,
thumbnail=item.thumbnail, text_color=color5))
return itemlist
def ultimos(item):
logger.info()
item.text_color = color2
action = "episodios"
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_multiple_matches(data, '<div class="media-card "(.*?)<div class="info-availability '
'one-line">')
for match in bloque:
patron = '<div class="audio-info">(.*?)<img class.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
matches = scrapertools.find_multiple_matches(match, patron)
for idiomas, scrapedthumbnail, scrapedurl, scrapedtitle in matches:
show = re.sub(r'(\s*[\d]+x[\d]+\s*)', '', scrapedtitle)
audios = []
if "medium-es" in idiomas: audios.append('CAST')
if "medium-vs" in idiomas: audios.append('VOSE')
if "medium-la" in idiomas: audios.append('LAT')
if "medium-en" in idiomas or 'medium-"' in idiomas:
audios.append('VO')
title = "%s - %s" % (show, re.sub(show, '', scrapedtitle))
if audios:
title += " [%s]" % "/".join(audios)
url = urlparse.urljoin(host, scrapedurl)
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=scrapedthumbnail,
contentSerieName=show, fulltitle=show, show=show,
text_color=color2, extra="ultimos", contentType="tvshow"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
if next_page != "":
itemlist.append(item.clone(action="ultimos", title=">> Siguiente", url=next_page, text_color=color3))
return itemlist
def series(item):
logger.info()
itemlist = []
if "valores" in item:
itemlist.append(item.clone(action="", title=item.valores, text_color=color4))
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_multiple_matches(data, '<div class="media-card "(.*?)<div class="hidden-info">')
for match in bloque:
patron = '<img class.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
matches = scrapertools.find_multiple_matches(match, patron)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(host, scrapedurl + "/episodios")
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=url,
thumbnail=scrapedthumbnail, contentSerieName=scrapedtitle, fulltitle=scrapedtitle,
show=scrapedtitle, text_color=color2, contentType="tvshow"))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
except:
pass
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
if next_page != "":
title = ">> Siguiente - Página " + scrapertools.find_single_match(next_page, 'page=(\d+)')
itemlist.append(Item(channel=item.channel, action="series", title=title, url=next_page,
thumbnail=item.thumbnail, extra=item.extra, text_color=color3))
return itemlist
def menu_info(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
year = scrapertools.find_single_match(data, '<div class="media-summary">.*?release.*?>(\d+)<')
if year != "" and not "tmdb_id" in item.infoLabels:
try:
from core import tmdb
item.infoLabels["year"] = year
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
if item.infoLabels["plot"] == "":
sinopsis = scrapertools.find_single_match(data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece')
item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
id = scrapertools.find_single_match(item.url, '/(\d+)/')
data_trailer = httptools.downloadpage(host + "/media/trailer?idm=%s&mediaType=1" % id, ignore_response_code=True).data
try:
trailer_url = jsontools.load(data_trailer)["video"]["url"]
if trailer_url != "": item.infoLabels["trailer"] = trailer_url
except:
pass
title = "Ver enlaces %s - [" + item.contentTitle + "]"
itemlist.append(item.clone(action="findvideos", title=title % "Online", extra="media", type="streaming"))
itemlist.append(item.clone(action="findvideos", title=title % "de Descarga", extra="media", type="download"))
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta", context=""))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library", text_color=color5,
title="Añadir película a la videoteca", url=item.url, thumbnail=item.thumbnail,
fanart=item.fanart, fulltitle=item.fulltitle,
extra="media|"))
return itemlist
def episodios(item):
logger.info()
itemlist = []
if item.extra == "ultimos":
data = httptools.downloadpage(item.url).data
item.url = scrapertools.find_single_match(data, '<a href="([^"]+)" class="h1-like media-title"')
item.url += "/episodios"
data = httptools.downloadpage(item.url).data
action = "findvideos"
patron = '<div class="ep-list-number">.*?href="([^"]+)">([^<]+)</a>.*?<span class="name">([^<]+)</span>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, episode, scrapedtitle in matches:
new_item = item.clone(action=action, url=scrapedurl, text_color=color2, contentType="episode")
new_item.contentSeason = episode.split("x")[0]
new_item.contentEpisodeNumber = episode.split("x")[1]
new_item.title = episode + " - " + scrapedtitle
new_item.extra = "episode"
if "episodios" in item.extra or item.path:
new_item.extra = "episode|"
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist.reverse()
if "episodios" not in item.extra and not item.path:
id = scrapertools.find_single_match(item.url, '/(\d+)/')
data_trailer = httptools.downloadpage(host + "/media/trailer?idm=%s&mediaType=1" % id, ignore_response_code=True).data
try:
trailer_url = jsontools.load(data_trailer)["video"]["url"]
if trailer_url != "": item.infoLabels["trailer"] = trailer_url
except:
pass
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta"))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, action="add_serie_to_library", text_color=color5,
title="Añadir serie a la videoteca", show=item.show, thumbnail=item.thumbnail,
url=item.url, fulltitle=item.fulltitle, fanart=item.fanart,
extra="episodios###episodios",
contentTitle=item.fulltitle))
return itemlist
def menu_info_episode(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if item.show == "":
item.show = scrapertools.find_single_match(data, 'class="h1-like media-title".*?>([^<]+)</a>')
episode = scrapertools.find_single_match(data, '<span class="indicator">([^<]+)</span>')
item.infoLabels["season"] = episode.split("x")[0]
item.infoLabels["episode"] = episode.split("x")[1]
try:
from core import tmdb
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
if item.infoLabels["plot"] == "":
sinopsis = scrapertools.find_single_match(data, 'id="episode-plot">(.*?)</p>')
if not "No hay sinopsis" in sinopsis:
item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
title = "Ver enlaces %s - [" + item.show + " " + episode + "]"
itemlist.append(item.clone(action="findvideos", title=title % "Online", extra="episode", type="streaming"))
itemlist.append(item.clone(action="findvideos", title=title % "de Descarga", extra="episode", type="download"))
siguiente = scrapertools.find_single_match(data, '<a class="episode-nav-arrow next" href="([^"]+)" title="([^"]+)"')
if siguiente:
titulo = ">> Siguiente Episodio - [" + siguiente[1] + "]"
itemlist.append(item.clone(action="menu_info_episode", title=titulo, url=siguiente[0], extra="",
text_color=color1))
patron = '<a class="episode-nav-arrow previous" href="([^"]+)" title="([^"]+)"'
anterior = scrapertools.find_single_match(data, patron)
if anterior:
titulo = "<< Episodio Anterior - [" + anterior[1] + "]"
itemlist.append(item.clone(action="menu_info_episode", title=titulo, url=anterior[0], extra="",
text_color=color3))
url_serie = scrapertools.find_single_match(data, '<a href="([^"]+)" class="h1-like media-title"')
url_serie += "/episodios"
itemlist.append(item.clone(title="Ir a la lista de capítulos", action="episodios", url=url_serie, extra="",
text_color=color4))
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta", context=""))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
if not "|" in item.extra and not __menu_info__:
data = httptools.downloadpage(item.url, add_referer=True).data
year = scrapertools.find_single_match(data, '<div class="media-summary">.*?release.*?>(\d+)<')
if year != "" and not "tmdb_id" in item.infoLabels:
try:
from core import tmdb
item.infoLabels["year"] = year
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
if item.infoLabels["plot"] == "":
sinopsis = scrapertools.find_single_match(data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece')
item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
id = scrapertools.find_single_match(item.url, '/(\d+)/')
if id == "":
data = httptools.downloadpage(item.url, add_referer=True).data
id = scrapertools.find_single_match(str(data), 'data-idm="(.*?)"')
if "|" in item.extra or not __menu_info__:
extra = item.extra
if "|" in item.extra:
extra = item.extra[:-1]
url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, extra, "streaming")
itemlist = get_enlaces(item, url, "Online")
url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, extra, "download")
itemlist.extend(get_enlaces(item, url, "de Descarga"))
if extra == "media":
data_trailer = httptools.downloadpage(host + "/media/trailer?idm=%s&mediaType=1" % id, ignore_response_code=True).data
try:
trailer_url = jsontools.load(data_trailer)["video"]["url"]
if trailer_url != "": item.infoLabels["trailer"] = trailer_url
except:
pass
title = "Ver enlaces %s - [" + item.contentTitle + "]"
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta", context=""))
if config.get_videolibrary_support() and not "|" in item.extra:
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library", text_color=color5,
title="Añadir película a la videoteca", url=item.url, thumbnail=item.thumbnail,
fanart=item.fanart, fulltitle=item.fulltitle,
extra="media|"))
else:
url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, item.extra, item.type)
type = item.type.replace("streaming", "Online").replace("download", "de Descarga")
itemlist.extend(get_enlaces(item, url, type))
if __comprueba_enlaces__:
for it in itemlist:
if it.server != '' and it.url != '':
it.url = normalizar_url(it.url, it.server)
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist
def normalizar_url(url, server):
# Run the url through findvideosbyserver to rebuild it from the pattern/url entries in the server json files
# Exceptions copied from the play function
url = url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=")
url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
enlaces = servertools.findvideosbyserver(url, server)[0]
if enlaces[1] != '':
return enlaces[1]
return url
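# The tuple shape normalizar_url relies on is inferred from this file's own
# calls (enlaces[1] = url, enlaces[2] = server); a hypothetical standalone
# helper making that contract explicit:
def _resolved_url_or_empty(url, server):
    # findvideosbyserver returns a list of match tuples; index 1 holds the
    # rebuilt url and is empty when the server patterns do not match.
    enlaces = servertools.findvideosbyserver(url, server)
    return enlaces[0][1] if enlaces else ''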
def get_enlaces(item, url, type):
itemlist = []
data = httptools.downloadpage(url, add_referer=True).data
if type == "Online":
gg = httptools.downloadpage(item.url, add_referer=True).data
bloque = scrapertools.find_single_match(gg, 'class="tab".*?button show')
patron = 'a href="#([^"]+)'
patron += '.*?language-ES-medium ([^"]+)'
patron += '.*?</i>([^<]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedopcion, scrapedlanguage, scrapedcalidad in matches:
google_url = scrapertools.find_single_match(bloque, 'id="%s.*?src="([^"]+)' % scrapedopcion)
if "medium-es" in scrapedlanguage: language = "CAST"
if "medium-en" in scrapedlanguage: language = "VO"
if "medium-vs" in scrapedlanguage: language = "VOSE"
if "medium-la" in scrapedlanguage: language = "LAT"
titulo = " [%s/%s]" % (language, scrapedcalidad.strip())
itemlist.append(
item.clone(action="play", url=google_url, title=" Ver en Gvideo" + titulo, text_color=color2,
extra="", server="gvideo", language=language, quality=scrapedcalidad.strip()))
patron = '<div class="available-source".*?data-url="([^"]+)".*?class="language.*?title="([^"]+)"' \
'.*?class="source-name.*?>\s*([^<]+)<.*?<span class="quality-text">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
if matches:
for scrapedurl, idioma, server, calidad in matches:
if server == "streamin": server = "streaminto"
if server == "waaw" or server == "miracine": server = "netutv"
if server == "ul": server = "uploadedto"
if server == "player": server = "vimpleru"
if servertools.is_server_enabled(server):
scrapedtitle = " Ver en " + server.capitalize() + " [" + idioma + "/" + calidad + "]"
itemlist.append(item.clone(action="play", url=scrapedurl, title=scrapedtitle, text_color=color2,
extra="", server=server, language=IDIOMAS[idioma]))
if len(itemlist) == 1:
itemlist.append(item.clone(title=" No hay enlaces disponibles", action="", text_color=color2))
return itemlist
def play(item):
logger.info()
itemlist = []
if item.extra != "" and "google" not in item.url:
post = "id=%s" % item.extra
data = httptools.downloadpage(host + "/goto/", post=post, add_referer=True).data
item.url = scrapertools.find_single_match(data, 'document.location\s*=\s*"([^"]+)"')
url = item.url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=")
url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
if item.server:
enlaces = servertools.findvideosbyserver(url, item.server)[0]
else:
enlaces = servertools.findvideos(url)[0]
itemlist.append(item.clone(url=enlaces[1], server=enlaces[2]))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == "peliculas":
item.url = host + "/catalogue?type=peliculas"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
if categoria == "series":
item.url = host + "/ultimos-capitulos"
item.action = "ultimos"
itemlist = ultimos(item)
if itemlist[-1].action == "ultimos":
itemlist.pop()
# Catch the exception so a failing channel does not interrupt the "newest" section
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def select_page(item):
import xbmcgui
dialog = xbmcgui.Dialog()
number = dialog.numeric(0, "Introduce el número de página")
if number != "":
item.url = re.sub(r'page=(\d+)', "page=" + number, item.url)
return peliculas(item)

View File

@@ -206,11 +206,12 @@ def episodesxseasons(item):
return itemlist
def findvideos(item):
from lib.generictools import privatedecrypt
logger.info()
from lib import jsunpack
itemlist = []
data = get_source(item.url)
patron = "onclick=clickLink\(this, '([^']+)', '([^']+)', '([^']+)'\);>"
@@ -218,18 +219,8 @@ def findvideos(item):
headers = {'referer': item.url}
for url, quality, language in matches:
data = httptools.downloadpage(url, headers=headers, follow_redirects=False).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
packed = scrapertools.find_single_match(data, '(eval\(.*?);var')
unpacked = jsunpack.unpack(packed)
server = scrapertools.find_single_match(unpacked, "src:.'(http://\D+)/")
id = scrapertools.find_single_match(unpacked, "src:.'http://\D+/.*?description:.'(.*?).'")
if server == '':
if 'powvideo' in unpacked:
id = scrapertools.find_single_match(unpacked, ",description:.'(.*?).'")
server = 'https://powvideo.net'
url = '%s/%s' % (server, id)
if server != '' and id != '':
url = privatedecrypt(url, headers)
if url != '':
language = IDIOMAS[language]
if quality.lower() == 'premium':
quality = '720p'

View File

@@ -92,7 +92,7 @@
"id": "timeout_downloadpage",
"type": "list",
"label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
"default": 10,
"default": 15,
"enabled": true,
"visible": true,
"lvalues": [
@@ -106,7 +106,12 @@
"7",
"8",
"9",
"10"
"10",
"11",
"12",
"13",
"14",
"15"
]
}
]

View File

@@ -30,7 +30,7 @@ channel = "grantorrent"
dict_url_seasons = dict()
__modo_grafico__ = config.get_setting('modo_grafico', channel)
timeout = config.get_setting('timeout_downloadpage', channel)
if timeout <= 5: timeout = timeout*2
if timeout > 0 and timeout <= 10: timeout = 15
modo_serie_temp = config.get_setting('seleccionar_serie_temporada', channel)
modo_ultima_temp = config.get_setting('seleccionar_ult_temporadda_activa', channel)
@@ -82,7 +82,7 @@ def submenu(item):
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
except:
pass
@@ -102,15 +102,17 @@ def submenu(item):
itemlist.append(item.clone(action='', title="[COLOR yellow]" + clone_inter.capitalize() + ': [/COLOR]' + intervenido_judicial + '. Reportar el problema en el foro', thumbnail=thumb_intervenido))
return itemlist #Exit
itemlist.append(item.clone(action="listado", title="Novedades", url=host)) #Menú principal películas
itemlist.append(item.clone(action="listado", title="Novedades", url=host)) #Menú principal películas
itemlist.append(item.clone(action="generos", title="Películas **Géneros**", url=host)) #Lista de Géneros
for scrapedurl, scrapedtitle in matches:
scrapedtitle = re.sub('\r\n', '', scrapedtitle).decode('utf8').encode('utf8').strip()
title = re.sub('\r\n', '', scrapedtitle).decode('utf8').encode('utf8').strip()
if not "películas" in scrapedtitle.lower(): #Evita la entrada de ayudas y demás
continue
itemlist.append(item.clone(action="listado", title=scrapedtitle, url=scrapedurl)) #Menú películas
itemlist.append(item.clone(action="listado", title=title, url=scrapedurl)) #Menú películas
else: #Handle series
patron = '<li class="navigation-top-dcha">.*?<a href="(.*?)".*?class="series"> (.*?)\s?<\/a><\/li>'
@@ -124,9 +126,49 @@ def submenu(item):
return itemlist #Exit
for scrapedurl, scrapedtitle in matches:
scrapedtitle = re.sub('\r\n', '', scrapedtitle).decode('utf8').encode('utf8').strip()
title = re.sub('\r\n', '', scrapedtitle).decode('utf8').encode('utf8').strip()
itemlist.append(item.clone(action="listado", title=scrapedtitle, url=scrapedurl)) #Menú series
itemlist.append(item.clone(action="listado", title=title, url=scrapedurl)) #Menú series
itemlist.append(item.clone(action="generos", title="Series **Géneros**", url=host + "series/")) #Lista de Géneros
return itemlist
def generos(item):
logger.info()
itemlist = []
thumb_buscar = get_thumb("search.png")
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
except:
pass
if not data:
logger.error("ERROR 01: GÉNEROS: La Web no responde o ha cambiado de URL: " + item.url)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
return itemlist #Something failed; render what we have
item.extra2 = 'generos'
patron = '<div class="titulo-sidebar">CATEGORÍAS<\/div><div class="contenedor-sidebar"><ul class="categorias-home">(.*?)<\/ul><\/div><\/aside>'
data = scrapertools.find_single_match(data, patron)
patron = '\s*<a href="([^"]+)"><li class="categorias">(.*?)<\/li><\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches:
item = generictools.web_intervenida(item, data) #Check that it has not been shut down
if item.intervencion: #It has been shut down by court order
for clone_inter, autoridad in item.intervencion:
thumb_intervenido = get_thumb(autoridad)
itemlist.append(item.clone(action='', title="[COLOR yellow]" + clone_inter.capitalize() + ': [/COLOR]' + intervenido_judicial + '. Reportar el problema en el foro', thumbnail=thumb_intervenido))
return itemlist #Exit
for scrapedurl, scrapedtitle in matches:
title = re.sub('\r\n', '', scrapedtitle).decode('utf8').encode('utf8').strip().capitalize()
itemlist.append(item.clone(action="listado", title=title, url=scrapedurl)) #Listado de géneros
return itemlist
@@ -157,7 +199,7 @@ def listado(item):
timeout_search = timeout # Timeout for downloads
if item.action == 'search':
timeout_search = int(timeout * 1.5) # Slightly longer timeout for searches
if timeout_search < 10:
if timeout_search > 0 and timeout_search < 10:
timeout_search = 10 # Slightly longer timeout for searches
#Maximum number of lines TMDB allows (40). Max 5 pages per itemlist so performance does not degrade.
@@ -205,10 +247,10 @@ def listado(item):
if "page/" in item.post:
item.post = re.sub(r"page\/\d+\/", "page/%s/" % post, item.post)
else:
if "/series" in item.post:
item.post = re.sub(r"\/series\/", "/series/page/%s/" % post, item.post)
elif "/categoria" in item.post:
if "/categoria" in item.post:
item.post = re.sub(r"\/$", "/page/%s/" % post, item.post)
elif "/series" in item.post:
item.post = re.sub(r"\/series\/", "/series/page/%s/" % post, item.post)
else:
item.post = re.sub(r"\.net\/", ".net/page/%s/" % post, item.post)
@@ -328,9 +370,12 @@ def listado(item):
item_local.url = urlparse.urljoin(host, scrapedurl)
if "categoria" in item.url: #En páginas de Categorias no viene ni la calidad ni el idioma
item_local.quality = scrapertools.find_single_match(item.url, r'\/categoria\/(.*?)\/')
if "4k" in item_local.quality.lower():
item_local.quality = "4K HDR" #Maquillamos un poco la calidad
if not item.extra2:
item_local.quality = scrapertools.find_single_match(item.url, r'\/categoria\/(.*?)\/')
if "4k" in item_local.quality.lower():
item_local.quality = "4K HDR" #Maquillamos un poco la calidad
else:
item_local.quality = ''
lang = '' #Ignoramos el idioma
elif not "post" in quality:
item_local.quality = quality #Salvamos la calidad en el resto de páginas

View File

@@ -9,7 +9,7 @@
"banner": "",
"categories": [
"movie",
"vose",
"vos"
],
"settings": [
{

View File

@@ -53,7 +53,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title="Géneros", action="generos", url="http://www.inkapelis.com/", text_color=color1,
thumbnail=get_thumb('genres', auto=True),))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="", text_color=color1))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url="http://www.inkapelis.com/?s=", text_color=color1))
itemlist.append(Item(channel=item.channel, action="", title=""))
itemlist.append(
Item(channel=item.channel, action="filtro", title="Filtrar películas", url="http://www.inkapelis.com/?s=", text_color=color1))

View File

@@ -10,7 +10,7 @@
"categories": [
"movie",
"tvshow",
"vose",
"vose"
],
"settings": [
{

View File

@@ -102,14 +102,14 @@ def sub_search(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
data = scrapertools.find_single_match(data, '<header><h1>Resultados encontrados(.*?)resppages')
# logger.info(data)
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />.*?' # url, img, title
patron += '<span class="year">([^<]+)</span>'
data = scrapertools.find_single_match(data, 'Archivos (.*?)resppages')
patron = 'img alt="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += 'href="([^"]+)".*?'
patron += 'fechaestreno">([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
for scrapedtitle, scrapedthumbnail, scrapedurl, year in matches:
if 'tvshows' not in scrapedurl:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
@@ -133,18 +133,19 @@ def peliculas(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
data = scrapertools.decodeHtmlentities(data)
# logger.info(data)
# img, title
patron = '<article id="post-\w+" class="item movies"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'
patron += '<span class="quality">([^<]+)</span> </div>\s*<a href="([^"]+)">.*?' # quality, url
patron += '</h3><span>([^<]+)</span>' # year
patron = '(?is)movie-img img-box.*?alt="([^"]+).*?'
patron += 'src="([^"]+).*?'
patron += 'href="([^"]+).*?'
patron += 'fechaestreno">([^<]+).*?'
patron += 'quality">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year in matches[item.page:item.page + 30]:
for scrapedtitle, scrapedthumbnail, scrapedurl, year, quality in matches[item.page:item.page + 30]:
title = '%s [COLOR yellowgreen](%s)[/COLOR]' % (scrapedtitle, quality)
itemlist.append(Item(channel=__channel__, action="findvideos", text_color=color3,
@@ -172,10 +173,10 @@ def genresYears(item):
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
data = scrapertools.decodeHtmlentities(data)
if item.title == "Estrenos por Año":
if item.title == "Estrenos":
patron_todas = 'ESTRENOS</a>(.*?)</i> Géneros'
else:
patron_todas = '<h2>Generos</h2>(.*?)</div><aside'
patron_todas = '(?is)genres falsescroll(.*?)</div> </aside'
data = scrapertools.find_single_match(data, patron_todas)

View File

@@ -46,7 +46,7 @@ def mainlist(item):
itemlist.append(item.clone(title="Por Año",
action="seccion",
url=host + '/peliculas/2017/',
url=host + '/peliculas/2019/',
thumbnail=get_thumb('year', auto=True),
seccion='anios'
))
@@ -60,7 +60,7 @@ def mainlist(item):
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + '/api/elastic/suggest?query=',
url=host + '/api/suggest?query=',
thumbnail=get_thumb('search', auto=True)
))
@@ -185,14 +185,15 @@ def seccion(item):
def busqueda(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
headers = {'referer':host, 'X-Requested-With': 'XMLHttpRequest'}
data = httptools.downloadpage(item.url, headers=headers).data
dict_data = jsontools.load(data)
resultados = dict_data['result'][0]['options']
resultados = dict_data['suggest']['result'][0]['options']
for resultado in resultados:
if 'title' in resultado['_source']:
title = resultado['_source']['title']
thumbnail = 'http://s3.amazonaws.com/pelisfox' + '/' + resultado['_source']['cover']
thumbnail = 'https://static.pelisfox.tv/static/movie' + '/' + resultado['_source']['cover']
plot = resultado['_source']['sinopsis']
url = host + resultado['_source']['url'] + '/'

View File

@@ -2,6 +2,7 @@
import re
import urllib
import base64
from core import httptools
from core import scrapertools
@@ -185,27 +186,70 @@ def search(item, texto):
def findvideos(item):
logger.info()
itemlist = []
global new_data
new_data = []
data = get_source(item.url)
data = data.replace("&lt;","<").replace("&quot;",'"').replace("&gt;",">").replace("&amp;","&").replace('\"',"")
patron = '<div class=TPlayerTb.*?id=(.*?)>.*?src=(.*?) frameborder'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, patron)
headers = {'referer':item.url}
for opt, urls_page in matches:
language = scrapertools.find_single_match (data,'TPlayerNv>.*?tplayernv=%s><span>Opción.*?<span>(.*?)</span>' % opt)
headers = {'referer':item.url}
if 'trembed' in urls_page:
urls_page = scrapertools.decodeHtmlentities(urls_page)
sub_data=httptools.downloadpage(urls_page).data
urls_page = scrapertools.find_single_match(sub_data, 'src="(.*?)" ')
itemlist.append(item.clone(title='[%s][%s]',
url=urls_page,
action='play',
language=language,
))
sub_data = httptools.downloadpage(urls_page).data
urls_page = scrapertools.find_single_match(sub_data, 'src="([^"]+)" ')
if "repro.live" in urls_page:
server_repro(urls_page)
if "itatroniks.com" in urls_page:
server_itatroniks(urls_page)
for url in new_data:
itemlist.append(item.clone(title='[%s][%s]',
url=url,
action='play',
language=language,
))
new_data = []
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
return itemlist
def server_itatroniks(urls_page):
logger.info()
headers = {"Referer":urls_page}
id = scrapertools.find_single_match(urls_page, 'embed/(\w+)')
sub_data = httptools.downloadpage(urls_page, headers = headers).data
matches = scrapertools.find_multiple_matches(sub_data, 'button id="([^"]+)')
headers1 = ({"X-Requested-With":"XMLHttpRequest"})
for serv in matches:
data1 = httptools.downloadpage("https://itatroniks.com/get/%s/%s" %(id, serv), headers = headers1).data
data_json = jsontools.load(data1)
urls_page = ""
try:
if "finished" == data_json["status"]: urls_page = "https://%s/embed/%s" %(data_json["server"], data_json["extid"])
if "propio" == data_json["status"]: urls_page = "https://%s/e/%s" %(data_json["server"], data_json["extid"])
except:
continue
new_data.append(urls_page)
def server_repro(urls_page):
logger.info()
headers = {"Referer":urls_page}
sub_data = httptools.downloadpage(urls_page, headers = headers).data
urls_page1 = scrapertools.find_multiple_matches(sub_data, 'data-embed="([^"]+)"')
for urls_page in urls_page1:
urls_page += "==" # base64.decode no decodifica si no tiene al final "=="
urls_page = base64.b64decode(urls_page)
if "repro.live" in urls_page:
data1 = httptools.downloadpage(urls_page, headers = headers).data
urls_page1 = scrapertools.find_multiple_matches(data1, 'source src="([^"]+)')
for urls_page in urls_page1:
new_data.append(urls_page)
else:
new_data.append(urls_page)
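# Note on the "==" appended above: b64decode() expects a length that is a
# multiple of 4, and blindly adding "==" breaks input that is already padded.
# A safer sketch (_b64_pad is a hypothetical helper, not used by this channel):
def _b64_pad(encoded):
    # Pad with "=" up to the next multiple of 4 characters.
    return encoded + "=" * (-len(encoded) % 4)
# e.g. base64.b64decode(_b64_pad("aHR0cHM6Ly9leGFtcGxlLmNvbQ")) -> "https://example.com"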
def newest(categoria):
logger.info()
itemlist = []

View File

@@ -426,7 +426,10 @@ def findvideos(item):
url = url.replace('\\', '')
servername = servertools.get_server_from_url(url)
if 'pelishd24.net' in url or 'stream.pelishd24.com' in url:
url = url.strip()
vip_data = httptools.downloadpage(url).data
if 'Archivo ELiminado' in vip_data:
continue
dejuiced = generictools.dejuice(vip_data)
patron = '"file":"([^"]+)"'
match = re.compile(patron, re.DOTALL).findall(dejuiced)

View File

@@ -286,7 +286,7 @@ def temporadas(item):
if len(matches) > 1:
for scrapedthumbnail, temporada, url in matches:
new_item = item.clone(action="episodios", season=temporada, url=url,
new_item = item.clone(action="episodesxseason", season=temporada, url=url,
thumbnail=host + scrapedthumbnail, extra='serie')
new_item.infoLabels['season'] = temporada
new_item.extra = ""
@@ -308,10 +308,18 @@ def temporadas(item):
text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
return itemlist
else:
return episodios(item)
return episodesxseason(item)
def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
from core import jsontools
@@ -331,7 +339,6 @@ def episodios(item):
episode = element['metas_formateadas']['nepisodio']
season = element['metas_formateadas']['ntemporada']
scrapedurl = element['url_directa']
if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
continue
title = "%sx%s: %s" % (season, episode.zfill(
@@ -359,10 +366,10 @@ def episodios(item):
reverse=config.get_setting('orden_episodios', __channel__))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# Opción "Añadir esta serie a la videoteca"
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
# if config.get_videolibrary_support() and len(itemlist) > 0:
# itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
# action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
# text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
return itemlist

View File

@@ -226,7 +226,6 @@ def findvideos(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for video_url in matches:
language = 'latino'
if not config.get_setting('unify'):
title = ' [%s]' % language.capitalize()
@@ -235,34 +234,39 @@ def findvideos(item):
if 'pelisplus.net' in video_url:
referer = video_url
post = {'r':item.url}
post = {'r':item.url, 'd': 'www.pelisplus.net'}
post = urllib.urlencode(post)
video_url = video_url.replace('/v/', '/api/sources/')
video_url = video_url.replace('/v/', '/api/source/')
url_data = httptools.downloadpage(video_url, post=post, headers={'Referer':referer}).data
patron = '"file":"([^"]+)","label":"([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(url_data)
for url, quality in matches:
url = url.replace('\/', '/')
url = 'https://www.pelisplus.net' + url.replace('\/', '/')
itemlist.append(
Item(channel=item.channel, title='%s' + title, url=url, action='play', language=IDIOMAS[language],
quality=quality, infoLabels=item.infoLabels))
else:
url_data = get_source(video_url)
url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
if 'server' in url:
hidden_data = get_source(hidden_url)
url = scrapertools.find_single_match(hidden_data, '<iframe src="([^"]+)"')
if not 'vidoza' in video_url:
url_data = get_source(video_url)
if 'vidoza' not in video_url:
url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
else:
url = video_url
if not 'server' in url:
url = url
if 'pelishd.net' in url:
vip_data = httptools.downloadpage(url, headers={'Referer':item.url}, follow_redirects=False).data
dejuiced = generictools.dejuice(vip_data)
url = scrapertools.find_single_match(dejuiced, '"file":"([^"]+)"')
if 'pelishd' in url:
vip_data = httptools.downloadpage(url, headers={'Referer':item.url}, follow_redirects=False)
try:
dejuiced = generictools.dejuice(vip_data.data)
url = scrapertools.find_single_match(dejuiced, '"file":"([^"]+)"')
except:
pass
if url != '':
if url != '' and 'rekovers' not in url:
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=IDIOMAS[language],
infoLabels=item.infoLabels))

View File

@@ -57,7 +57,7 @@ def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a class="thumb" href="([^"]+)" title="([^"]+)">.*?'
patron = '<a class="thumb" href="([^"]+)" title="([^"]+)".*?'
patron += '<img class="lazyload" data-src="([^"]+)".*?'
patron += '<span>(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)

View File

@@ -3,8 +3,6 @@
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import urllib
from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
@@ -188,6 +186,7 @@ def findvideos(item):
for datos in dict:
url1 = datos["url"]
hostname = scrapertools.find_single_match(datos["hostname"].replace("www.",""), "(.*?)\.")
if "repelisgo" in hostname or "repelis.io" in datos["hostname"]: continue
if hostname == "my": hostname = "mailru"
titulo = "Ver en: " + hostname.capitalize() + " (" + cali[datos["quality"]] + ") (" + idio[datos["audio"]] + ")"
itemlist.append(
@@ -226,8 +225,6 @@ def play(item):
logger.info()
itemlist = []
url1 = httptools.downloadpage(host + item.url, follow_redirects=False, only_headers=True).headers.get("location", "")
if "storage" in url1:
url1 = scrapertools.find_single_match(url1, "src=(.*mp4)").replace("%3A",":").replace("%2F","/")
itemlist.append(item.clone(url=url1))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist[0].thumbnail = item.contentThumbnail

View File

@@ -138,12 +138,15 @@ def episodesxseason(item):
data = get_source(item.url)
infoLabels = item.infoLabels
season = infoLabels['season']
patron = '<img src="([^>]+)"></a></div><div class="numerando">%s+ - (\d+)</div>' % season
patron = '<img src="([^>]+)"></a></div><div class="numerando">%s+ - (\d+|\d+\/\d+)</div>' % season
patron += '<div class="episodiotitle"><a href="([^"]+)">(.*?)</a><'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedepi, scrapedurl, scrapedtitle in matches:
if '/' in scrapedepi:
scrapedepi = scrapertools.find_single_match(scrapedepi, '(\d+)\/\d+')
title = '%sx%s - %s' % (season, scrapedepi, scrapedtitle)
infoLabels['episode'] = scrapedepi
if scrapedepi > 0:
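The widened alternative (\d+|\d+\/\d+) accepts both plain and aired/total episode numbers. A quick standalone check of the normalisation, with made-up numerando strings:

import re

for numerando in ["3 - 7", "3 - 7/13"]:
    epi = re.search(r"- (\d+/\d+|\d+)", numerando).group(1)
    if "/" in epi:
        epi = re.search(r"(\d+)/\d+", epi).group(1)
    # both inputs normalise to episode "7"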

View File

@@ -1,7 +1,7 @@
{
"id": "rexpelis",
"name": "Rexpelis",
"active": false,
"active": true,
"adult": false,
"language": ["lat","cast"],
"thumbnail": "https://i.postimg.cc/MMJ5g9Y1/rexpelis1.png",

View File

@@ -45,10 +45,11 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title='Nuevos Capitulos', url=host, action='new_episodes', type='tvshows',
thumbnail=get_thumb('new_episodes', auto=True)))
itemlist.append(Item(channel=item.channel, title='Ultimas', url=host + 'series?', action='list_all', type='tvshows',
itemlist.append(Item(channel=item.channel, title='Ultimas', url=host + 'series/estrenos', action='list_all',
type='tvshows',
thumbnail=get_thumb('last', auto=True)))
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'series?', action='list_all', type='tvshows',
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'series', action='list_all', type='tvshows',
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + 'search?s=',
@@ -83,19 +84,14 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
patron = '<article class=".*?">.*?<a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)">.*?'
patron +='<span class="year">(\d{4})</span>.*?<span class="(?:animes|tvs)">([^<]+)<'
patron = '<article class=".*?">.*? href="([^"]+)".*?<img src="([^"]+)".*?<h3 class="card-tvshow__title">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedtype in matches:
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
title = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
if scrapedtype == 'Anime':
action = 'episodesxseasons'
elif scrapedtype == 'Serie':
action = 'seasons'
action = 'seasons'
new_item = Item(channel=item.channel,
action=action,
@@ -103,15 +99,14 @@ def list_all(item):
url=url,
contentSerieName=scrapedtitle,
thumbnail=thumbnail,
type=scrapedtype,
infoLabels={'year':year})
)
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Paginación
url_next_page = scrapertools.find_single_match(data,'li><a href="([^"]+)" rel="next">&raquo;</a>')
url_next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
@@ -123,14 +118,16 @@ def seasons(item):
itemlist=[]
data=get_source(item.url)
patron='<li class="gridseason"><a href="([^"]+)"><span class="title">Temporada (\d+)</span>'
patron='<div class="season__title">Temporada (\d+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
item.type = 'Anime'
return episodesxseasons(item)
infoLabels = item.infoLabels
for scrapedurl, season in matches:
for season in matches:
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action='episodesxseasons',
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -156,23 +153,27 @@ def episodesxseasons(item):
itemlist = []
data=get_source(item.url)
patron='<a href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
if item.type == 'Anime':
season = '1'
patron = '<a class="episodie-list" href="([^"]+)" .*?</i> Episodio (\d+).*?</span>'
else:
season = item.infoLabels['season']
episode = len(matches)
for scrapedurl, scrapedtitle in matches:
patron = 'class="episodie-list" href="([^"]+)" title=".*?Temporada %s .*?pisodio (\d+).*?">' % season
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches:
patron = 'class="episodie-list" href="([^"]+)" title=".*?pisodio (\d+).*?">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, episode in matches:
infoLabels['episode'] = episode
url = scrapedurl
title = scrapedtitle.replace(' online', '')
title = '%sx%s - %s' % (season, episode, title)
title = '%sx%s - Episodio %s' % (season, episode, episode)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))
episode -= 1
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist[::-1]
@@ -183,7 +184,7 @@ def new_episodes(item):
itemlist = []
data = get_source(item.url)
patron = '<article class="contenedor">.*?<a href="([^"]+)" title=".*?">.*?data-src="([^"]+)" alt="([^"]+)">'
patron = '<div class="card-episodie shadow-sm"><a href="([^"]+)".*?data-src="([^"]+)" alt="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -248,34 +249,10 @@ def search(item, texto):
item.url = item.url + texto
if texto != '':
return search_results(item)
return list_all(item)
else:
return []
def search_results(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron = '<div class="search-results__img"><a href="([^"]+)" title=".*?"><img src="([^"]+)".*?'
patron += '<h2>([^<]+)</h2></a><div class="description">([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumb, scrapedtitle, scrapedplot in matches:
title = scrapedtitle
url = scrapedurl
thumbnail = scrapedthumb
plot = scrapedplot
new_item=Item(channel=item.channel, title=title, url=url, contentSerieName=title, thumbnail=thumbnail,
plot=plot, action='seasons')
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def newest(categoria):
logger.info()

View File

@@ -77,7 +77,7 @@ def list_all(item):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" >Página siguiente')
next_page = scrapertools.find_single_match(data, 'class=\'current\'>\d</span>.*?href="([^"]+)">')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
url=next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
@@ -187,12 +187,12 @@ def findvideos(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for link in matches:
if 'id=' in link:
id_type = 'id'
ir_type = 'ir'
elif 'ud=' in link:
id_type = 'ud'
ir_type = 'ur'
id_letter = scrapertools.find_single_match(link, '\?(\w)d')
id_type = '%sd' % id_letter
ir_type = '%sr' % id_letter
id = scrapertools.find_single_match(link, '%s=(.*)' % id_type)
base_link = scrapertools.find_single_match(link, '(.*?)%s=' % id_type)
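The new capture '\?(\w)d' generalises the old hard-coded id=/ud= branches to any one-letter prefix. A standalone check with made-up links:

import re

for link in ["http://example.com/?id=abc123", "http://example.com/?ud=abc123"]:
    id_letter = re.search(r"\?(\w)d", link).group(1)
    id_type = "%sd" % id_letter    # "id" or "ud"
    ir_type = "%sr" % id_letter    # "ir" or "ur"
    id = re.search(r"%s=(.*)" % id_type, link).group(1)    # "abc123"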

View File

@@ -11,19 +11,18 @@ from core import tmdb
from core import jsontools
host = 'https://spankbang.xxx'
host = 'https://es.spankbang.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url= host + "/new_videos/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="peliculas", url=host + "/wall-note-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url= host + "/wall-main-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas largos" , action="peliculas", url= host + "/wall-time-1.html"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Nuevos", action="peliculas", url= host + "/new_videos/"))
itemlist.append( Item(channel=item.channel, title="Mas valorados", action="peliculas", url=host + "/trending_videos/"))
itemlist.append( Item(channel=item.channel, title="Mas vistos", action="peliculas", url= host + "/most_popular/"))
itemlist.append( Item(channel=item.channel, title="Mas largos", action="peliculas", url= host + "/longest_videos/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -31,7 +30,7 @@ def mainlist(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search-%s-1.html" % texto
item.url = host + "/s/%s" % texto
try:
return peliculas(item)
except:
@@ -45,78 +44,45 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '&nbsp;<a href="([^"]+)" class="link1">([^"]+)</a>'
patron = '<a href="([^"]+)/?order=trending"><img src="([^"]+)"><span>([^"]+)</span></a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = scrapedurl.replace(".html", "_date.html")
scrapedurl = host +"/" + scrapedurl
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
scrapedthumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl ,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
# data = httptools.downloadpage(item.url).data
data = scrapertools.cachePage(item.url)
# <div class="video-item" data-id="4652797">
# <a href="/2rq4d/video/yenlomfc" class="thumb ">
# <img src="//static.spankbang.com/static_desktop/Images/blank.png" data-src="//cdnthumb3.spankbang.com/250/4/6/4652797-t6.jpg" alt="yenlomfc" class="cover lazyload has_mp4" />
# <span class="play fa fa-play-circle-o fa-3x"></span>
# <span class="i-len"><i class="fa fa-clock-o"></i> 73</span>
# </a>
# <span class="i-wl" onclick="add_wl(4652797, this)" title="Add to watch later"><i class="fa fa-clock-o"></i><strong>Watch later</strong></span>
# <span class="i-fav" onclick="add_fav(4652797, this)" title="Add to favorites"><i class="fa fa-heart"></i><strong>Favorite</strong></span>
# <span class="i-flag" onclick="show_flag(4652797)" title="Report"><i class="fa fa-flag"></i><strong>Report</strong></span>
# <div class="inf">yenlomfc</div>
# <ul>
# <li>Hace 11 minutos</li>
# <li><i class="fa fa-eye"></i> 60</li>
# <li><i class="fa fa-thumbs-o-up"></i> 100%</li>
# </ul>
# </div>
# <div class="video-item" data-id="4652795">
# <a href="/2rq4b/video/penny+underbust+playstation+modeling" class="thumb ">
# <img src="//static.spankbang.com/static_desktop/Images/blank.png" data-src="//cdnthumb1.spankbang.com/250/4/6/4652795-t6.jpg" alt="Penny Underbust Playstation Modeling" class="cover lazyload " />
# <span class="play fa fa-play-circle-o fa-3x"></span>
# <span class="i-hd">1080p</span>
# <span class="i-len"><i class="fa fa-clock-o"></i> 3</span>
# </a>
# <span class="i-wl" onclick="add_wl(4652795, this)" title="Add to watch later"><i class="fa fa-clock-o"></i><strong>Watch later</strong></span>
# <span class="i-fav" onclick="add_fav(4652795, this)" title="Add to favorites"><i class="fa fa-heart"></i><strong>Favorite</strong></span>
# <span class="i-flag" onclick="show_flag(4652795)" title="Report"><i class="fa fa-flag"></i><strong>Report</strong></span>
# <div class="inf">Penny Underbust Playstation Modeling</div>
# <ul>
# <li>Hace 12 minutos</li>
# <li><i class="fa fa-eye"></i> 99</li>
# <li><i class="fa fa-thumbs-o-up"></i> 100%</li>
# </ul>
# </div>
data = httptools.downloadpage(item.url).data
patron = '<div class="video-item" data-id="\d+">.*?'
patron += '<a href="([^"]+)".*?'
patron += '<a href="([^"]+)" class="thumb ">.*?'
patron += 'data-src="([^"]+)" alt="([^"]+)".*?'
patron += '<i class="fa fa-clock-o"></i>(.*?)</span>'
patron += '</span>(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
# http://cdnthumb1.spankbang.com/250/4/6/4652755-t6.jpg
scrapedhd = scrapertools.find_single_match(scrapedtime, '<span class="i-hd">(.*?)</span>')
duration = scrapertools.find_single_match(scrapedtime, '<i class="fa fa-clock-o"></i>(.*?)</span>')
if scrapedhd != '':
title = "[COLOR yellow]" +duration+ " min[/COLOR] " + "[COLOR red]" +scrapedhd+ "[/COLOR] "+scrapedtitle
else:
title = "[COLOR yellow]" + duration + " min[/COLOR] " + scrapedtitle
thumbnail = "http:" + scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
# <li class="next"><a href="/new_videos/2/">&raquo;</a></li>
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
plot=plot, contentTitle=title ))
next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)">')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue",
url=next_page ) )
return itemlist
@@ -124,13 +90,13 @@ def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'servervideo = \'([^\']+)\'.*?'
patron += 'path = \'([^\']+)\'.*?'
patron += 'filee = \'([^\']+)\'.*?'
matches = scrapertools.find_multiple_matches(data, patron)
for servervideo,path,filee in matches:
scrapedurl = servervideo + path + "56ea912c4df934c216c352fa8d623af3" + filee
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
scrapedurl = scrapertools.find_single_match(data, 'var stream_url_1080p = \'([^\']+)\';')
if scrapedurl == "":
scrapedurl = scrapertools.find_single_match(data, 'var stream_url_720p = \'([^\']+)\';')
if scrapedurl == "":
scrapedurl = scrapertools.find_single_match(data, 'var stream_url_480p = \'([^\']+)\';')
scrapedurl = scrapedurl.replace("amp;", "")
itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl, thumbnail=item.thumbnail,
plot=item.plot, show=item.title, server="directo"))
return itemlist
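The chained matches above walk down from 1080p to 480p; an equivalent loop over the same page variables, as a sketch:

for quality in ("1080p", "720p", "480p"):
    scrapedurl = scrapertools.find_single_match(data, "var stream_url_%s = '([^']+)';" % quality)
    if scrapedurl:
        break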

View File

@@ -2,7 +2,7 @@
"id": "spankwire",
"name": "spankwire",
"active": true,
"adult": false,
"adult": true,
"language": ["*"],
"thumbnail": "https://cdn1-static-spankwire.spankcdn.net/apple-touch-icon-precomposed.png",
"banner": "",

View File

@@ -2,7 +2,7 @@
"id": "streamingporn",
"name": "streamingporn",
"active": true,
"adult": false,
"adult": true,
"language": ["*"],
"thumbnail": "http://streamingporn.xyz/wp-content/uploads/2017/06/streamingporn.png",
"banner": "",

View File

@@ -48,7 +48,8 @@ def catalogo(item):
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl ,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
@@ -56,7 +57,7 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<a href="#">Categories</a>(.*?)<li id="menu-item-30919')
data = scrapertools.get_match(data,'<a href="#">Categories</a>(.*?)</ul>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li id="menu-item-\d+".*?<a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
@@ -66,7 +67,8 @@ def categorias(item):
scrapedthumbnail = ""
scrapedtitle = scrapedtitle
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl ,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
@@ -84,11 +86,13 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<div class="loadMoreInfinite"><a href="(.*?)" >Load More')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" ,
text_color="blue", url=next_page_url , folder=True) )
return itemlist

View File

@@ -2,7 +2,7 @@
"id": "streamporno",
"name": "streamporno",
"active": true,
"adult": false,
"adult": true,
"language": ["*"],
"thumbnail": "http://pornstreams.eu/wp-content/uploads/2015/12/faviiconeporn.png",
"banner": "",

View File

@@ -591,7 +591,8 @@ def play(item):
os.mkdir(os.path.join(config.get_setting("videolibrarypath"), "subtitles"))
subtitles = []
subtitles.extend(item.subtitle)
item.subtitle = subtitles[0] #ponemos por defecto el primero
#item.subtitle = os.path.join(config.get_setting("videolibrarypath"), os.path.join("subtitles", scrapertools.find_single_match(subtitles[0], '\/\d{2}\/(.*?\.\w+)$')))
for subtitle in subtitles: #recorremos la lista
subtitle_name = scrapertools.find_single_match(subtitle, '\/\d{2}\/(.*?\.\w+)$') #se pone el nombre del Sub-título
subtitle_folder_path = os.path.join(config.get_setting("videolibrarypath"), "subtitles", subtitle_name) #Path de descarga

View File

@@ -15,8 +15,8 @@ host = 'https://www.tnaflix.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/new/1"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/popular/?period=month&d=all"))
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/new/?d=all&period=all"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/popular/?d=all&period=all"))
itemlist.append( Item(channel=item.channel, title="Mejor valorado" , action="peliculas", url=host + "/toprated/?d=all&period=month"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels/all/top-rated/1/all"))
itemlist.append( Item(channel=item.channel, title="PornStars" , action="categorias", url=host + "/pornstars"))
@@ -86,7 +86,8 @@ def peliculas(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class=\'thumb no_ajax\' href=\'(.*?)\'.*?data-original=\'(.*?)\' alt="([^"]+)"><div class=\'videoDuration\'>([^<]+)</div>'
patron = '<a class=\'thumb no_ajax\' href=\'(.*?)\'.*?'
patron += 'data-original=\'(.*?)\' alt="([^"]+)"><div class=\'videoDuration\'>([^<]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
url = urlparse.urljoin(item.url,scrapedurl)

View File

@@ -163,7 +163,7 @@ def listado(item):
patron = '<div class="blogitem "><a title="([^"]+)"\s+href="([^"]+)">.*?src="([^"]+)" onload'
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches and not 'Total: 0 resultados encontrados' in data: #error
if not matches and not 'Total: 0 resultados encontrados' in data and not 'Total: 0 results found' in data:
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
if item.intervencion: #Sí ha sido clausurada judicialmente
item, itemlist = generictools.post_tmdb_episodios(item, itemlist) #Llamamos al método para el pintado del error

View File

@@ -2185,21 +2185,20 @@ def acciones_trakt(item):
'runtime': config.get_localized_string(70471), 'popularity': config.get_localized_string(70472), 'percentage': config.get_localized_string(70473),
'votes': config.get_localized_string(70474), 'asc': config.get_localized_string(70475), 'desc': config.get_localized_string(70476)}
orden = valores[item.order] + " " + valores[item.how]
itemlist.append(item.clone(title=config.get_localized_string(70349) % orden, action="order_list",
text_color=color4))
# itemlist.append(item.clone(title=config.get_localized_string(70349) % orden, action="order_list",
# text_color=color4))
ratings = []
try:
if item.order:
if item.how == "asc":
reverse = False
else:
reverse = True
if item.order == "rank" or item.order == "added":
data = sorted(data, key=lambda x: x[item.order.replace("added", "listed_at")], reverse=reverse)
else:
order = item.order.replace("popularity", "votes").replace("percentage", "rating")
data = sorted(data, key=lambda x: x[x['type']].get(order, 0), reverse=reverse)
# if item.order:
# if item.how == "asc":
# reverse = False
# else:
# reverse = True
# if item.order == "rank" or item.order == "added":
# data = sorted(data, key=lambda x: x[item.order.replace("added", "last_collected_at")], reverse=reverse)
# else:
# order = item.order.replace("popularity", "votes").replace("percentage", "rating")
# data = sorted(data, key=lambda x: x[x['type']].get(order, 0), reverse=reverse)
for entry in data:
try:
@@ -2259,7 +2258,7 @@ def order_list(item):
logger.info()
list_controls = []
valores1 = ['rank', 'added', 'title', 'released', 'runtime', 'popularity', 'percentage', 'votes']
valores1 = ['rating', 'added', 'title', 'released', 'runtime', 'popularity', 'percentage', 'votes']
valores2 = ['asc', 'desc']
dict_values = {'orderby': valores1.index(item.order), 'orderhow': valores2.index(item.how)}
@@ -2268,9 +2267,8 @@ def order_list(item):
'type': 'list', 'default': 0, 'visible': True})
list_controls.append({'id': 'orderhow', 'label': 'De forma:', 'enabled': True,
'type': 'list', 'default': 0, 'visible': True})
list_controls[0]['lvalues'] = [config.get_localized_string(70003), config.get_localized_string(70469), config.get_localized_string(60230), config.get_localized_string(70470), config.get_localized_string(70471), config.get_localized_string(70472),
config.get_localized_string(70473), config.get_localized_string(70474)]
list_controls[1]['lvalues'] = [config.get_localized_string(70477), config.get_localized_string(70478)]
list_controls[0]['lvalues'] = ['rank', 'added', 'title', 'released', 'runtime', 'popularity', 'percentage', 'votes']
list_controls[1]['lvalues'] = ['asc', 'desc']
return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values,
caption=config.get_localized_string(70320), item=item, callback='order_trakt')

View File

@@ -154,14 +154,15 @@ def search(item, texto):
def findvideos(item):
logger.info()
itemlist = []
servers = {'netu': 'http://hqq.tv/player/embed_player.php?vid=',
servers = {'netu': 'https://hqq.tv/player/embed_player.php?vid=',
'open': 'https://openload.co/embed/',
'netv': 'http://goo.gl/',
'gamo': 'http://gamovideo.com/embed-',
'powvideo': 'http://powvideo.net/embed-',
'play': 'http://streamplay.to/embed-',
'vido': 'http://vidoza.net/embed-',
'net': 'http://hqq.tv/player/embed_player.php?vid='
'net': 'https://hqq.tv/player/embed_player.php?vid=',
'ntu': 'https://hqq.tv/player/embed_player.php?vid='
}
data = get_source(item.url)
noemitido = scrapertools.find_single_match(data, '<p><img src=(http://darkiller.com/images/subiendo.png) border=0\/><\/p>')

View File

@@ -80,9 +80,6 @@ def select_menu(item):
itemlist.append(Item(channel=item.channel, title='Por Año', action='section', url=url,
thumbnail=get_thumb('year', auto=True), type='all'))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=url + 'ajax/1/?q=',
thumbnail=get_thumb("search", auto=True), type=item.type))
return itemlist
def sub_menu(item):
@@ -121,6 +118,8 @@ def sub_menu(item):
url=url + '?q=%s+subtitulado' % link_type, action='list_all',
thumbnail=get_thumb('vose', auto=True), type=item.type, send_lang='VOSE',
link_type=link_type))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=url + '?q=',
thumbnail=get_thumb("search", auto=True), type=item.type, link_type=link_type))
return itemlist

View File

@@ -2,7 +2,7 @@
"id": "xdvideos",
"name": "XDVideos",
"active": true,
"adult": false,
"adult": true,
"language": ["lat"],
"thumbnail": "http://i.imgur.com/vKcZxXS.png",
"categories": [

View File

@@ -15,7 +15,7 @@ from platformcode import config, logger, platformtools
idio = {'https://cdn.yape.nu//languajes/la.png': 'LAT','https://cdn.yape.nu//languajes/es.png': 'ESP','https://cdn.yape.nu//languajes/en_es.png': 'VOSE'}
cali = {'TS Screnner': 'TS Screnner', 'HD 1080p': 'HD 1080p','TS Screener HQ':'TS Screener HQ', 'BR Screnner':'BR Screnner','HD Rip':'HD Rip','DVD Screnner':'DVD Screnner', 'DVD Rip':'DVD Rip'}
cali = {'TS Screnner': 'TS Screnner', 'HD 1080p': 'HD 1080p','TS Screener HQ':'TS Screener HQ', 'BR Screnner':'BR Screnner','HD Rip':'HD Rip','DVD Screnner':'DVD Screnner', 'DVD Rip':'DVD Rip', 'HD 720':'HD 720'}
list_language = idio.values()
list_quality = cali.values()

View File

@@ -549,7 +549,7 @@ def findvideos(item):
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
logger.debug(data)
#logger.debug(data)
#Si es un lookup para cargar las urls de emergencia en la Videoteca...
if item.videolibray_emergency_urls:

View File

@@ -162,6 +162,7 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
# Limitar tiempo de descarga si no se ha pasado timeout y hay un valor establecido en la variable global
if timeout is None and HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT is not None: timeout = HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT
if timeout == 0: timeout = None
logger.info("----------------------------------------------")
logger.info("downloadpage Alfa: %s" %__version)

View File

@@ -25,6 +25,7 @@ from core import filetools
from core.item import Item
from platformcode import config, logger, platformtools
from core import tmdb
from lib import jsunpack
channel_py = "newpct1"
intervenido_judicial = 'Dominio intervenido por la Autoridad Judicial'
@@ -1773,17 +1774,18 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
canal_org_des_list = []
json_path_list = []
emergency_urls_force = False
status_migration = False
#if item.ow_force == '1': #Ha podido qudar activado de una pasada anteriores
#if item.ow_force == '1': #Ha podido qudar activado de una pasada anteriores
# del item.ow_force
# logger.error('** item.ow_force: ' + item.path) #aviso que ha habido una incidencia
if it.ow_force == '1': #Ha podido qudar activado de una pasada anteriores
# logger.error('** item.ow_force: ' + item.path) #aviso que ha habido una incidencia
if it.ow_force == '1': #Ha podido quedar activado de una pasada anterior
del it.ow_force
if path and it.contentType != 'movies':
if path and it.infoLabels['mediatype'] == 'tvshow':
try:
nfo = filetools.join(path, '/tvshow.nfo')
filetools.write(nfo, head_nfo + it.tojson()) #escribo el .nfo de la peli por si aborta update
logger.error('** .nfo ACTUALIZADO: it.ow_force: ' + nfo) #aviso que ha habido una incidencia
filetools.write(nfo, head_nfo + it.tojson()) #escribo el .nfo de la peli por si aborta update
logger.error('** .nfo ACTUALIZADO: it.ow_force: ' + nfo) #aviso que ha habido una incidencia
except:
logger.error('** .nfo ERROR actualizar: it.ow_force: ' + nfo) #aviso que ha habido una incidencia
logger.error(traceback.format_exc())
@@ -1828,6 +1830,9 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
#automáticamente. En el caso de peliculas, se general aquí el json actualizado y se marca el .nfo como actualizado.
#Cuando en el .json se activa "Borrar", "emergency_urls = 2", se borran todos los enlaces existentes
#Cuando en el .json se activa "Actualizar", "emergency_urls = 3", se actualizan todos los enlaces existentes
status_migration = regenerate_clones() #TEMPORAL: Reparación de Videoteca con Newpct1
"""
verify_cached_torrents() #TEMPORAL: verificamos si los .torrents son correctos
try: #Si ha habido errores, vemos la lista y los reparamos
@@ -1904,7 +1909,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
logger.error(traceback.format_exc())
#Ahora tratamos las webs intervenidas, tranformamos la url, el nfo y borramos los archivos obsoletos de la serie
if (channel not in intervencion and channel_py_alt not in intervencion and category not in intervencion and channel_alt != 'videolibrary') or not item.infoLabels: #lookup
if (channel not in intervencion and channel_py_alt not in intervencion and category not in intervencion and channel_alt != 'videolibrary') or not item.infoLabels or status_migration: #lookup o migración
return (item, it, overwrite) #... el canal/clone está listado
import ast
@@ -2053,32 +2058,35 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
#Verificamos que las webs de los canales estén activas antes de borrar los .json, para asegurar que se pueden regenerar
i = 0
for canal_org_def, canal_des_def, url_total, opt_def, ow_force_def in canal_org_des_list: #pasamos por las "parejas" a borrar
canal_org_des_list_ALT = [] #Creamos esta lista para salvar las parejas
canal_org_des_list_ALT.extend(canal_org_des_list) #... y borrar de la original las web caidas
for canal_org_def, canal_des_def, url_total, opt_def, ow_force_def in canal_org_des_list_ALT: #pasamos por las "parejas" a borrar
if "magnet:" in url_total or type(url_total) != str: #Si la url es un Magnet, o es una lista, pasamos
i += 1
continue
try:
response = httptools.downloadpage(url_total, only_headers=True)
response = httptools.downloadpage(url_total)
except:
logger.error(traceback.format_exc())
logger.error('Web ' + canal_des_def.upper() + ' ERROR. Regla no procesada: ' + str(canal_org_des_list[i]))
item = item_back.clone() #Restauro las imágenes iniciales
it = it_back.clone()
item.torrent_caching_fail = True #Marcamos el proceso como fallido
return (item, it, False)
if not response.sucess:
del canal_org_des_list[i] #Borro regla
continue #... y paso a la siguiente
if not response:
logger.error('Web ' + canal_des_def.upper() + ' INACTIVA. Regla no procesada: ' + str(canal_org_des_list[i]))
item = item_back.clone() #Restauro las imágenes iniciales
it = it_back.clone()
item.torrent_caching_fail = True #Marcamos el proceso como fallido
return (item, it, False)
del canal_org_des_list[i] #Borro regla
continue #... y paso a la siguiente
i += 1
if i == 0:
item = item_back.clone() #Restauro las imágenes iniciales
it = it_back.clone()
item.torrent_caching_fail = True #Marcamos el proceso como fallido
return (item, it, False)
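The hunk above iterates over the snapshot copy canal_org_des_list_ALT so that del canal_org_des_list[i] does not skip the element that slides into position i. The pattern in isolation:

rules = ["keep", "drop", "keep"]
i = 0
for rule in list(rules):      # iterate over a snapshot copy
    if rule == "drop":
        del rules[i]          # index i now holds the next element...
        continue              # ...so do not advance i
    i += 1
# rules == ["keep", "keep"]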
# Listamos todos los ficheros de la serie, asi evitamos tener que comprobar si existe uno por uno
canal_erase_list = []
from core import videolibrarytools
raiz, carpetas_series, ficheros = filetools.walk(path).next()
ficheros = [filetools.join(path, f) for f in ficheros] #Almacenamos la lista de archivos de la carpeta
ficheros = [filetools.join(path, f) for f in ficheros] #Almacenamos la lista de archivos de la carpeta
#logger.error(ficheros)
for archivo in ficheros:
for canal_org_def, canal_des_def, url_total, opt_def, ow_force_def in canal_org_des_list: #pasamos por las "parejas" a borrar
@@ -2089,9 +2097,12 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
if archivo_alt == "''": archivo_alt = "'xyz123'"
#logger.error(canal_erase + canal_new + archivo + archivo_alt)
#Borramos los .json que sean de los canal afectados, incluidos todos los de los clones de newpct1 si éste es el canal
if canal_erase in archivo or (ow_force_def == 'emerg' and canal_erase_alt in fail_over_list and archivo_alt in fail_over_list):
if canal_erase in archivo or (ow_force_def == 'emerg' and canal_erase_alt in fail_over_list and archivo_alt in fail_over_list and it.contentType != 'movie'):
if canal_des_def and it.contentType == 'movie' and not '.torrent' in archivo: #Si es película ...
item_json = ''
item_json = Item().fromjson(filetools.read(archivo)) #leemos el .json ante de borrarlo para salvar...
if not item_json: #error al leer el .json. Algo no funciona...
continue
title = item_json.title #... el título con su formato
language = item_json.language #... los idiomas, que no están en el .nfo
wanted = item_json.wanted #... y wanted con el título original
@@ -2133,7 +2144,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
if json_path_list:
logger.error('** .json LIST: ' + str(json_path_list))
for canal_org_def, canal_des_def, url_total, json_path, title, language, wanted, ow_force_def, opt_def, archivo in json_path_list: #pasamos por todos canales
for canal_org_def, canal_des_def, url_total, json_path, title, language, wanted, ow_force_def, opt_def, archivo in json_path_list: #pasamos por todos canales
logger.error('** ESCRIBIMOS: ' + json_path)
item_movie.emergency_urls = False
del item_movie.emergency_urls
@@ -2156,6 +2167,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
if not it.emergency_urls: #... lo actualizamos en el .nfo
it.emergency_urls = dict() #... iniciamos la variable si no existe
it.emergency_urls.update({canal_des_def: True}) #... se marca como activo
if it.ow_force: del it.ow_force
filetools.write(archivo_nfo, head_nfo + it.tojson()) #actualizo el .nfo de la peli
else:
logger.error('Error en FINDVIDEOS: ' + archivo + ' / Regla: ' + canal_org_def + ', ' + opt_def + ', ' + ow_force_def)
@@ -2165,6 +2177,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
del it.emergency_urls
if it.emergency_urls and it.emergency_urls.get(item_movie.channel, False):
it.emergency_urls.pop(item_movie.channel, None) #borramos la entrada del .nfo
if it.ow_force: del it.ow_force
filetools.write(archivo_nfo, head_nfo + it.tojson()) #actualizo el .nfo de la peli
filetools.write(json_path, item_movie.tojson()) #Salvamos el nuevo .json de la película
@@ -2200,7 +2213,7 @@ def verify_cached_torrents():
json_error_path = filetools.join(config.get_runtime_path(), 'error_cached_torrents.json')
json_error_path_BK = filetools.join(config.get_runtime_path(), 'error_cached_torrents_BK.json')
videolibrary_path = config.get_videolibrary_path() #Calculamos el path absoluto a partir de la Videoteca
videolibrary_path = config.get_videolibrary_path() #Calculamos el path absoluto a partir de la Videoteca
movies = config.get_setting("folder_movies")
series = config.get_setting("folder_tvshows")
torrents_movies = filetools.join(videolibrary_path, config.get_setting("folder_movies")) #path de CINE
@@ -2271,8 +2284,172 @@ def verify_cached_torrents():
logger.error('Lista de .torrents DESCOMPRIMIDOS: ' + str(descomprimidos))
if errores:
logger.error('Lista de .torrents en ERROR: ' + str(errores))
def regenerate_clones():
logger.info()
import json
from core import videolibrarytools
"""
Regenerates the .json files that were clobbered by the migration. Also deletes the tvshow.nfo
files left in movie folders.
Method intended for temporary, controlled use.
"""
try:
#Localiza los paths donde dejar el archivo .json de control, y de la Videoteca
json_path = filetools.exists(filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json'))
if json_path:
logger.info('Videoteca reparada anteriormente: NOS VAMOS')
return False
json_path = filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json')
filetools.write(json_path, json.dumps({"CINE_verify": True})) #Evita que se lance otro proceso simultaneo
json_error_path = filetools.join(config.get_runtime_path(), 'error_cached_torrents.json')
json_error_path_BK = filetools.join(config.get_runtime_path(), 'error_cached_torrents_BK.json')
videolibrary_path = config.get_videolibrary_path() #Calculamos el path absoluto a partir de la Videoteca
movies = config.get_setting("folder_movies")
series = config.get_setting("folder_tvshows")
torrents_movies = filetools.join(videolibrary_path, config.get_setting("folder_movies")) #path de CINE
torrents_series = filetools.join(videolibrary_path, config.get_setting("folder_tvshows")) #path de SERIES
#Cargamos en .json de Newpct1 para ver las listas de valores en settings
fail_over_list = channeltools.get_channel_json(channel_py)
for settings in fail_over_list['settings']: #Se recorren todos los settings
if settings['id'] == "clonenewpct1_channels_list": #Encontramos en setting
fail_over_list = settings['default'] #Carga lista de clones
#Inicializa variables
torren_list = []
torren_list.append(torrents_movies)
#torren_list.append(torrents_series)
i = 0
j = 0
k = 0
descomprimidos = []
errores = []
json_data = dict()
#Recorre las carpetas de CINE y SERIES de la Videoteca, leyendo, descomprimiendo y regrabando los archivos .torrent
for contentType in torren_list:
for root, folders, files in filetools.walk(contentType):
nfo = ''
newpct1 = False
file_list = str(files)
logger.error(file_list)
#Borra los archivos Tvshow.nfo y verifica si el .nfo tiene más de un canal y uno es clone Newpct1
for file in files:
#logger.info('file - nfos: ' + file)
if 'tvshow.nfo' in file:
file_path = filetools.join(root, 'tvshow.nfo')
filetools.remove(file_path)
continue
if '.nfo' in file:
peli_name = file.replace('.nfo', '')
nfo = ''
try:
head_nfo, nfo = videolibrarytools.read_nfo(filetools.join(root, file))
except:
logger.error('** NFO: error de lectura en: ' + file)
break
if not nfo:
logger.error('** NFO: error de lectura en: ' + file)
break
if nfo.ow_force: #Si tiene ow_force lo quitamos para evitar futuros problemas
del nfo.ow_force
try:
filetools.write(filetools.join(root, file), head_nfo + nfo.tojson()) #actualizo el .nfo
except:
logger.error('** NFO: error de escritura en: ' + file)
break
if '.torrent' not in file_list and nfo.emergency_urls:
del nfo.emergency_urls #Si tiene emergency_urls, lo reseteamos
try:
filetools.write(filetools.join(root, file), head_nfo + nfo.tojson()) #actualizo el .nfo
except:
logger.error('** NFO: error de escritura en: ' + file)
break
newpct1 = True #marcamos para resetear los .jsons
if len(nfo.library_urls) > 1: #Tiene más de un canal?
for canal, url in nfo.library_urls.items():
canal_json = "[%s].json" % canal
if canal_json not in file_list: #Canal zomby, lo borramos
logger.error('pop: ' + canal)
nfo.library_urls.pop(canal, None)
if nfo.emergency_urls:
del nfo.emergency_urls #Si tiene emergency_urls, lo reseteamos
try:
filetools.write(filetools.join(root, file), head_nfo + nfo.tojson()) #actualizo el .nfo
except:
logger.error('** NFO: error de escritura en: ' + file)
break
newpct1 = True #marcamos para resetear los .jsons
canal_nwepct1 = "'%s'" % canal
if canal_nwepct1 in fail_over_list: #Algún canal es clone de Newpct1
newpct1 = True #Si es que sí, lo marcamos
if nfo.emergency_urls:
del nfo.emergency_urls #Si tiene emergency_urls, lo reseteamos
try:
filetools.write(filetools.join(root, file), head_nfo + nfo.tojson()) #actualizo el .nfo
except:
logger.error('** NFO: error de escritura en: ' + file)
break
#Zona para arreglar los archivos .json
if not newpct1:
continue
for file in files:
file_path = filetools.join(root, file)
if '.json' in file:
logger.info('** file: ' + file)
canal_json = scrapertools.find_single_match(file, '\[(\w+)\].json')
if canal_json not in nfo.library_urls:
filetools.remove(file_path) #borramos el .json es un zomby
item_movie = ''
try:
item_movie = Item().fromjson(filetools.read(file_path)) #leemos el .json
except:
logger.error('** JSON: error de lectura en: ' + file)
continue
if not item_movie:
logger.error('** JSON: error de lectura en: ' + file)
continue
if item_movie.emergency_urls: del item_movie.emergency_urls
item_movie.channel = canal_json #nombre del canal
item_movie.category = canal_json.capitalize() #categoría
item_movie.url = nfo.library_urls[canal_json] #url
if scrapertools.find_single_match(item_movie.title, '(.*?)\[\d+.\d+\s*.\s*B\]'):
item_movie.title = scrapertools.find_single_match(item_movie.title, '(.*?)\[\d+.\d+\s*.\s*B\]').strip() #quitamos Size
if item_movie.added_replacing: del item_movie.added_replacing #quitamos traza del canal reemplazado
try:
filetools.write(file_path, item_movie.tojson()) #Salvamos el nuevo .json de la película
except:
logger.error('** JSON: error de escritura en: ' + file)
else:
errores += [file]
if '.torrent' in file:
filetools.remove(file_path) #borramos los .torrent salvados
logger.error('** Lista de peliculas reparadas: ' + str(errores))
filetools.write(json_error_path, json.dumps(json_data))
filetools.write(json_error_path_BK, json.dumps(json_data))
filetools.write(json_path, json.dumps({"CINE_verify": True}))
except:
filetools.remove(json_path) #borramos el bloqueo para que se pueda lanzar de nuevo
logger.error('Error en el proceso de REPARACIÓN de Videoteca de CINE')
logger.error(traceback.format_exc())
return True
def dejuice(data):
logger.info()
# Método para desofuscar datos de JuicyCodes
@@ -2285,4 +2462,23 @@ def dejuice(data):
b64_decode = base64.b64decode(b64_data)
dejuiced = jsunpack.unpack(b64_decode)
return dejuiced
return dejuiced
def privatedecrypt(url, headers=None):
data = httptools.downloadpage(url, headers=headers, follow_redirects=False).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
packed = scrapertools.find_single_match(data, '(eval\(.*?);var')
unpacked = jsunpack.unpack(packed)
server = scrapertools.find_single_match(unpacked, "src:.'(http://\D+)/")
id = scrapertools.find_single_match(unpacked, "src:.'http://\D+/.*?description:.'(.*?).'")
if server == '':
if 'powvideo' in unpacked:
id = scrapertools.find_single_match(unpacked, ",description:.'(.*?).'")
server = 'https://powvideo.net'
if server != '' and id != '':
url = '%s/%s' % (server, id)
else:
url = ''
return url
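A minimal usage sketch for the new helper, assuming it is called through generictools the same way dejuice is; the embed URL is hypothetical:

from core import generictools

url = generictools.privatedecrypt("http://example.com/embed-abc123.html")
if url:
    # e.g. "https://powvideo.net/abc123", ready for the matching connector
    pass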

View File

@@ -26,7 +26,7 @@ def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
class UnshortenIt(object):
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net|briskgram\.net'
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net|briskgram\.net|baymaleti\.net|thouth\.net'
_linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
_adfocus_regex = r'adfoc\.us'
_lnxlu_regex = r'lnx\.lu'

View File

@@ -57,7 +57,7 @@ def get_platform(full_version=False):
"16": "jarvis", "17": "krypton", "18": "leia"}
code_db = {'10': 'MyVideos37.db', '11': 'MyVideos60.db', '12': 'MyVideos75.db',
'13': 'MyVideos78.db', '14': 'MyVideos90.db', '15': 'MyVideos93.db',
'16': 'MyVideos99.db', '17': 'MyVideos107.db', '18': 'MyVideos108.db'}
'16': 'MyVideos99.db', '17': 'MyVideos107.db', '18': 'MyVideos113.db'}
num_version = xbmc.getInfoLabel('System.BuildVersion')
num_version = re.match("\d+\.\d+", num_version).group(0)

View File

@@ -214,7 +214,7 @@ def render_items(itemlist, parent_item):
if item.fanart:
fanart = item.fanart
else:
fanart = os.path.join(config.get_runtime_path(), "fanart-xmas.jpg")
fanart = os.path.join(config.get_runtime_path(), "fanart1.jpg")
# Creamos el listitem
#listitem = xbmcgui.ListItem(item.title)
@@ -1169,7 +1169,10 @@ def play_torrent(item, xlistitem, mediaurl):
time.sleep(5) #Repetimos cada intervalo
#logger.debug(str(time_limit))
if item.subtitle != '':
time.sleep(5)
xbmc_player.setSubtitles(item.subtitle)
#subt = xbmcgui.ListItem(path=item.url, thumbnailImage=item.thumbnail)
#subt.setSubtitles([item.subtitle])
if item.strm_path and is_playing(): #Sólo si es de Videoteca
from platformcode import xbmc_videolibrary

Binary file not shown (new image, 33 KiB)

Binary file not shown (new image, 19 KiB)

View File

@@ -1,8 +1,7 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
@@ -21,7 +20,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls = []
data = httptools.downloadpage(page_url).data
patron = "DownloadButtonAd-startDownload gbtnSecondary.*?href='([^']+)'"
matches = re.compile(patron, re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, patron)
if len(matches) == 0:
patron = 'Download file.*?href="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
if len(matches) > 0:
video_urls.append([matches[0][-4:] + " [mediafire]", matches[0]])
for video_url in video_urls:

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
import re
import urllib
import urllib, random, base64
from core import httptools
from core import jsontools
@@ -12,7 +12,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
# http://netu.tv/watch_video.php=XX solo contiene una redireccion, ir directamente a http://hqq.tv/player/embed_player.php?vid=XX
page_url = page_url.replace("http://netu.tv/watch_video.php?v=", "http://hqq.tv/player/embed_player.php?vid=")
page_url = page_url.replace("/watch_video.php?v=", "/player/embed_player.php?vid=")
data = httptools.downloadpage(page_url).data
if "var userid = '';" in data.lower():
return False, "[netutv] El archivo no existe o ha sido borrado"
@@ -21,72 +21,74 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
if "hash=" in page_url:
data = urllib.unquote(httptools.downloadpage(page_url).data)
id_video = scrapertools.find_single_match(data, "vid':'([^']+)'")
page_url = "http://hqq.watch/player/embed_player.php?vid=%s" % id_video
else:
id_video = page_url.rsplit("=", 1)[1]
page_url_hqq = "http://hqq.watch/player/embed_player.php?vid=%s&autoplay=no" % id_video
data_page_url_hqq = httptools.downloadpage(page_url_hqq, add_referer=True).data
js_wise = scrapertools.find_single_match(data_page_url_hqq,
"<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>")
data_unwise = jswise(js_wise).replace("\\", "")
at = scrapertools.find_single_match(data_unwise, 'at=(\w+)')
http_referer = scrapertools.find_single_match(data_unwise, 'http_referer=(.*?)&')
url = "http://hqq.watch/sec/player/embed_player.php?iss=&vid=%s&at=%s&autoplayed=yes&referer=on" \
"&http_referer=%s&pass=&embed_from=&need_captcha=0&hash_from=" % (id_video, at, http_referer)
data_player = httptools.downloadpage(url, add_referer=True).data
data_unescape = scrapertools.find_multiple_matches(data_player, 'document.write\(unescape\("([^"]+)"')
data = ""
for d in data_unescape:
data += urllib.unquote(d)
subtitle = scrapertools.find_single_match(data, 'value="sublangs=Spanish.*?sub=([^&]+)&')
if not subtitle:
subtitle = scrapertools.find_single_match(data, 'value="sublangs=English.*?sub=([^&]+)&')
data_unwise_player = ""
js_wise = scrapertools.find_single_match(data_player,
"<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>")
if js_wise:
data_unwise_player = jswise(js_wise).replace("\\", "")
vars_data = scrapertools.find_single_match(data, '/player/get_md5.php",\s*\{(.*?)\}')
matches = scrapertools.find_multiple_matches(vars_data, '\s*([^:]+):\s*([^,]*)[,"]')
params = {}
for key, value in matches:
if key == "adb":
params[key] = "0/"
elif '"' in value:
params[key] = value.replace('"', '')
else:
value_var = scrapertools.find_single_match(data, 'var\s*%s\s*=\s*"([^"]+)"' % value)
if not value_var and data_unwise_player:
value_var = scrapertools.find_single_match(data_unwise_player, 'var\s*%s\s*=\s*"([^"]+)"' % value)
params[key] = value_var
params = urllib.urlencode(params)
head = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
data = httptools.downloadpage("http://hqq.watch/player/get_md5.php?" + params, headers=head).data
media_urls = []
url_data = jsontools.load(data)
media_url = tb(url_data["obf_link"].replace("#", "")) + ".mp4.m3u8"
if not media_url.startswith("http"):
media_url = "https:" + media_url
video_urls = []
media = media_url + "|User-Agent=Mozilla/5.0 (iPhone; CPU iPhone OS 5_0_1 like Mac OS X)"
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [netu.tv]", media, 0, subtitle])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
page_url = page_url.replace("/watch_video.php?v=", "/player/embed_player.php?vid=")
page_url = page_url.replace('https://netu.tv/', 'http://hqq.watch/')
page_url = page_url.replace('https://waaw.tv/', 'http://hqq.watch/')
data = httptools.downloadpage(page_url).data
# ~ logger.debug(data)
js_wise = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>")
data = jswise(js_wise).replace("\\", "")
# ~ logger.debug(data)
alea = str(random.random())[2:]
data_ip = httptools.downloadpage('http://hqq.watch/player/ip.php?type=json&rand=%s' % alea).data
# ~ logger.debug(data_ip)
json_data_ip = jsontools.load(data_ip)
url = scrapertools.find_single_match(data, 'self\.location\.replace\("([^)]+)\)')
url = url.replace('"+rand+"', alea)
url = url.replace('"+data.ip+"', json_data_ip['ip'])
url = url.replace('"+need_captcha+"', '0') #json_data_ip['need_captcha'])
url = url.replace('"+token', '')
# ~ logger.debug(url)
headers = { "User-Agent": 'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/533.4 (KHTML, like Gecko) Chrome/5.0.375.127 Large Screen Safari/533.4 GoogleTV/162671' }
data = httptools.downloadpage('http://hqq.watch'+url, headers=headers).data
# ~ logger.debug(data)
codigo_js = scrapertools.find_multiple_matches(data, '<script>document.write\(unescape\("([^"]+)')
# ~ logger.debug(codigo_js)
js_aux = urllib.unquote(codigo_js[0])
at = scrapertools.find_single_match(js_aux, 'var at = "([^"]+)')
js_aux = urllib.unquote(codigo_js[1])
var_link_1 = scrapertools.find_single_match(js_aux, '&link_1=\\"\+encodeURIComponent\(([^)]+)')
var_server_2 = scrapertools.find_single_match(js_aux, '&server_2=\\"\+encodeURIComponent\(([^)]+)')
vid = scrapertools.find_single_match(js_aux, '&vid=\\"\+encodeURIComponent\(\\"([^"]+)')
ext = '.mp4.m3u8'
# ~ logger.debug('%s %s %s %s' % (at, var_link_1, var_server_2, vid))
js_wise = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>")
data = jswise(js_wise).replace("\\", "")
# ~ logger.debug(data)
variables = scrapertools.find_multiple_matches(data, 'var ([a-zA-Z0-9]+) = "([^"]+)";')
# ~ logger.debug(variables)
for nombre, valor in variables:
# ~ logger.debug('%s %s' % (nombre, valor))
if nombre == var_link_1: link_1 = valor
if nombre == var_server_2: server_2 = valor
link_m3u8 = 'http://hqq.watch/player/get_md5.php?ver=2&at=%s&adb=0&b=1&link_1=%s&server_2=%s&vid=%s&ext=%s' % (at, link_1, server_2, vid, ext)
# ~ logger.debug(link_m3u8)
video_urls.append(["[netu.tv]", link_m3u8])
return video_urls
## Obtener la url del m3u8
def tb(b_m3u8_2):
j = 0
s2 = ""
while j < len(b_m3u8_2):
s2 += "\\u0" + b_m3u8_2[j:(j + 3)]
j += 3
return s2.decode('unicode-escape').encode('ASCII', 'ignore')
## --------------------------------------------------------------------------------
## --------------------------------------------------------------------------------
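tb() turns the obfuscated digit stream back into text: each 3-character group is read as the low bits of a \u0XXX escape. A worked example with made-up input:

# "068" -> u"\u0068" ("h"), "074" -> "t", "070" -> "p", "03a" -> ":", "02f" -> "/"
assert tb("068074074070" + "03a02f02f") == "http://"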

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://uqload.com/embed-([a-z0-9]+).html",
"url": "https://uqload.com/embed-\\1.html"
}
]
},
"free": true,
"id": "uqload",
"name": "uqload",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://uqload.com/img/logo.png?v=0"
}
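A hedged sketch of how a pattern/url pair like this one can be applied: match the pattern against a page, then expand the template's backreference. (That this mirrors what Alfa's server dispatcher does internally is an assumption.)

import re

pattern = r"https://uqload.com/embed-([a-z0-9]+).html"
template = r"https://uqload.com/embed-\1.html"

page = '<iframe src="https://uqload.com/embed-k1p9z2.html">'
m = re.search(pattern, page)
if m:
    video_url = m.expand(template)    # -> "https://uqload.com/embed-k1p9z2.html"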

View File

@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector Uqload By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[Uqload] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'sources:.?\["([^"]+)"\]'
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:
video_urls.append(["[uqload]", url])
return video_urls
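A minimal driver for the new connector; the embed id is made up:

ok, msg = test_video_exists("https://uqload.com/embed-k1p9z2.html")
if ok:
    for label, url in get_video_url("https://uqload.com/embed-k1p9z2.html"):
        # each entry is ["[uqload]", direct_video_url]
        pass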