Merge remote-tracking branch 'alfa-addon/master' into fixes

Commit by Unknown on 2018-12-28 23:13:45 -03:00
90 changed files with 3050 additions and 1368 deletions

View File

@@ -626,7 +626,7 @@ class platform(Platformtools):
# Get the channel the call was made from and load the settings available for that channel
if not channelpath:
channelpath = inspect.currentframe().f_back.f_back.f_code.co_filename
channelname = os.path.basename(channelpath).replace(".py", "")
channelname = os.path.basename(channelpath).split(".")[0]
ch_type = os.path.basename(os.path.dirname(channelpath))
# If we don't have list_controls, we need to pull them from the channel's JSON
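The switch above from replace(".py", "") to split(".")[0] matters once Kodi hands back a compiled module path: replace() also eats the ".py" inside a ".pyo"/".pyc" extension, while split(".")[0] keeps everything before the first dot. A minimal stdlib sketch (paths are hypothetical):

import os

# Hypothetical channel paths; the compiled .pyo case is what the fix targets.
for path in ("channels/allcalidad.py", "channels/allcalidad.pyo"):
    base = os.path.basename(path)
    print(base.replace(".py", ""))  # "allcalidad", then "allcalidado" (wrong)
    print(base.split(".")[0])       # "allcalidad" in both cases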

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.17" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.19" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -10,8 +10,8 @@
<extension point="xbmc.addon.metadata">
<summary lang="es">Navega con Kodi por páginas web.</summary>
<assets>
<icon>logo-cumple.png</icon>
<fanart>fanart1.jpg</fanart>
<icon>logo-n.jpg</icon>
<fanart>fanart-xmas.jpg</fanart>
<screenshot>resources/media/themes/ss/1.jpg</screenshot>
<screenshot>resources/media/themes/ss/2.jpg</screenshot>
<screenshot>resources/media/themes/ss/3.jpg</screenshot>
@@ -19,15 +19,16 @@
</assets>
<news>[B]These are the changes in this version:[/B]
[COLOR green][B]Fixes[/B][/COLOR]
¤ Todopeliculas ¤ Maxipelis24 ¤ allcalidad
¤ descargacineclasico ¤ porntrex ¤ seriesmetro
¤ pedropolis ¤ thumzilla ¤ xms
¤ allcalidad ¤ asialiveaction ¤ repelis
¤ DoramasMP4 ¤ CanalPelis ¤ Vi2
¤ PeliculonHD ¤ PeliculasHD ¤ UltraPeliculasHD
¤ Newpct1 ¤ maxipelis24 ¤ repelis.live
¤ cuevana2 ¤ cuevana2espanol
[COLOR green][B]New[/B][/COLOR]
¤ cine24h ¤ hdfilmologia ¤ pelis24
¤ pelishd24 ¤ pelisplay
¤ pack +18
¤ Thanks to @chivmalev for contributing to this version
¤ Thanks to @mrgaturus, @GeorgeRamga and @chivmalev for contributing to this version
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

View File

@@ -20,7 +20,7 @@
"visible": true,
"lvalues": [
"No filtrar",
"LAT"
"Latino"
]
},
{

View File

@@ -11,7 +11,7 @@ from core.item import Item
from platformcode import config, logger
IDIOMAS = {'Latino': 'LAT'}
IDIOMAS = {'Latino': 'Latino'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'fastplay', 'flashx', 'openload', 'vimeo', 'netutv']
@@ -19,7 +19,7 @@ list_servers = ['rapidvideo', 'streamango', 'fastplay', 'flashx', 'openload', 'v
__channel__='allcalidad'
host = "http://allcalidad.com/"
host = "http://allcalidad.net/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)

View File

@@ -32,11 +32,11 @@ def mainlist(item):
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host+"/lista-de-anime.php",
itemlist.append(Item(channel=item.channel, action="lista", title="Series", contentTitle="Series", url=host+"/lista-de-anime.php",
thumbnail=thumb_series, range=[0,19]))
itemlist.append(Item(channel=item.channel, action="lista", title="Películas", url=host+"/catalogo.php?g=&t=peliculas&o=0",
itemlist.append(Item(channel=item.channel, action="lista", title="Películas", contentTitle="Películas", url=host+"/catalogo.php?g=&t=peliculas&o=0",
thumbnail=thumb_series, range=[0,19] ))
itemlist.append(Item(channel=item.channel, action="lista", title="Especiales", url=host+"/catalogo.php?g=&t=especiales&o=0",
itemlist.append(Item(channel=item.channel, action="lista", title="Especiales", contentTitle="Especiales", url=host+"/catalogo.php?g=&t=especiales&o=0",
thumbnail=thumb_series, range=[0,19]))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
thumbnail=thumb_series, range=[0,19]))
@@ -109,14 +109,14 @@ def lista(item):
context2 = autoplay.context
context.extend(context2)
scrapedurl=host+scrapedurl
if item.title!="Series":
if item.contentTitle!="Series":
itemlist.append(item.clone(title=scrapedtitle, contentTitle=show,url=scrapedurl,
thumbnail=scrapedthumbnail, action="findvideos", context=context))
else:
itemlist.append(item.clone(title=scrapedtitle, contentSerieName=show,url=scrapedurl, plot=scrapedplot,
thumbnail=scrapedthumbnail, action="episodios", context=context))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
itemlist.append(Item(channel=item.channel, url=item.url, range=next_page, title='Pagina Siguente >>>', action='lista'))
itemlist.append(Item(channel=item.channel, url=item.url, range=next_page, title='Pagina Siguente >>>', contentTitle=item.title, action='lista'))
return itemlist
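The lista() hunk above switches the Series branch from item.title to item.contentTitle: the 'Pagina Siguente >>>' pagination item reuses action='lista' with a different title, so checking title misrouted every page after the first, while contentTitle (now also set on the pagination item) stays stable. A reduced sketch, with a stand-in for core.item.Item:

# StubItem stands in for core.item.Item, purely for illustration.
class StubItem(object):
    def __init__(self, title, contentTitle):
        self.title, self.contentTitle = title, contentTitle

page2 = StubItem(title='Pagina Siguente >>>', contentTitle='Series')
print(page2.title != "Series")         # True  -> old check wrongly took the movie branch
print(page2.contentTitle != "Series")  # False -> new check still lists episodes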

View File

@@ -97,13 +97,13 @@ def episodios(item):
data = data.replace('"ep0','"epp"')
patron = '(?is)<div id="ep(\d+)".*?'
patron += 'src="([^"]+)".*?'
patron += 'href="([^"]+)" target="_blank"'
patron += '(href.*?)fa fa-download'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedepi, scrapedthumbnail, scrapedurl in matches:
url = scrapedurl
for scrapedepi, scrapedthumbnail, scrapedurls in matches:
title="1x%s - %s" % (scrapedepi, item.contentSerieName)
itemlist.append(item.clone(action='findvideos', title=title, url=url, thumbnail=scrapedthumbnail, type=item.type,
infoLabels=item.infoLabels))
urls = scrapertools.find_multiple_matches(scrapedurls, 'href="([^"]+)')
itemlist.append(item.clone(action='findvideos', title=title, url=item.url, thumbnail=scrapedthumbnail, type=item.type,
urls = urls, infoLabels=item.infoLabels))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
url=item.url, action="add_serie_to_library", extra="episodios",
@@ -182,35 +182,37 @@ def lista(item):
def findvideos(item):
logger.info()
itemlist = []
dl_links = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
### get the gvideo links
patron = 'class="Button Sm fa fa-download mg"></a><a target="_blank" rel="nofollow" href="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for dl_url in matches:
g_data = httptools.downloadpage(dl_url).data
video_id = scrapertools.find_single_match(g_data, 'jfk-button jfk-button-action" href="([^"]+)">')
g_url = '%s%s' % ('https://drive.google.com', video_id)
g_url = g_url.replace('&amp;', '&')
g_data = httptools.downloadpage(g_url, follow_redirects=False, only_headers=True).headers
url = g_data['location']
dl_links.append(Item(channel=item.channel, title='%s', url=url, action='play', infoLabels=item.infoLabels))
if item.type == 'pl':
new_url = scrapertools.find_single_match(data, '<div class="player">.*?<a href="([^"]+)" target')
data = httptools.downloadpage(new_url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li class="btn.*?" data-video="([^"]+)">'
matches = scrapertools.find_multiple_matches(data, patron)
for video_id in matches:
url_data = httptools.downloadpage('https://tinyurl.com/%s' % video_id, follow_redirects=False)
url = url_data.headers['location']
itemlist.append(Item(channel=item.channel, title = '%s', url=url, action='play', infoLabels=item.infoLabels))
patron = '<iframe src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
if not item.urls:
data = httptools.downloadpage(item.url).data
matches = scrapertools.find_multiple_matches(data, 'http://www.sutorimu[^"]+')
else:
matches = item.urls
for url in matches:
itemlist.append(item.clone(title = '%s', url=url, action='play'))
itemlist.extend(dl_links)
if "spotify" in url:
continue
data = httptools.downloadpage(url).data
bloque = scrapertools.find_single_match(data, "description articleBody(.*)/div")
urls = scrapertools.find_multiple_matches(bloque, "iframe src='([^']+)")
if urls:
# streaming case
for url1 in urls:
if "luis" in url1:
data = httptools.downloadpage(url1).data
url1 = scrapertools.find_single_match(data, 'file: "([^"]+)')
itemlist.append(item.clone(action = "play", title = "Ver en %s", url = url1))
else:
# download case
bloque = bloque.replace('"',"'")
urls = scrapertools.find_multiple_matches(bloque, "href='([^']+)")
for url2 in urls:
itemlist.append(item.clone(action = "play", title = "Ver en %s", url = url2))
if "data-video" in bloque:
urls = scrapertools.find_multiple_matches(bloque, 'data-video="([^"]+)')
for url2 in urls:
itemlist.append(item.clone(action = "play", title = "Ver en %s", url = "https://tinyurl.com/%s" %url2 ))
for item1 in itemlist:
if "tinyurl" in item1.url:
item1.url = httptools.downloadpage(item1.url, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
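The tinyurl links above are resolved without following the redirect: the channel asks httptools for headers only with follow_redirects=False and reads the Location header. A stdlib-only sketch of the same technique (the short code is hypothetical):

import urllib2

class NoRedirect(urllib2.HTTPRedirectHandler):
    def redirect_request(self, req, fp, code, msg, headers, newurl):
        return None  # refuse to follow, so the 3xx surfaces as an HTTPError

opener = urllib2.build_opener(NoRedirect)
try:
    opener.open("https://tinyurl.com/abc123")  # hypothetical short code
except urllib2.HTTPError as e:
    print(e.hdrs.get("location", ""))  # the resolved target URL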

View File

@@ -0,0 +1,15 @@
{
"id": "camwhoresbay",
"name": "camwhoresbay",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://www.camwhoresbay.com/images/porntrex.ico",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,97 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://www.camwhoresbay.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="peliculas", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search/%s/" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?'
patron += '<img class="thumb" src="([^"]+)".*?'
patron += '<div class="videos">([^"]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="video-item ">.*?'
patron += '<a href="([^"]+)" title="([^"]+)" class="thumb">.*?'
patron += 'data-original="([^"]+)".*?'
patron += '<i class="fa fa-clock-o"></i>(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data, 'video_alt_url3: \'([^\']+)\'')
if scrapedurl == "" :
scrapedurl = scrapertools.find_single_match(data, 'video_alt_url2: \'([^\']+)\'')
if scrapedurl == "" :
scrapedurl = scrapertools.find_single_match(data, 'video_alt_url: \'([^\']+)\'')
if scrapedurl == "" :
scrapedurl = scrapertools.find_single_match(data, 'video_url: \'([^\']+)\'')
itemlist.append(Item(channel=item.channel, action="play", title=scrapedurl, fulltitle=item.title, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist
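play() above probes video_alt_url3 down to video_url to take the highest quality the player page offers. The same cascade can be folded into a loop over candidate keys; a sketch assuming the same player-page format:

import re

def best_source(data):
    # Highest quality first, plain video_url as the last resort.
    for key in ("video_alt_url3", "video_alt_url2", "video_alt_url", "video_url"):
        match = re.search(key + ": '([^']+)'", data)
        if match:
            return match.group(1)
    return ""

print(best_source("video_url: 'https://host.tld/360p.mp4'"))  # hypothetical page data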

View File

@@ -3,7 +3,7 @@
"name": "CanalPelis",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"language": ["cast", "lat", "vose"],
"fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/canalpelisbg.jpg",
"thumbnail": "http://www.canalpelis.com/wp-content/uploads/2016/11/logo_web.gif",
"banner": "",
@@ -44,6 +44,38 @@
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -52,11 +52,11 @@ def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Peliculas", action="peliculas",thumbnail=get_thumb('movies', auto=True),
itemlist.append(item.clone(title="Peliculas", action="peliculas", thumbnail=get_thumb('movies', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'movies/', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Géneros", action="generos",thumbnail=get_thumb('genres', auto=True),
itemlist.append(item.clone(title="Géneros", action="generos", thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'genre/', viewmode="movie_with_plot"))
@@ -64,7 +64,7 @@ def mainlist(item):
text_blod=True, page=0, viewcontent='movies', url=host + 'release/',
viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Buscar", action="search",thumbnail=get_thumb('search', auto=True),
itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0))
itemlist.append(item.clone(title="Series", action="series", extra='serie', url=host + 'tvshows/',
@@ -122,6 +122,34 @@ def sub_search(item):
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host + 'movies/'
elif categoria == 'infantiles':
item.url = host + "genre/cine-animacion/"
elif categoria == 'terror':
item.url = host + "genre/cine-terror/"
else:
return []
itemlist = peliculas(item)
if itemlist[-1].title == "» Siguiente »":
itemlist.pop()
# Catch the exception so a failing channel does not interrupt the Novedades (new releases) listing
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()
itemlist = []
@@ -137,7 +165,7 @@ def peliculas(item):
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches[item.page:item.page + 20]:
for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches[item.page:item.page + 30]:
if 'Próximamente' not in quality and '-XXX.jpg' not in scrapedthumbnail:
scrapedtitle = scrapedtitle.replace('Ver ', '').strip()
@@ -148,12 +176,12 @@ def peliculas(item):
itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year},
contentTitle=contentTitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer", quality = quality))
title=title, context="buscar_trailer", quality=quality))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if item.page + 20 < len(matches):
itemlist.append(item.clone(page=item.page + 20,
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(
@@ -223,13 +251,12 @@ def series(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">.*?'
patron += '<div class="texto">([^<]+)</div>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches:
for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches[item.page:item.page + 30]:
if plot == '':
plot = scrapertools.find_single_match(data, '<div class="texto">([^<]+)</div>')
scrapedtitle = scrapedtitle.replace('Ver ', '').replace(
@@ -238,13 +265,20 @@ def series(item):
contentSerieName=scrapedtitle, show=scrapedtitle, plot=plot,
thumbnail=scrapedthumbnail, contentType='tvshow'))
url_next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
# url_next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
tmdb.set_infoLabels(itemlist, __modo_grafico__)
if url_next_page:
itemlist.append(Item(channel=__channel__, action="series",
title="» Siguiente »", url=url_next_page))
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(
data, '<link rel="next" href="([^"]+)" />')
if next_page:
itemlist.append(item.clone(url=next_page, page=0,
title="» Siguiente »", text_color=color3))
return itemlist
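series() above now mirrors peliculas(): it slices the already-scraped matches into blocks of 30 and only follows the site's <link rel="next"> once the local block is exhausted, restarting at page 0. A reduced sketch of that two-level pagination:

def next_step(matches, page, size=30):
    # Serve one local block; tell the caller how to continue.
    block = matches[page:page + size]
    if page + size < len(matches):
        return block, ("local", page + size)   # clone the item with page advanced
    return block, ("remote", 0)                # fetch rel="next", restart at page 0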
@@ -348,27 +382,32 @@ def episodios(item):
return itemlist
def findvideos(item):
logger.info()
from lib import generictools
import urllib
import base64
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
patron = 'data-post="(\d+)" data-nume="(\d+)".*?img src=\'([^\']+)\''
patron = "data-post='(\d+)' data-nume='(\d+)'.*?img src='([^']+)'>"
matches = re.compile(patron, re.DOTALL).findall(data)
for id, option, lang in matches:
lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
if lang == 'en':
lang = 'VOSE'
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type':'movie'}
lang = lang.lower().strip()
idioma = {'mx': '[COLOR cornflowerblue](LAT)[/COLOR]',
'es': '[COLOR green](CAST)[/COLOR]',
'en': '[COLOR red](VOSE)[/COLOR]',
'gb': '[COLOR red](VOSE)[/COLOR]'}
if lang in idioma:
lang = idioma[lang]
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type': 'movie'}
post = urllib.urlencode(post)
test_url = '%swp-admin/admin-ajax.php' % host
new_data = httptools.downloadpage(test_url, post=post, headers={'Referer':item.url}).data
new_data = httptools.downloadpage(test_url, post=post, headers={'Referer': item.url}).data
hidden_url = scrapertools.find_single_match(new_data, "src='([^']+)'")
new_data = httptools.downloadpage(hidden_url, follow_redirects=False)
try:
b64_url = scrapertools.find_single_match(new_data.headers['location'], "y=(.*)")
url = base64.b64decode(b64_url)
@@ -376,10 +415,11 @@ def findvideos(item):
url = hidden_url
if url != '':
itemlist.append(
Item(channel=item.channel, url=url, title='%s', action='play', language=lang,
infoLabels=item.infoLabels))
Item(channel=item.channel, action='play', language=lang, infoLabels=item.infoLabels,
url=url, title='Ver en: ' + '[COLOR yellowgreen]%s [/COLOR]' + lang))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
itemlist.sort(key=lambda it: it.language, reverse=False)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
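findvideos() above decodes the real link from a base64-encoded y= query parameter found in the redirect's Location header, keeping hidden_url as the fallback when decoding fails. A stdlib sketch of the decode step (the Location value is hypothetical):

import base64
import re

location = "https://redirector.example/?y=aHR0cHM6Ly9ob3N0LnRsZC92aWRlbw=="  # hypothetical
b64_url = re.search("y=(.*)", location).group(1)
print(base64.b64decode(b64_url))  # -> https://host.tld/video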

View File

@@ -232,11 +232,11 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, 'iframe-.*?src="([^"]+)')
data = httptools.downloadpage(url).data
patron = '<a href="([^"]+)'
patron = '(?i)src=&quot;([^&]+)&'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
if ".gif" in scrapedurl:
continue
title = "Ver en: %s"
itemlist.append(item.clone(action = "play",
title = title,

View File

@@ -3,14 +3,15 @@
"name": "Cine24H",
"active": true,
"adult": false,
"language": ["lat", "cast", "eng"],
"language": ["lat", "cast", "vose"],
"fanart": "https://i.postimg.cc/WpqD2n77/cine24bg.jpg",
"thumbnail": "https://cine24h.net/wp-content/uploads/2018/06/cine24hv2.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vose"
"vose",
"direct"
],
"settings": [
{

View File

@@ -138,10 +138,10 @@ def peliculas(item):
contentType = 'movie'
title = scrapedtitle
itemlist.append(item.clone(channel=__channel__, action=action, text_color=color3, show=scrapedtitle,
url=scrapedurl, infoLabels={'year': year}, contentType=contentType,
contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
title=title, context="buscar_trailer"))
itemlist.append(Item(channel=__channel__, action=action, text_color=color3, show=scrapedtitle,
url=scrapedurl, infoLabels={'year': year}, contentType=contentType,
contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
title=title, context="buscar_trailer"))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

View File

@@ -33,7 +33,7 @@ def mainlist(item):
itemlist.append(Item(channel = item.channel, title = "Favoritas", action = "movies",
url = host + "peliculas-destacadas", thumbnail = get_thumb("favorites", auto = True) ))
itemlist.append(Item(channel = item.channel, title = "Buscar...", action = "search",
url = host + "search/", thumbnail = get_thumb("search", auto = True)))
url = host + "pelicula/?s=", thumbnail = get_thumb("search", auto = True)))
# SERIES
itemlist.append(Item(channel = item.channel, title = "--- Series ---", folder=False, text_bold=True))
@@ -152,7 +152,6 @@ def searchShows(itemlist, item, texto):
itemlist.append(Item(channel = item.channel, title=title, url=host + link, action="episodes"))
def searchMovies(itemlist, item, texto):
texto = texto.replace(' ', '+').lower()
data = load_data(item.url + texto)
# pattern to find the movies
pattern = '<a href="([^"]+)"><div class="img">' #link
@@ -160,14 +159,12 @@ def searchMovies(itemlist, item, texto):
pattern += '<span style="width:([0-9]+)%">.*?'
pattern += '"txt">(.*?)</div>' # text
# put the found movies into the list, improvising a do-while
next_page = True
while next_page:
put_movies(itemlist, item, data, pattern)
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
# the do-while is no longer needed
put_movies(itemlist, item, data, pattern)
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
if next_page:
data = load_data(next_page)
if next_page:
itemlist.append(Item(channel = item.channel, title='Siguiente Pagina', url=next_page, action="movies"))
def search(item, texto):
itemlist = []
@@ -203,7 +200,10 @@ def age(item):
def GKPluginLink(hash):
hashdata = urllib.urlencode({r'link':hash})
json = httptools.downloadpage('https://player4.cuevana2.com/plugins/gkpluginsphp.php', post=hashdata).data
try:
json = httptools.downloadpage('https://player4.cuevana2.com/plugins/gkpluginsphp.php', post=hashdata).data
except:
return None
logger.info(jsontools.load(json))
data = jsontools.load(json) if json else False

View File

@@ -152,7 +152,10 @@ def search(item, text):
def GKPluginLink(hash):
hashdata = urllib.urlencode({r'link':hash})
json = httptools.downloadpage('https://player4.cuevana2.com/plugins/gkpluginsphp.php', post=hashdata).data
try:
json = httptools.downloadpage('https://player4.cuevana2.com/plugins/gkpluginsphp.php', post=hashdata).data
except:
return None
logger.info(jsontools.load(json))
data = jsontools.load(json) if json else False
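Both cuevana2 variants now guard the gkpluginsphp.php POST with try/except and return None when the player endpoint is unreachable, so callers must treat a missing result as "no sources". A sketch of the calling side under that assumption (GKPluginLink as defined above; sources_from is hypothetical):

def sources_from(hash):
    data = GKPluginLink(hash)  # may now return None on a network failure
    if not data:
        return []              # skip this source instead of raising
    return data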

View File

@@ -0,0 +1,15 @@
{
"id": "czechvideo",
"name": "Czechvideo",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://czechvideo.org/templates/Default/images/black75.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://czechvideo.org'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/tags/%s/" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<ul class="cat_menu" id="cat_menu_c0">(.*?)</ul>')
patron = '<li><a href="(.*?)".*?>(.*?)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = str(scrapedtitle)
scrapedurl = host + scrapedurl
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="short-story">.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)".*?div class="short-time">(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
scrapedthumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<del><a href="([^"]+)">Next</a></del>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
url = scrapertools.find_single_match(data,'<iframe src=.*?<iframe src="([^"]+)"')
url = "http:" + url
itemlist = servertools.find_video_items(data=url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist
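play() above grabs the second-level iframe and prepends "http:" because the site emits protocol-relative src values (//host/...). A small regex sketch of that normalization (markup is hypothetical):

import re

data = '<iframe src="//ads.example/x"></iframe><iframe src="//player.example/embed/42">'  # hypothetical
url = re.search('<iframe src=.*?<iframe src="([^"]+)"', data).group(1)
print("http:" + url)  # -> http://player.example/embed/42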

View File

@@ -8,5 +8,15 @@
"thumbnail": "descargacineclasico2.png",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -16,17 +16,20 @@ from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'https://www.doramasmp4.com/'
host = 'https://www2.doramasmp4.com/'
IDIOMAS = {'sub': 'VOSE', 'VO': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'streamango', 'netutv', 'okru', 'directo', 'mp4upload']
def get_source(url):
def get_source(url, referer=None):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
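get_source() now takes an optional referer, forwarded as a Referer header for hosts that reject referer-less requests; note the cleanup regex also stops stripping double quotes, which the quoted-attribute patterns below depend on. Hypothetical usage against the host defined above:

data = get_source(host + 'latest-episodes')        # plain fetch, no Referer header
data = get_source(host + 'video/1', referer=host)  # hypothetical path; sends Referer: <host>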
def mainlist(item):
@@ -37,10 +40,15 @@ def mainlist(item):
itemlist.append(Item(channel= item.channel, title="Doramas", action="doramas_menu",
thumbnail=get_thumb('doramas', auto=True), type='dorama'))
itemlist.append(Item(channel=item.channel, title="Variedades", action="list_all",
url=host + 'catalogue?format%5B%5D=varieties&sort=latest',
thumbnail='', type='dorama'))
itemlist.append(Item(channel=item.channel, title="Películas", action="list_all",
url=host + 'catalogue?format=pelicula', thumbnail=get_thumb('movies', auto=True),
type='movie'))
itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'ajax/search.php',
url=host + 'catalogue?format%5B%5D=movie&sort=latest',
thumbnail=get_thumb('movies', auto=True), type='movie'))
itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
@@ -52,8 +60,9 @@ def doramas_menu(item):
itemlist =[]
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'catalogue',
thumbnail=get_thumb('all', auto=True), type='dorama'))
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all",
url=host + 'catalogue?format%5B%5D=drama&sort=latest', thumbnail=get_thumb('all', auto=True),
type='dorama'))
itemlist.append(Item(channel=item.channel, title="Nuevos capitulos", action="latest_episodes",
url=host + 'latest-episodes', thumbnail=get_thumb('new episodes', auto=True), type='dorama'))
return itemlist
@@ -62,22 +71,25 @@ def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<div class=col-lg-2 col-md-3 col-6><a href=(.*?) title=.*?'
patron += '<img src=(.*?) alt=(.*?) class=img-fluid>.*?bg-primary text-capitalize>(.*?)</span>'
patron = '<div class="col-lg-2 col-md-3 col-6 mb-3"><a href="([^"]+)".*?<img src="([^"]+)".*?'
patron += 'txt-size-12">(\d{4})<.*?text-truncate">([^<]+)<.*?description">([^<]+)<.*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtype in matches:
media_type = item.type
for scrapedurl, scrapedthumbnail, year, scrapedtitle, scrapedplot in matches:
url = scrapedurl
scrapedtype = scrapedtype.lower()
scrapedtitle = scrapedtitle
thumbnail = scrapedthumbnail
new_item = Item(channel=item.channel, title=scrapedtitle, url=url,
thumbnail=thumbnail, type=scrapedtype)
if scrapedtype != 'dorama':
thumbnail=thumbnail, type=media_type, infoLabels={'year':year})
if media_type != 'dorama':
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
new_item.type = item.type
else:
new_item.contentSerieName=scrapedtitle
@@ -88,55 +100,22 @@ def list_all(item):
# Pagination
if itemlist != []:
if item.type != 'dorama':
page_base = host+'catalogue?type[]=pelicula'
else:
page_base = host + 'catalogue'
next_page = scrapertools.find_single_match(data, '<a href=([^ ]+) aria-label=Netx>')
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" aria-label="Netx">')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
url=page_base+next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
url=host+'catalogue'+next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
type=item.type))
return itemlist
def search_results(item):
logger.info()
itemlist=[]
data = httptools.downloadpage(item.url, post=item.post).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = '<a class=media p-2 href=(.*?)><img class=mr-2 src=(.*?)>.*?500>(.*?)</div>'
patron += '<div class=text-muted tx-11>(.*?)</div>.*?200>(.*?)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedtype in matches:
new_item = Item(channel=item.channel, url=scrapedurl, thumbnail=scrapedthumbnail, title=scrapedtitle)
if scrapedtype != 'dorama':
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
else:
new_item.contentSerieName=scrapedtitle
new_item.action = 'episodios'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def latest_episodes(item):
logger.info()
itemlist = []
infoLabels = dict()
data = get_source(item.url)
patron = '<div class=col-lg-3 col-md-6 mb-2><a href=(.*?) title=.*?'
patron +='<img src=(.*?) alt.*?truncate-width>(.*?)<.*?mb-1>(.*?)<'
patron = 'shadow-lg rounded" href="([^"]+)".*?src="([^"]+)".*?style="">([^<]+)<.*?>Capítulo (\d+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedep in matches:
title = '%s %s' % (scrapedtitle, scrapedep)
contentSerieName = scrapedtitle
itemlist.append(Item(channel=item.channel, action='findvideos', url=scrapedurl, thumbnail=scrapedthumbnail,
@@ -151,8 +130,7 @@ def episodios(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<a itemprop=url href=(.*?) title=.*? class=media.*?truncate-width>(.*?)<.*?'
patron +='text-muted mb-1>Capítulo (.*?)</div>'
patron = '<a itemprop="url".*?href="([^"]+)".*?title="(.*?) Cap.*?".*?>Capítulo (\d+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -186,77 +164,56 @@ def findvideos(item):
logger.info()
itemlist = []
duplicated = []
headers={'referer':item.url}
data = get_source(item.url)
patron = 'animated pulse data-url=(.*?)>'
patron = 'link="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if '</strong> ¡Este capítulo no tiene subtítulos, solo audio original! </div>' in data:
language = IDIOMAS['VO']
else:
language = IDIOMAS['sub']
if item.type !='episode' and '<meta property=article:section content=Pelicula>' not in data:
item.type = 'dorama'
#if item.type !='episode' and '<meta property=article:section content=Pelicula>' not in data:
# if item.type !='episode' and item.type != 'movie':
# item.type = 'dorama'
# item.contentSerieName = item.contentTitle
# item.contentTitle = ''
# return episodios(item)
# else:
for video_url in matches:
headers = {'referer': video_url}
token = scrapertools.find_single_match(video_url, 'token=(.*)')
if 'fast.php' in video_url:
video_url = 'https://player.rldev.in/fast.php?token=%s' % token
video_data = httptools.downloadpage(video_url, headers=headers).data
url = scrapertools.find_single_match(video_data, "'file':'([^']+)'")
else:
video_url = 'https://www2.doramasmp4.com/api/redirect.php?token=%s' % token
video_data = httptools.downloadpage(video_url, headers=headers, follow_redirects=False).headers
url = scrapertools.find_single_match(video_data['location'], '\d+@@@(.*?)@@@')
new_item = Item(channel=item.channel, title='[%s] [%s]', url=url, action='play', language = language)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
if len(itemlist) == 0 and item.type == 'search':
item.contentSerieName = item.contentTitle
item.contentTitle = ''
return episodios(item)
else:
for video_url in matches:
video_data = httptools.downloadpage(video_url, headers=headers).data
server = ''
if 'Media player DMP4' in video_data:
url = scrapertools.find_single_match(video_data, "sources: \[\{'file':'(.*?)'")
server = 'Directo'
else:
url = scrapertools.find_single_match(video_data, '<iframe src="(.*?)".*?scrolling="no"')
new_item = Item(channel=item.channel, title='[%s] [%s]', url=url, action='play', language = language)
if server !='':
new_item.server = server
itemlist.append(new_item)
# Required for FilterTools
# for video_item in itemlist:
# if 'sgl.php' in video_item.url:
# headers = {'referer': item.url}
# patron_gvideo = "'file':'(.*?)','type'"
# data_gvideo = httptools.downloadpage(video_item.url, headers=headers).data
# video_item.url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
#
# duplicated.append(video_item.url)
# video_item.channel = item.channel
# video_item.infoLabels = item.infoLabels
# video_item.language=IDIOMAS['sub']
#
# patron = 'var item = {id: (\d+), episode: (\d+),'
# matches = re.compile(patron, re.DOTALL).findall(data)
#
# for id, episode in matches:
# data_json=jsontools.load(httptools.downloadpage(host+'/api/stream/?id=%s&episode=%s' %(id, episode)).data)
# sources = data_json['options']
# for src in sources:
# url = sources[src]
#
# if 'sgl.php' in url:
# headers = {'referer':item.url}
# patron_gvideo = "'file':'(.*?)','type'"
# data_gvideo = httptools.downloadpage(url, headers = headers).data
# url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
#
# new_item = Item(channel=item.channel, title='%s', url=url, language=IDIOMAS['sub'], action='play',
# infoLabels=item.infoLabels)
# if url != '' and url not in duplicated:
# itemlist.append(new_item)
# duplicated.append(url)
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for FilterTools
# Required for AutoPlay
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
autoplay.start(itemlist, item)
return itemlist
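In the rewritten findvideos() above, each player link carries a token routed either to the fast.php player (the file is scraped from the response body) or to redirect.php, whose target travels inside the Location header delimited by @@@ markers. A stdlib sketch of that Location parsing (the sample value is hypothetical):

import re

location = "app://play#123@@@https://host.tld/v.mp4@@@end"  # hypothetical Location value
url = re.search(r"\d+@@@(.*?)@@@", location).group(1)
print(url)  # -> https://host.tld/v.mp4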
@@ -266,14 +223,11 @@ def search(item, texto):
import urllib
itemlist = []
texto = texto.replace(" ", "+")
item.url = item.url
post = {'q':texto}
post = urllib.urlencode(post)
item.url = item.url + texto
item.type = 'search'
item.post = post
if texto != '':
try:
return search_results(item)
return list_all(item)
except:
itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
return itemlist

View File

@@ -0,0 +1,17 @@
{
"id": "eroticasonlinetv",
"name": "eroticasonlinetv",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.peliculaseroticasonline.tv/wp-content/themes/wpeliculaseroticasonlinetv/favicon.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://www.peliculaseroticasonline.tv'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)".*?>([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="movie-poster"><a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
plot = ""
contentTitle = scrapedtitle
url = urlparse.urljoin(item.url,scrapedurl)
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=scrapedtitle , url=url, thumbnail=scrapedthumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page = scrapertools.find_single_match(data, '<div class="naviright"><a href="([^"]+)">Siguiente &raquo;</a>')
if next_page:
next_page = urlparse.urljoin(item.url, next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ))
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
url = urlparse.urljoin(item.url, url)
data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
if url == "":
url = scrapertools.find_single_match(data, 'window.location="([^"]+)"')
itemlist = servertools.find_video_items(data=url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist
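play() above peels two iframe layers and, when the inner page embeds no iframe, falls back to the window.location script redirect. A condensed sketch of that fallback (regexes from the hunk; page content is hypothetical):

import re

def extract_player(data):
    match = re.search('<iframe src="([^"]+)"', data)
    if match:
        return match.group(1)
    match = re.search('window.location="([^"]+)"', data)
    return match.group(1) if match else ""

print(extract_player('<script>window.location="https://player.example/v";</script>'))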

View File

@@ -3,29 +3,30 @@
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
host = "https://www.youfreeporntube.net"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="lista", title="Útimos videos",
url="http://www.ero-tik.com/newvideos.html?&page=1"))
url= host + "/new-clips.html?&page=1"))
itemlist.append(
Item(channel=item.channel, action="categorias", title="Categorias", url="http://www.ero-tik.com/browse.html"))
itemlist.append(Item(channel=item.channel, action="lista", title="Top ultima semana",
url="http://www.ero-tik.com/topvideos.html?do=recent"))
Item(channel=item.channel, action="categorias", title="Categorias", url=host + "/browse.html"))
itemlist.append(Item(channel=item.channel, action="lista", title="Populares",
url=host + "/topvideo.html?page=1"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
url="http://www.ero-tik.com/search.php?keywords="))
url=host + "/search.php?keywords="))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "{0}{1}".format(item.url, texto)
try:
@@ -41,96 +42,73 @@ def search(item, texto):
def categorias(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
patron = '<div class="pm-li-category"><a href="([^"]+)">.*?.<h3>(.*?)</h3></a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, actriz in matches:
itemlist.append(Item(channel=item.channel, action="listacategoria", title=actriz, url=url))
return itemlist
def lista(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
# Extract the entries from the selected page
patron = '<li><div class=".*?<a href="([^"]+)".*?>.*?.img src="([^"]+)".*?alt="([^"]+)".*?>'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
title = scrapedtitle.strip()
# Add to the list
itemlist.append(Item(channel=item.channel, action="play", thumbnail=thumbnail, fanart=thumbnail, title=title,
fulltitle=title, url=url,
viewmode="movie", folder=True))
paginacion = scrapertools.find_single_match(data,
'<li class="active"><a href="#" onclick="return false;">\d+</a></li><li class=""><a href="([^"]+)">')
if paginacion:
itemlist.append(Item(channel=item.channel, action="lista", title=">> Página Siguiente",
url="http://ero-tik.com/" + paginacion))
url=host + "/" + paginacion))
return itemlist
def listacategoria(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
# Extract the entries from the selected page
patron = '<li><div class=".*?<a href="([^"]+)".*?>.*?.img src="([^"]+)".*?alt="([^"]+)".*?>'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
title = scrapedtitle.strip()
# Add to the list
itemlist.append(
Item(channel=item.channel, action="play", thumbnail=thumbnail, title=title, fulltitle=title, url=url,
viewmode="movie", folder=True))
paginacion = scrapertools.find_single_match(data,
'<li class="active"><a href="#" onclick="return false;">\d+</a></li><li class=""><a href="([^"]+)">')
if paginacion:
itemlist.append(
Item(channel=item.channel, action="listacategoria", title=">> Página Siguiente", url=paginacion))
return itemlist
def play(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cachePage(item.url)
data = scrapertools.unescape(data)
logger.info(data)
from core import servertools
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
videoitem.action = "play"
videoitem.folder = False
videoitem.title = item.title
data = httptools.downloadpage(item.url).data
item.url = scrapertools.find_single_match(data, 'Playerholder.*?src="([^"]+)"')
if "tubst.net" in item.url:
url = scrapertools.find_single_match(data, 'itemprop="embedURL" content="([^"]+)')
data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"')
data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
item.url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist.append(item.clone())
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist

View File

@@ -1,22 +0,0 @@
{
"id": "filesmonster_catalogue",
"name": "Filesmonster Catalogue",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "filesmonster_catalogue.png",
"banner": "filesmonster_catalogue.png",
"categories": [
"adult"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,397 +0,0 @@
# -*- coding: utf-8 -*-
import os
import re
from core import scrapertools
from core.item import Item
from platformcode import config, logger
def strip_tags(value):
return re.sub(r'<[^>]*?>', '', value)
def mainlist(item):
logger.info()
user = config.get_setting("filesmonsteruser")
itemlist = []
itemlist.append(Item(channel=item.channel, action="unusualporn", title="Canal unusualporn.net",
thumbnail="http://filesmonster.biz/img/logo.png"))
itemlist.append(Item(channel=item.channel, action="files_monster", title="Canal files-monster.org",
thumbnail="http://files-monster.org/template/static/images/logo.jpg"))
itemlist.append(Item(channel=item.channel, action="filesmonster", title="Canal filesmonster.filesdl.net",
thumbnail="http://filesmonster.biz/img/logo.png"))
if user != '': itemlist.append(
Item(channel=item.channel, action="favoritos", title="Favoritos en filesmonster.com del usuario " + user,
folder=True))
return itemlist
def filesmonster(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="videos", title="Ultimos vídeos",
thumbnail="http://photosex.biz/imager/w_400/h_400/9f869c6cb63e12f61b58ffac2da822c9.jpg",
url="http://filesmonster.filesdl.net"))
itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias",
thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg",
url="http://filesmonster.filesdl.net"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar en filesmonster.fliesdl.net",
url="http://filesmonster.filesdl.net/posts/search?q=%s"))
return itemlist
def unusualporn(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="videos_2", title="Últimos vídeos", url="http://unusualporn.net/",
thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
itemlist.append(Item(channel=item.channel, action="categorias_2", title="Categorías", url="http://unusualporn.net/",
thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar en unusualporn",
url="http://unusualporn.net/search/%s"))
return itemlist
def files_monster(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, action="videos_3", title="Últimos vídeos", url="http://www.files-monster.org/",
thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
itemlist.append(
Item(channel=item.channel, action="categorias_3", title="Categorías", url="http://www.files-monster.org/",
thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar en files-monster.org",
url="http://files-monster.org/search?search=%s"))
return itemlist
def favoritos(item):
user = config.get_setting("filesmonsteruser")
password = config.get_setting("filesmonsterpassword")
logger.info()
name_file = os.path.splitext(os.path.basename(__file__))[0]
fname = os.path.join(config.get_data_path(), "settings_channels", name_file + "_favoritos.txt")
fa = open(fname, 'a+')
fa.close()
f = open(fname, 'r')
lines = f.readlines()
f.close()
itemlist = []
post2 = "username=" + user + "&password=" + password
login_url = "http://filesmonster.com/api/public/login"
data1 = scrapertools.cache_page(login_url, post=post2)
partes1 = data1.split('"')
estado = partes1[3]
if estado != 'success': itemlist.append(Item(channel=item.channel,
title="No pudo accederse con tus datos de acceso de Filesmonster.com, introdúcelos en con el apartado figuración. Error: " + estado + data1))
url_favoritos = "http://filesmonster.com/?favorites=1"
data2 = scrapertools.cache_page(url_favoritos, post=post2)
data2 = scrapertools.find_single_match(data2, 'favorites-table(.*?)pager')
patronvideos = '<a href="([^"]+)">([^<]+)</a>.*?del=([^"]+)"'
matches = re.compile(patronvideos, re.DOTALL).findall(data2)
contador = 0
for url, title, borrar in matches:
contador = contador + 1
imagen = ''
for linea in lines:
partes2 = linea.split("@")
parte_url = partes2[0]
parte_imagen = partes2[1]
if (parte_url == url): imagen = parte_imagen.rstrip('\n').rstrip('\r')
if url.find("?fid=") == -1:
itemlist.append(
Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.title,
url=url, thumbnail=imagen, folder=False))
else:
itemlist.append(
Item(channel=item.channel, action="detail", server="filesmonster", title=title, fulltitle=title,
thumbnail=imagen, url=url, folder=True))
itemlist.append(Item(channel=item.channel, action="quitar_favorito",
title="(-) quitar de mis favoritos en filesmonster.com", thumbnail=imagen,
url="http://filesmonster.com/?favorites=1&del=" + borrar, plot=borrar))
itemlist.append(Item(channel=item.channel, title="", folder=True))
if contador == 0 and estado == 'success':
itemlist.append(
Item(channel=item.channel, title="No tienes ningún favorito, navega por las diferentes fuentes y añádelos"))
return itemlist
def quitar_favorito(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
itemlist.append(Item(channel=item.channel, action="favoritos",
title="El vídeo ha sido eliminado de tus favoritos, pulsa para volver a tu lista de favoritos"))
return itemlist
def anadir_favorito(item):
logger.info()
name_file = os.path.splitext(os.path.basename(__file__))[0]
fname = os.path.join(config.get_data_path(), "settings_channels", name_file + "_favoritos.txt")
user = config.get_setting("filesmonsteruser")
password = config.get_setting("filesmonsterpassword")
itemlist = []
post2 = "username=" + user + "&password=" + password
login_url = "http://filesmonster.com/api/public/login"
data1 = scrapertools.cache_page(login_url, post=post2)
if item.plot == 'el archivo':
id1 = item.url.split('?id=')
id = id1[1]
que = "file"
if item.plot == 'la carpeta':
id1 = item.url.split('?fid=')
id = id1[1]
que = "folder"
url = "http://filesmonster.com/ajax/add_to_favorites"
post3 = "username=" + user + "&password=" + password + "&id=" + id + "&obj_type=" + que
data2 = scrapertools.cache_page(url, post=post3)
if data2 == 'Already in Your favorites': itemlist.append(Item(channel=item.channel, action="favoritos",
title="" + item.plot + " ya estaba en tu lista de favoritos (" + user + ") en Filesmonster"))
if data2 != 'You are not logged in' and data2 != 'Already in Your favorites':
itemlist.append(Item(channel=item.channel, action="favoritos",
title="Se ha añadido correctamente " + item.plot + " a tu lista de favoritos (" + user + ") en Filesmonster",
plot=data1 + data2))
f = open(fname, "a+")
if (item.plot == 'la carpeta'):
ruta = "http://filesmonster.com/folders.php?"
if (item.plot == 'el archivo'):
ruta = "http://filesmonster.com/download.php"
laruta = ruta + item.url
laruta = laruta.replace("http://filesmonster.com/folders.php?http://filesmonster.com/folders.php?",
"http://filesmonster.com/folders.php?")
laruta = laruta.replace("http://filesmonster.com/download.php?http://filesmonster.com/download.php?",
"http://filesmonster.com/download.php?")
f.write(laruta + '@' + item.thumbnail + '\n')
f.close()
if data2 == 'You are not logged in': itemlist.append(Item(channel=item.channel, action="favoritos",
title="No ha sido posible añadir " + item.plot + " a tu lista de favoritos (" + user + " no logueado en Filesmonster)", ))
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
data = scrapertools.find_single_match(data,
'Categories <b class="caret"></b></a>(.*?)RSS <b class="caret"></b></a>')
patronvideos = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel, action="videos", title=title, url=url))
return itemlist
def categorias_2(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
patronvideos = '<li class="cat-item cat-item-[\d]+"><a href="([^"]+)" title="[^"]+">([^<]+)</a><a class="rss_s" title="[^"]+" target="_blank" href="[^"]+"></a></li>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel, action="videos_2", title=title, url=url))
return itemlist
def categorias_3(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
patronvideos = '<li><a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel, action="videos_3", title=title, url=url))
return itemlist
def search(item, texto):
logger.info("texto:" + texto)
original = item.url
item.url = item.url % texto
try:
if original == 'http://filesmonster.filesdl.net/posts/search?q=%s':
return videos(item)
if original == 'http://unusualporn.net/search/%s':
return videos_2(item)
if original == 'http://files-monster.org/search?search=%s':
return videos_3(item)
# Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def videos(item):
logger.info()
itemlist = []
url = item.url
while url and len(itemlist) < 25:
data = scrapertools.downloadpage(url)
patronvideos = '<div class="panel-heading">.*?<a href="([^"]+)">([^<]+).*?</a>.*?<div class="panel-body" style="text-align: center;">.*?<img src="([^"]+)".*?'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title, thumbnail in matches:
title = title.strip()
itemlist.append(
Item(channel=item.channel, action="detail", title=title, fulltitle=title, url=url, thumbnail=thumbnail))
url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Next</a></li>').replace("&amp;", "&")
# Link to the next page
if url:
itemlist.append(Item(channel=item.channel, action="videos", title=">> Página Siguiente", url=url))
return itemlist
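The loop above keeps following the "Next" link and accumulating entries until roughly 25 items have been collected; the videos_2 and videos_3 variants below reuse the same pattern. A generic sketch of that idea (a hypothetical helper; fetch and parse stand in for scrapertools.downloadpage and the per-variant regex extraction):

from core import scrapertools

def paginar(url, fetch, parse, next_pattern, minimo=25):
    # Download pages and extract entries until we have at least
    # `minimo` items or there is no next-page link left.
    items = []
    while url and len(items) < minimo:
        data = fetch(url)
        items.extend(parse(data))
        url = scrapertools.find_single_match(data, next_pattern).replace("&amp;", "&")
    return items, url  # url holds the pending next page, if any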
def videos_2(item):
logger.info()
itemlist = []
url = item.url
while url and len(itemlist) < 25:
data = scrapertools.downloadpage(url)
patronvideos = 'data-link="([^"]+)" data-title="([^"]+)" src="([^"]+)" border="0" />'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title, thumbnail in matches:
itemlist.append(Item(channel=item.channel, action="detail_2", title=title, fulltitle=title, url=url,
thumbnail=thumbnail))
url = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace("&amp;", "&")
# Link to the next page
if url:
itemlist.append(Item(channel=item.channel, action="videos_2", title=">> Página Siguiente", url=url))
return itemlist
def videos_3(item):
logger.info()
itemlist = []
url = item.url
url_limpia = item.url.split("?")[0]
while url and len(itemlist) < 25:
data = scrapertools.downloadpage(url)
patronvideos = '<a href="([^"]+)">.*?<img src="([^"]+)" border="0" title=".*?([^"]+).*?" height="70" />'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, thumbnail, title in matches:
itemlist.append(Item(channel=item.channel, action="detail_2", title=title, fulltitle=title, url=url,
thumbnail=thumbnail))
url = scrapertools.find_single_match(data,
'<a style="text-decoration:none;" href="([^"]+)">&rarr;</a>').replace(
"&amp;", "&")
# Link to the next page
if url:
itemlist.append(
Item(channel=item.channel, action="videos_3", title=">> Página Siguiente", url=url_limpia + url))
return itemlist
def detail(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
patronvideos = '["\'](http://filesmonster\.com/download\.php\?[^"\']+)["\']'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url in matches:
title = "Archivo %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
itemlist.append(
Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.fulltitle,
url=url, thumbnail=item.thumbnail, folder=False))
itemlist.append(Item(channel=item.channel, action="anadir_favorito",
title="(+) Añadir el vídeo a tus favoritos en filesmonster", url=url,
thumbnail=item.thumbnail, plot="el archivo", folder=True))
itemlist.append(Item(channel=item.channel, title=""))
patronvideos = '["\'](http://filesmonster\.com/folders\.php\?[^"\']+)["\']'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url in matches:
if not url == item.url:
logger.info(url)
logger.info(item.url)
title = "Carpeta %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
itemlist.append(Item(channel=item.channel, action="detail", title=title, fulltitle=item.fulltitle, url=url,
thumbnail=item.thumbnail, folder=True))
itemlist.append(Item(channel=item.channel, action="anadir_favorito",
title="(+) Añadir la carpeta a tus favoritos en filesmonster", url=url,
thumbnail=item.thumbnail, plot="la carpeta", folder=True))
itemlist.append(Item(channel=item.channel, title=""))
return itemlist
def detail_2(item):
logger.info()
itemlist = []
# download the page
data = scrapertools.downloadpageGzip(item.url)
data = data.split('<span class="filesmonsterdlbutton">Download from Filesmonster</span>')
data = data[0]
# extract the download url (capture only the query part after download.php)
patronvideos = 'href="http://filesmonster.com/download.php([^"]+)"'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for match2 in matches:
url = "http://filesmonster.com/download.php" + match2
title = "Archivo %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
itemlist.append(
Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.fulltitle,
url=url, thumbnail=item.thumbnail, folder=False))
itemlist.append(Item(channel=item.channel, action="anadir_favorito",
title="(+) Añadir el vídeo a tus favoritos en filesmonster", url=match2,
thumbnail=item.thumbnail, plot="el archivo", folder=True))
itemlist.append(Item(channel=item.channel, title=""))
patronvideos = '["\'](http://filesmonster\.com/folders\.php\?[^"\']+)["\']'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url in matches:
if not url == item.url:
logger.info(url)
logger.info(item.url)
title = "Carpeta %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
itemlist.append(Item(channel=item.channel, action="detail", title=title, fulltitle=item.fulltitle, url=url,
thumbnail=item.thumbnail, folder=True))
itemlist.append(Item(channel=item.channel, action="anadir_favorito",
title="(+) Añadir la carpeta a tus favoritos en filesmonster", url=url,
thumbnail=item.thumbnail, plot="la carpeta", folder=True))
itemlist.append(Item(channel=item.channel, title=""))
return itemlist

View File

@@ -4,7 +4,7 @@
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://fxtimg.com/xlogo_.png.pagespeed.ic.doVRQMV5ub.png",
"thumbnail": "http://fxtimg.com/xlogo_.png.pagespeed.ic.doVRQMV5ub.png|Referer=http://es.foxtube.com",
"banner": "",
"categories": [
"adult"

View File

@@ -54,13 +54,13 @@ def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<a class="thumb tco1" href="([^"]+)">.*?src="([^"]+)".*?alt="([^"]+)">.*?<i class="m tc2">(.*?)</i>'
patron = '<a class="thumb tco1" href="([^"]+)">.*?src="([^"]+)".*?alt="([^"]+)".*?<i class="m tc2">(.*?)</i>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
url = host + scrapedurl
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
thumbnail = scrapedthumbnail + "|Referer=%s" %host
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
@@ -80,7 +80,7 @@ def play(item):
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
scrapedurl = scrapedurl.replace("\/", "/")
itemlist.append(Item(channel=item.channel, action="play", title=scrapedurl, fulltitle=scrapedurl, url=scrapedurl,
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist

View File

@@ -1,21 +0,0 @@
{
"id": "freecambay",
"name": "FreeCamBay",
"language": ["*"],
"active": true,
"adult": true,
"thumbnail": "http://i.imgur.com/wuzhOCt.png?1",
"categories": [
"adult"
],
"settings": [
{
"id": "menu_info",
"type": "bool",
"label": "Mostrar menú antes de reproducir con imágenes",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,261 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
host = "http://www.freecambay.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/latest-updates/"))
itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/top-rated/"))
itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/most-popular/"))
itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories/"))
itemlist.append(item.clone(action="categorias", title="Modelos",
url=host + "/models/?mode=async&function=get_block&block_id=list_models_models" \
"_list&sort_by=total_videos"))
itemlist.append(item.clone(action="playlists", title="Listas", url=host + "/playlists/"))
itemlist.append(item.clone(action="tags", title="Tags", url=host + "/tags/"))
itemlist.append(item.clone(title="Buscar...", action="search"))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
item.url = "%s/search/%s/" % (host, texto.replace("+", "-"))
item.extra = texto
try:
return lista(item)
# Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def lista(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
action = "play"
if config.get_setting("menu_info", "freecambay"):
action = "menu_info"
# Extract the entries
patron = '<div class="item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)"(.*?)<div class="duration">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches:
if duration:
scrapedtitle = "%s - %s" % (duration, scrapedtitle)
if '>HD<' in quality:
scrapedtitle += " [COLOR red][HD][/COLOR]"
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Extract the next-page marker
if item.extra:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
if next_page:
if "from_videos=" in item.url:
next_page = re.sub(r'&from_videos=(\d+)', '&from_videos=%s' % next_page, item.url)
else:
next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result" \
"&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page)
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
else:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?href="([^"]*)"')
if next_page and not next_page.startswith("#"):
next_page = urlparse.urljoin(host, next_page)
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
else:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
if next_page:
if "from=" in item.url:
next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
else:
next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % (
item.url, next_page)
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
return itemlist
def categorias(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries
patron = '<a class="item" href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?<div class="videos">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, videos in matches:
if videos:
scrapedtitle = "%s (%s)" % (scrapedtitle, videos)
itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Extract the next-page marker
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
if next_page:
if "from=" in item.url:
next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
else:
next_page = "%s&from=%s" % (item.url, next_page)
itemlist.append(item.clone(action="categorias", title=">> Página Siguiente", url=next_page))
return itemlist
def playlists(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries
patron = '<div class="item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)".*?<div class="videos">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, videos in matches:
if videos:
scrapedtitle = "%s (%s)" % (scrapedtitle, videos)
itemlist.append(item.clone(action="videos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Extract the next-page marker
next_page = scrapertools.find_single_match(data, '<li class="next">.*?href="([^"]+)"')
if next_page:
next_page = urlparse.urljoin(host, next_page)
itemlist.append(item.clone(action="playlists", title=">> Página Siguiente", url=next_page))
return itemlist
def videos(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
action = "play"
if config.get_setting("menu_info", "freecambay"):
action = "menu_info"
# Extract the entries
patron = '<a href="([^"]+)" class="item ".*?data-original="([^"]+)".*?<strong class="title">\s*([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapedtitle.strip()
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Extract the next-page marker
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
if next_page:
if "from=" in item.url:
next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
else:
next_page = "%s?mode=async&function=get_block&block_id=playlist_view_playlist_view&sort_by" \
"=added2fav_date&&from=%s" % (item.url, next_page)
itemlist.append(item.clone(action="videos", title=">> Página Siguiente", url=next_page))
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?:video_url|video_alt_url[0-9]*)\s*:\s*\'([^\']+)\'.*?(?:video_url_text|video_alt_url[0-9]*_text)\s*:\s*\'([^\']+)\''
matches = scrapertools.find_multiple_matches(data, patron)
if not matches:
patron = '<iframe.*?height="(\d+)".*?video_url\s*:\s*\'([^\']+)\''
matches = scrapertools.find_multiple_matches(data, patron)
for url, quality in matches:
# The iframe fallback pattern captures (height, url) instead of
# (url, quality_label), so swap the pair and build a "###p" label.
if "http" in quality:
calidad = url
url = quality
quality = calidad + "p"
itemlist.append(['.mp4 %s [directo]' % quality, url])
if item.extra == "play_menu":
return itemlist, data
return itemlist
def menu_info(item):
logger.info()
itemlist = []
video_urls, data = play(item.clone(extra="play_menu"))
itemlist.append(item.clone(action="play", title="Ver -- %s" % item.title, video_urls=video_urls))
bloque = scrapertools.find_single_match(data, '<div class="block-screenshots">(.*?)</div>')
matches = scrapertools.find_multiple_matches(bloque, '<img class="thumb lazy-load".*?data-original="([^"]+)"')
for i, img in enumerate(matches):
if i == 0:
continue
img = urlparse.urljoin(host, img)
title = "Imagen %s" % (str(i))
itemlist.append(item.clone(action="", title=title, thumbnail=img, fanart=img))
return itemlist
def tags(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if item.title == "Tags":
letras = []
matches = scrapertools.find_multiple_matches(data, '<strong class="title".*?>\s*(.*?)</strong>')
for title in matches:
title = title.strip()
if title not in letras:
letras.append(title)
itemlist.append(Item(channel=item.channel, action="tags", url=item.url, title=title, extra=title))
else:
if not item.length:
item.length = 0
bloque = scrapertools.find_single_match(data,
'>%s</strong>(.*?)(?:(?!%s)(?!#)[A-Z#]{1}</strong>|<div class="footer-margin">)' % (
item.extra, item.extra))
matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)">\s*(.*?)</a>')
for url, title in matches[item.length:item.length + 100]:
itemlist.append(Item(channel=item.channel, action="lista", url=url, title=title))
if len(itemlist) >= 100:
itemlist.append(Item(channel=item.channel, action="tags", url=item.url, title=">> Página siguiente",
length=item.length + 100, extra=item.extra))
return itemlist
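tags() above pages a long in-memory match list in windows of 100 entries, carrying the current offset forward in item.length. The windowing idea in isolation (a plain-Python sketch; the names are illustrative):

def ventana(matches, offset, tamano=100):
    # Return one window of results plus the offset of the next window,
    # or None when the list is exhausted.
    pagina = matches[offset:offset + tamano]
    siguiente = offset + tamano if len(matches) > offset + tamano else None
    return pagina, siguiente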

View File

@@ -1,7 +1,7 @@
{
"id": "goodpelis",
"name": "GoodPelis",
"active": true,
"active": false,
"adult": false,
"language": ["lat"],
"thumbnail": "http://goodpelis.net/wp-content/uploads/2017/11/Logo-GP.png",

View File

@@ -3,15 +3,29 @@
"name": "HDFilmologia",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"language": ["cast", "lat", "vose"],
"fanart": "https://i.postimg.cc/qvFCZNKT/Alpha-652355392-large.jpg",
"thumbnail": "https://hdfilmologia.com/templates/gorstyle/images/logo.png",
"banner": "",
"categories": [
"movie",
"vos"
"vose",
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"English"
]
},
{
"id": "modo_grafico",
"type": "bool",

View File

@@ -7,7 +7,8 @@ import re
import sys
import urllib
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
@@ -46,39 +47,45 @@ fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(item.clone(title="Últimas Agregadas", action="movies",thumbnail=get_thumb('last', auto=True),
itemlist.append(item.clone(title="Últimas Agregadas", action="movies", thumbnail=get_thumb('last', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'index.php?do=lastnews', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Estrenos", action="movies", thumbnail=get_thumb('premieres', auto=True),
text_blod=True, page=0, viewcontent='movies', url=host + 'estrenos',
viewmode="movie_with_plot"))
text_blod=True, page=0, viewcontent='movies', url=host + 'estrenos',
viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Más Vistas", action="movies",thumbnail=get_thumb('more watched', auto=True),
itemlist.append(item.clone(title="Más Vistas", action="movies", thumbnail=get_thumb('more watched', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'mas-vistas/', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Películas Por País", action="countriesYears",thumbnail=get_thumb('country', auto=True),
itemlist.append(item.clone(title="Películas Por País", action="countriesYears", thumbnail=get_thumb('country',
auto=True), text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Películas Por Año", action="countriesYears", thumbnail=get_thumb('year', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Películas Por Año", action="countriesYears",thumbnail=get_thumb('year', auto=True),
itemlist.append(item.clone(title="Géneros", action="genres", thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Géneros", action="genres",thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Buscar", action="search",thumbnail=get_thumb('search', auto=True),
itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -107,7 +114,7 @@ def sub_search(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="sres-wrap clearfix" href="([^"]+)">' #url
patron = '<a class="sres-wrap clearfix" href="([^"]+)">' # url
patron += '<div class="sres-img"><img src="/([^"]+)" alt="([^"]+)" />.*?' # img, title
patron += '<div class="sres-desc">(.*?)</div>' # plot
@@ -117,7 +124,7 @@ def sub_search(item):
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
action="findvideos", text_color=color3, page=0, plot=plot,
thumbnail=host+scrapedthumbnail))
thumbnail=host + scrapedthumbnail))
pagination = scrapertools.find_single_match(data, 'class="pnext"><a href="([^"]+)">')
@@ -147,10 +154,10 @@ def movies(item):
scrapedthumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle, quality)
itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year.strip()},
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer"))
itemlist.append(Item(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year.strip()},
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer"))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
@@ -165,7 +172,6 @@ def movies(item):
itemlist.append(item.clone(url=next_page, page=0,
title="» Siguiente »", text_color=color3))
return itemlist
@@ -182,7 +188,7 @@ def genres(item):
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(channel=__channel__, action="movies", title=scrapedtitle,
url=host+scrapedurl, text_color=color3, viewmode="movie_with_plot"))
url=host + scrapedurl, text_color=color3, viewmode="movie_with_plot"))
return itemlist
@@ -197,15 +203,14 @@ def countriesYears(item):
patron_todas = 'Por País</option>(.*?)</option></select>'
else:
patron_todas = 'Por Año</option>(.*?)<option value="/">Peliculas'
data = scrapertools.find_single_match(data, patron_todas)
patron = '<option value="/([^"]+)">([^<]+)</option>' # url, title
patron = '<option value="/([^"]+)">([^<]+)</option>' # url, title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=host+scrapedurl, action="movies"))
itemlist.append(item.clone(title=scrapedtitle, url=host + scrapedurl, action="movies"))
return itemlist
@@ -246,13 +251,17 @@ def findvideos(item):
title = "Ver en: [COLOR yellow](%s)[/COLOR] [COLOR yellowgreen]%s[/COLOR]" % (server.title(), lang)
if 'youtube' not in server:
itemlist.append(item.clone(action='play', url=url, title=title, language=lang,
text_color=color3))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",

View File

@@ -1,22 +0,0 @@
{
"id": "hentaienespanol",
"name": "HentaiEnEspañol",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://s11.postimg.cc/cmuwcvvpf/hentaienespanol.png",
"banner": "https://s3.postimg.cc/j3qkfut8z/hentaienespanol_banner.png",
"categories": [
"adult"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -1,63 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
host = 'http://www.xn--hentaienespaol-1nb.net/'
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Todos", action="todas", url=host, thumbnail='', fanart=''))
itemlist.append(
Item(channel=item.channel, title="Sin Censura", action="todas", url=host + 'hentai/sin-censura/', thumbnail='',
fanart=''))
return itemlist
def todas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="box-peli" id="post-.*?">.<h2 class="title">.<a href="([^"]+)">([^<]+)<\/a>.*?'
patron += 'height="170px" src="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = scrapedurl
title = scrapedtitle # .decode('utf-8')
thumbnail = scrapedthumbnail
fanart = ''
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=fanart))
# Pagination
title = ''
siguiente = scrapertools.find_single_match(data, 'class="nextpostslink" rel="next" href="([^"]+)">')
title = 'Pagina Siguiente >>> '
fanart = ''
itemlist.append(Item(channel=item.channel, action="todas", title=title, url=siguiente, fanart=fanart))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return todas(item)
else:
return []

View File

@@ -0,0 +1,15 @@
{
"id": "javwhores",
"name": "javwhores",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://www.javwhores.com/images/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,97 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://www.javwhores.com/'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="peliculas", url=host + "top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url=host + "most-popular/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "search/%s/" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?'
patron += '<img class="thumb" src="([^"]+)".*?'
patron += '<div class="videos">([^"]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="video-item ">.*?'
patron += '<a href="([^"]+)" title="([^"]+)" class="thumb">.*?'
patron += 'data-original="([^"]+)".*?'
patron += '<i class="fa fa-clock-o"></i>(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data, 'video_alt_url3: \'([^\']+)\'')
if scrapedurl == "" :
scrapedurl = scrapertools.find_single_match(data, 'video_alt_url2: \'([^\']+)\'')
if scrapedurl == "" :
scrapedurl = scrapertools.find_single_match(data, 'video_alt_url: \'([^\']+)\'')
if scrapedurl == "" :
scrapedurl = scrapertools.find_single_match(data, 'video_url: \'([^\']+)\'')
itemlist.append(Item(channel=item.channel, action="play", title=scrapedurl, fulltitle=item.title, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist
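play() above probes video_alt_url3 down to video_url and keeps the first hit, i.e. the highest quality the player script exposes. The same fallback chain written as a loop (a behaviour-equivalent sketch reusing the module's scrapertools import):

from core import scrapertools

def mejor_url(data):
    # Try the quality variants from best to worst; return the first match.
    for clave in ("video_alt_url3", "video_alt_url2", "video_alt_url", "video_url"):
        url = scrapertools.find_single_match(data, clave + ": '([^']+)'")
        if url:
            return url
    return ""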

View File

@@ -0,0 +1,17 @@
{
"id": "jizzbunker",
"name": "jizzbunker",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://s0.cdn3x.com/jb/i/logo-new.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://jizzbunker.com/es'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/newest"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/popular1"))
itemlist.append( Item(channel=item.channel, title="Tendencia" , action="peliculas", url=host + "/trending"))
itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/longest"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/channels/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search?query=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li><figure>.*?<a href="([^"]+)".*?<img class="lazy" data-original="([^"]+)" alt="([^"]+)".*?<span class="score">(\d+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedurl = scrapedurl.replace("channel", "channel30")
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li><figure>.*?<a href="([^"]+)/([^"]+).html".*?<img class="lazy" data-original="([^"]+)".*?<time datetime=".*?">([^"]+)</time>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches:
url = scrapedurl + "/" + scrapedtitle + ".html"
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">&rarr;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'type:\'video/mp4\',src:\'([^\']+)\''
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
scrapedurl = scrapedurl.replace("https", "http")
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist

View File

@@ -108,8 +108,8 @@ def series(item):
plot=scrapedplot, show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
try:
siguiente = scrapertools.find_single_match(data, '<a class="listsiguiente" href="([^"]+)" >Resultados Siguientes')
scrapedurl = item.url + siguiente
siguiente = scrapertools.find_single_match(data, '<a class="text nav-next" href="([^"]+)"')
scrapedurl = siguiente
scrapedtitle = ">> Pagina Siguiente"
scrapedthumbnail = ""
scrapedplot = ""

View File

@@ -17,7 +17,6 @@ host = "https://maxipelis24.tv"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel = item.channel, title = "peliculas", action = "movies", url = host, thumbnail = get_thumb('movies', auto = True)))
@@ -40,7 +39,6 @@ def category(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","", data)
if item.cat == 'genre':
data = scrapertools.find_single_match(data, '<h3>Géneros <span class="icon-sort">.*?</ul>')
patron = '<li class="cat-item cat-item.*?<a href="([^"]+)" >([^<]+)<'
@@ -50,7 +48,6 @@ def category(item):
elif item.cat == 'quality':
data = scrapertools.find_single_match(data, '<h3>Calidad.*?</div>')
patron = 'li><a href="([^"]+)">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(Item(channel = item.channel, action = 'movies', title =scrapedtitle, url = scrapedurl, type = 'cat', first = 0))
@@ -59,15 +56,12 @@ def category(item):
def movies(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","", data)
patron = '<div id="mt.+?href="([^"]+)".+?'
patron += '<img src="([^"]+)" alt="([^"]+)".+?'
patron += '<span class="ttx">([^<]+).*?'
patron += 'class="year">([^<]+).+?class="calidad2">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, img, scrapedtitle, resto, year, quality in matches:
scrapedtitle = re.sub(r'\d{4}|[()]','', scrapedtitle)
@@ -83,26 +77,21 @@ def movies(item):
contentType = "movie",
quality = quality,
infoLabels = {'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
#Paginacion
matches = re.compile('class="respo_pag"><div class="pag.*?<a href="([^"]+)" >Siguiente</a><', re.DOTALL).findall(data)
if matches:
url = urlparse.urljoin(item.url, matches[0])
itemlist.append(Item(channel = item.channel, action = "movies", title = "Página siguiente >>", url = url))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","", data)
patron = '<div id="div.*?<div class="movieplay".*?(?:iframe.*?src|IFRAME SRC)="([^&]+)&'
matches = re.compile(patron, re.DOTALL).findall(data)
for link in matches:
if 'maxipelis24.tv/hideload/?' in link:
if 'id=' in link:
@@ -120,24 +109,25 @@ def findvideos(item):
elif 'ed=' in link:
id_type = 'ed'
ir_type = 'er'
else:
continue
id = scrapertools.find_single_match(link, '%s=(.*)' % id_type)
base_link = scrapertools.find_single_match(link, '(.*?)%s=' % id_type)
ir = id[::-1]
referer = base_link+'%s=%s&/' % (id_type, ir)
video_data = httptools.downloadpage('%s%s=%s' % (base_link, ir_type, ir), headers={'Referer':referer},
id = scrapertools.find_single_match(link, '%s=(.*)' % id_type)
base_link = scrapertools.find_single_match(link, '(.*?)%s=' % id_type)
ir = id[::-1]
referer = base_link+'%s=%s&/' % (id_type, ir)
video_data = httptools.downloadpage('%s%s=%s' % (base_link, ir_type, ir), headers={'Referer':referer},
follow_redirects=False)
url = video_data.headers['location']
title = '%s'
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play',
language='', infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
url = video_data.headers['location']
title = '%s'
new_item = Item(channel=item.channel, title=title, url=url, action='play', language = '', infoLabels=item.infoLabels)
itemlist.append(new_item)
else:
patron = '<div id="div.*?<div class="movieplay".*?(?:iframe.*?src|IFRAME SRC)="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for link in matches:
url = link
title = '%s'
new_item = Item(channel=item.channel, title=title, url=url, action='play', language = '', infoLabels=item.infoLabels)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
if itemlist:
if config.get_videolibrary_support():
itemlist.append(Item(channel = item.channel, action = ""))
@@ -145,6 +135,4 @@ def findvideos(item):
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
contentTitle = item.contentTitle
))
return itemlist
return itemlist
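The hideload branch above works by reversing the id query value (ir = id[::-1]), forging a matching Referer, requesting the er/ir variant of the link without following redirects, and reading the real hoster URL from the location header. That resolution step in isolation (a sketch reusing the channel's httptools/scrapertools imports):

def resolver_hideload(link, id_type, ir_type):
    # link looks like ...hideload/?id=XYZ (or ed=XYZ); the site expects
    # the id reversed and answers with a redirect to the hoster.
    video_id = scrapertools.find_single_match(link, '%s=(.*)' % id_type)
    base_link = scrapertools.find_single_match(link, '(.*?)%s=' % id_type)
    ir = video_id[::-1]
    referer = base_link + '%s=%s&/' % (id_type, ir)
    video_data = httptools.downloadpage('%s%s=%s' % (base_link, ir_type, ir),
                                        headers={'Referer': referer},
                                        follow_redirects=False)
    return video_data.headers.get('location', '')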

View File

@@ -1,7 +1,7 @@
{
"id": "mundoflv",
"name": "MundoFlv",
"active": true,
"active": false,
"adult": false,
"language": ["lat"],
"thumbnail": "https://s32.postimg.cc/h1ewz9hhx/mundoflv.png",

View File

@@ -82,6 +82,7 @@
"Descargas2020",
"Tumejortorrent",
"Torrentrapid",
"Pctnew",
"Torrentlocura",
"Tvsinpagar",
"Planetatorrent",
@@ -92,7 +93,7 @@
"id": "clonenewpct1_channels_list",
"type": "text",
"label": "Lista de clones de NewPct1 y orden de uso",
"default": "('1', 'descargas2020', 'http://descargas2020.com/', 'movie, tvshow, season, episode', ''), ('1', 'tumejortorrent', 'http://tumejortorrent.com/', 'movie, tvshow, season, episode', ''), ('1', 'torrentrapid', 'http://torrentrapid.com/', 'movie, tvshow, season, episode', 'serie_episodios'), ('1', 'torrentlocura', 'http://torrentlocura.com/', 'movie, tvshow, season, episode', ''), ('1', 'tvsinpagar', 'http://www.tvsinpagar.com/', 'tvshow, season, episode', ''), ('1', 'planetatorrent', 'http://planetatorrent.com/', 'movie, tvshow, season, episode', ''), ('1', 'mispelisyseries', 'http://mispelisyseries.com/', 'movie', 'search, listado_busqueda')",
"default": "('1', 'descargas2020', 'http://descargas2020.com/', 'movie, tvshow, season, episode', ''), ('1', 'tumejortorrent', 'http://tumejortorrent.com/', 'movie, tvshow, season, episode', ''), ('1', 'torrentrapid', 'http://torrentrapid.com/', 'movie, tvshow, season, episode', 'serie_episodios'), ('1', 'pctnew', 'http://pctnew.com/', 'movie, tvshow, season, episode', ''), ('1', 'torrentlocura', 'http://torrentlocura.com/', 'movie, tvshow, season, episode', ''), ('1', 'tvsinpagar', 'http://www.tvsinpagar.com/', 'tvshow, season, episode', ''), ('1', 'planetatorrent', 'http://planetatorrent.com/', 'movie, tvshow, season, episode', ''), ('1', 'mispelisyseries', 'http://mispelisyseries.com/', 'movie', 'search, listado_busqueda')",
"enabled": true,
"visible": false
},
@@ -100,7 +101,7 @@
"id": "intervenidos_channels_list",
"type": "text",
"label": "Lista de canales y clones de NewPct1 intervenidos y orden de sustitución de URLs",
"default": "('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force'), ('0', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)([^0-9]+-)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-\\d+-(Temporada-).html', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-(\\d+)-', '', 'tvshow, season', '', 'force'), ('0', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-([^.]+).html', '', '', '', 'movie', '', 'force'), ('0', 'mejortorrent', 'mejortorrent', 'http://www.mejortorrent.com/', 'http://www.mejortorrent.org/', '', '', '', '', '', '*', '', 'force'), ('1', 'plusdede', 'megadede', 'https://www.plusdede.com', 'https://www.megadede.com', '', '', '', '', '', '*', '', 'auto'), ('1', 'newpct1', 'descargas2020', 'http://www.newpct1.com', 'http://descargas2020.com', '', '', '', '', '', '*', '', 'force')",
"default": "('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force'), ('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force')",
"enabled": true,
"visible": false
},

View File

@@ -44,19 +44,19 @@ clone_list_random = []
if host_index == 0: #Si el clones es "Aleatorio"...
i = 0
j = 2 #... marcamos el último de los clones "buenos"
j = 3 #... marcamos el último de los clones "buenos"
for active_clone, channel_clone, host_clone, contentType_clone, info_clone in clone_list:
if i <= j and active_clone == "1":
clone_list_random += [clone_list[i]] #... añadimos el clone activo "bueno" a la lista
clone_list_random += [clone_list[i]] #... añadimos el clone activo "bueno" a la lista
else:
break
i += 1
if clone_list_random: #Si hay clones en la lista aleatoria...
clone_list = [random.choice(clone_list_random)] #Seleccionamos un clone aleatorio
#logger.debug(clone_list)
host_index = 1 #mutamos el num. de clone para que se procese en el siguiente loop
host_index = 1 #mutamos el num. de clone para que se procese en el siguiente loop
if host_index > 0 or not clone_list_random: #Si el Clone por defecto no es Aleatorio, o hay ya un aleatorio sleccionado...
if host_index > 0 or not clone_list_random: #Si el Clone por defecto no es Aleatorio, o hay ya un aleatorio sleccionado...
i = 1
for active_clone, channel_clone, host_clone, contentType_clone, info_clone in clone_list:
if i == host_index:
@@ -202,20 +202,29 @@ def submenu(item):
if "pelisyseries.com" in item.channel_host and item.extra == "varios": #compatibilidad con mispelisy.series.com
data = '<li><a href="' + item.channel_host + 'varios/" title="Documentales">Documentales</a></li>'
else:
data = scrapertools.get_match(data, patron) #Seleccionamos el trozo que nos interesa
if not data:
data_menu = scrapertools.get_match(data, patron) #Seleccionamos el trozo que nos interesa
if not data_menu:
logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: SUBMENU: Ha cambiado la estructura de la Web. Reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
patron = '<li><a.*?href="([^"]+)"\s?.itle="[^"]+"\s?>([^>]+)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = re.compile(patron, re.DOTALL).findall(data_menu)
if not matches:
logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data_menu)
itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: SUBMENU: Ha cambiado la estructura de la Web. Reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
matches_hd = []
if item.extra == "peliculas":
patron = '<h3\s*(?:style="[^"]+")?>(?:<strong>)?Peliculas(?:<\/strong>)? en HD <a href="[^"]+"\s*class="[^"]+"\s*title="[^"]+">(?:ver .*?)?<\/a><span(?: style="[^"]+")?>(.*?)(?:<\/span>)?<\/h3>'
data_hd = scrapertools.find_single_match(data, patron) #Seleccionamos el trozo que nos interesa
if data_hd:
patron = '<a href="([^"]+)"\s*.itle="[^"]+"\s*>([^<]+)<\/a>'
matches_hd = re.compile(patron, re.DOTALL).findall(data_hd)
#logger.debug(matches_hd)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
@@ -229,8 +238,19 @@ def submenu(item):
item.extra2 = ""
itemlist.append(item.clone(action="listado", title=title, url=scrapedurl))
if matches_hd and 'HD' in title:
for scrapedurlcat, scrapedtitlecat in matches_hd: #Pintamos las categorías de peliculas en HD
if '4k' in scrapedtitlecat.lower(): #... ignoramos 4K, no funcionan las categorías
continue
itemlist.append(item.clone(action="listado", title=" - Calidad: " + scrapedtitlecat, url=scrapedurlcat))
itemlist.append(item.clone(action="alfabeto", title=title + " [A-Z]", url=scrapedurl))
if item.extra == "varios" and len(itemlist) == 0:
itemlist.append(item.clone(action="listado", title="Varios", url=item.channel_host + "varios/"))
itemlist.append(item.clone(action="alfabeto", title="Varios" + " [A-Z]", url=item.channel_host + "varios/"))
if item.extra == "peliculas":
itemlist.append(item.clone(action="listado", title="Películas 4K", url=item.channel_host + "peliculas-hd/4kultrahd/"))
itemlist.append(item.clone(action="alfabeto", title="Películas 4K" + " [A-Z]", url=item.channel_host + "peliculas-hd/4kultrahd/"))
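The first hunk in this file adjusts the random-mirror pick: j = 3 now marks the last of the leading "good" clones eligible for random selection. The selection logic in isolation (clone tuples laid out as in clonenewpct1_channels_list; a sketch that mirrors the walk-and-break behaviour above):

import random

def elegir_clone(clone_list, j=3):
    # Keep the leading active clones up to index j; stop at the first
    # clone that breaks the run, exactly as the channel code does.
    candidatos = []
    for i, clone in enumerate(clone_list):
        if i <= j and clone[0] == "1":
            candidatos.append(clone)
        else:
            break
    return random.choice(candidatos) if candidatos else None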

View File

@@ -238,14 +238,15 @@ def findvideos(item):
import urllib
itemlist = []
data = get_source(item.url)
patron = 'data-post="(\d+)" data-nume="(\d+).*?class="title">([^>]+)<'
data = data.replace("'", '"')
patron = 'data-type="([^"]+)" data-post="(\d+)" data-nume="(\d+).*?class="title">([^>]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for pt, nm, language in matches:
for type, pt, nm, language in matches:
if 'sub' in language.lower() or language not in IDIOMAS:
language = 'VOSE'
post = {'action': 'doo_player_ajax', 'post': pt, 'nume': nm}
post = {'action': 'doo_player_ajax', 'post': pt, 'nume': nm, 'type': type}
post = urllib.urlencode(post)
new_data = httptools.downloadpage(host + 'wp-admin/admin-ajax.php', post=post,
headers={'Referer': item.url}).data

View File

@@ -246,9 +246,10 @@ def findvideos(item):
import urllib
itemlist = []
data = get_source(item.url)
patron = 'data-post="(\d+)" data-nume="(\d+)".*?img src=\'([^\']+)\''
data = data.replace("'",'"')
patron = 'data-type="([^"]+)" data-post="(\d+)" data-nume="(\d+).*?img src=\"([^"]+)\"'
matches = re.compile(patron, re.DOTALL).findall(data)
for id, option, lang in matches:
for type, id, option, lang in matches:
lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
quality = ''
if lang not in IDIOMAS:
@@ -258,7 +259,7 @@ def findvideos(item):
else:
title = ''
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type':item.type}
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type':type}
post = urllib.urlencode(post)
test_url = '%swp-admin/admin-ajax.php' % 'https://peliculonhd.com/'

View File

@@ -3,16 +3,30 @@
"name": "Pelis24",
"active": true,
"adult": false,
"language": ["lat"],
"language": ["lat", "cast", "vose"],
"fanart": "https://i.postimg.cc/WpqD2n77/cine24bg.jpg",
"thumbnail": "https://www.pelis24.in/wp-content/uploads/2018/05/44.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vos"
"vose",
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"English"
]
},
{
"id": "modo_grafico",
"type": "bool",

View File

@@ -53,35 +53,32 @@ list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [item.clone(title="Novedades", action="peliculas",thumbnail=get_thumb('newest', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'movies/', viewmode="movie_with_plot"),
itemlist = [item.clone(title="Novedades", action="peliculas", thumbnail=get_thumb('newest', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'movies/', viewmode="movie_with_plot"),
item.clone(title="Tendencias", action="peliculas",thumbnail=get_thumb('newest', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),
item.clone(title="Tendencias", action="peliculas", thumbnail=get_thumb('newest', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),
item.clone(title="Estrenos", action="peliculas",thumbnail=get_thumb('estrenos', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'genre/estrenos/', viewmode="movie_with_plot"),
item.clone(title="Estrenos", action="peliculas", thumbnail=get_thumb('estrenos', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'genre/estrenos/', viewmode="movie_with_plot"),
item.clone(title="Géneros", action="genresYears",thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"),
item.clone(title="Géneros", action="genresYears", thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search",thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0)]
item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0)]
autoplay.show_option(item.channel, itemlist)
return itemlist
def search(item, texto):
logger.info()
@@ -98,6 +95,7 @@ def search(item, texto):
logger.error("{0}".format(line))
return []
def sub_search(item):
logger.info()
@@ -107,7 +105,7 @@ def sub_search(item):
# logger.info(data)
data = scrapertools.find_single_match(data, '<header><h1>Resultados encontrados(.*?)resppages')
# logger.info(data)
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />.*?' # url, img, title
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />.*?' # url, img, title
patron += '<span class="year">([^<]+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -139,7 +137,8 @@ def peliculas(item):
data = scrapertools.decodeHtmlentities(data)
# logger.info(data)
patron = '<article id="post-\w+" class="item movies"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?' # img, title
# img, title
patron = '<article id="post-\w+" class="item movies"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'
patron += '<span class="quality">([^<]+)</span> </div>\s*<a href="([^"]+)">.*?' # quality, url
patron += '</h3><span>([^<]+)</span>' # year
@@ -148,11 +147,10 @@ def peliculas(item):
for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year in matches[item.page:item.page + 30]:
title = '%s [COLOR yellowgreen](%s)[/COLOR]' % (scrapedtitle, quality)
itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year}, quality=quality,
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer"))
itemlist.append(Item(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year}, quality=quality,
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer"))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
@@ -164,7 +162,6 @@ def peliculas(item):
if next_page:
itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))
return itemlist
@@ -180,10 +177,10 @@ def genresYears(item):
else:
patron_todas = '<h2>Generos</h2>(.*?)</div><aside'
# logger.error(texto='***********uuuuuuu*****' + patron_todas)
data = scrapertools.find_single_match(data, patron_todas)
# logger.error(texto='***********uuuuuuu*****' + data)
patron = '<a href="([^"]+)">([^<]+)</a> <i>([^<]+)</i>' # url, title, videos
patron = '<a href="([^"]+)">([^<]+)</a> <i>([^<]+)</i>' # url, title, videos
# patron = '<a href="([^"]+)">([^<]+)</a>' # url, title
matches = scrapertools.find_multiple_matches(data, patron)
@@ -192,7 +189,6 @@ def genresYears(item):
itemlist.append(item.clone(title=title, url=scrapedurl, action="peliculas"))
return itemlist
@@ -222,7 +218,7 @@ def series(item):
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?' # url
patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)".*?' # img
patron += '<h3 class="Title">([^<]+)</h3>' # title
@@ -232,7 +228,7 @@ def series(item):
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
contentSerieName=scrapedtitle, show=scrapedtitle,
thumbnail='https:'+scrapedthumbnail, contentType='tvshow'))
thumbnail='https:' + scrapedthumbnail, contentType='tvshow'))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
@@ -360,7 +356,8 @@ def findvideos(item):
# urls = re.compile(patron2, re.DOTALL).findall(data)
for option, lang in matches:
url = scrapertools.find_single_match(data, '<div id="option-%s" class="[^"]+"><iframe class="metaframe rptss" src="([^"]+)"' % option)
url = scrapertools.find_single_match(
data, '<div id="option-%s" class="[^"]+"><iframe class="metaframe rptss" src="([^"]+)"' % option)
lang = lang.lower().strip()
languages = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
'castellano': '[COLOR green](CAST)[/COLOR]',
@@ -374,10 +371,9 @@ def findvideos(item):
server = servertools.get_server_from_url(url)
title = "»» [COLOR yellow](%s)[/COLOR] [COLOR goldenrod](%s)[/COLOR] %s ««" % (server.title(), item.quality, lang)
# if 'google' not in url and 'directo' not in server:
itemlist.append(item.clone(action='play', url=url, title=title, language=lang, text_color=color3))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
@@ -388,7 +384,6 @@ def findvideos(item):
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',

View File

@@ -3,14 +3,14 @@
"name": "PelisPlay",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"fanart": "https://s33.postimg.cc/d3ioghaof/image.png",
"language": ["cast", "lat", "vose"],
"fanart": "https://i.postimg.cc/qvFCZNKT/Alpha-652355392-large.jpg",
"thumbnail": "https://www.pelisplay.tv/static/img/logo.png",
"banner": "https://s33.postimg.cc/cyex6xlen/image.png",
"banner": "https://i.postimg.cc/tCb8wh8s/pelisplaybn.jpg",
"categories": [
"movie",
"tvshow",
"vos"
"vose"
],
"settings": [
{
@@ -22,7 +22,9 @@
"visible": true,
"lvalues": [
"No filtrar",
"Latino"
"Latino",
"Castellano",
"Subtitulado"
]
},
{

View File

@@ -46,13 +46,12 @@ parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Latino': 'LAT'}
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
@@ -63,12 +62,13 @@ def mainlist(item):
viewcontent='tvshow', viewmode="tvshow_with_plot",
thumbnail=get_thumb("channels_tvshow.png")),
item.clone(title="Netflix", action="flixmovies", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshows', viewmode="movie_with_plot", fanart='https://i.postimg.cc/jjN85j8s/netflix-logo.png',
thumbnail='http://img.app.kiwi/icon/jcbqFma-5e91cY9MlEasA-fvCRJK493MxphrqbBd8oS74FtYg00IXeOAn0ahsLprxIA'),
item.clone(title="Netflix", action="flixmenu", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshows', viewmode="movie_with_plot",
fanart='https://i.postimg.cc/jjN85j8s/netflix-logo.png',
thumbnail='https://i.postimg.cc/Pxs9zYjz/image.png'),
item.clone(title="Buscar", action="search", text_blod=True, extra='buscar',
thumbnail=get_thumb('search.png'), url=host+'buscar')]
thumbnail=get_thumb('search.png'), url=host + 'buscar')]
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -81,28 +81,10 @@ def menumovies(item):
viewcontent='movie', url=host + 'peliculas?filtro=visitas', viewmode="movie_with_plot"),
item.clone(title="Recíen Agregadas", action="peliculas", text_blod=True,
viewcontent='movie', url=host + 'peliculas?filtro=fecha_creacion', viewmode="movie_with_plot"),
item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año",
viewcontent='movie', url=host, viewmode="movie_with_plot"),
item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='movie',
viewcontent='movie', url=host+'peliculas', viewmode="movie_with_plot"),
viewcontent='movie', url=host + 'peliculas', viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True, extra='buscarp',
thumbnail=get_thumb('search.png'), url=host+'peliculas')]
return itemlist
def flixmovies(item):
logger.info()
itemlist = [item.clone(title="Novedades", action="peliculas", text_blod=True, url=host + 'peliculas/netflix?filtro=fecha_actualizacion',
viewcontent='movie', viewmode="movie_with_plot"),
# item.clone(title="Estrenos", action="peliculas", text_blod=True,
# viewcontent='movie', url=host + 'peliculas/estrenos', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="peliculas", text_blod=True,
viewcontent='movie', url=host + 'peliculas/netflix?filtro=visitas', viewmode="movie_with_plot"),
item.clone(title="Recíen Agregadas", action="peliculas", text_blod=True,
viewcontent='movie', url=host + 'peliculas/netflix?filtro=fecha_creacion', viewmode="movie_with_plot"),
item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año",
viewcontent='movie', url=host, viewmode="movie_with_plot"),
item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='movie',
viewcontent='movie', url=host+'netflix', viewmode="movie_with_plot")]
thumbnail=get_thumb('search.png'), url=host + 'peliculas')]
return itemlist
@@ -116,12 +98,52 @@ def menuseries(item):
item.clone(title="Recíen Agregadas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshow', url=host + 'series?filtro=fecha_actualizacion', viewmode="tvshow_with_plot"),
item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='serie',
viewcontent='movie', url=host+'series', viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True, extra='buscars',
thumbnail=get_thumb('search.png'), url=host+'series')]
item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='serie',
viewcontent='movie', url=host + 'series', viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True, extra='buscars',
thumbnail=get_thumb('search.png'), url=host + 'series')]
return itemlist
def flixmenu(item):
logger.info()
itemlist = [item.clone(title="Películas", action="flixmovies", text_blod=True, extra='movie', mediatype="movie",
viewcontent='movie', viewmode="tvshow_with_plot"),
item.clone(title="Series", action="flixtvshow", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshow', viewmode="tvshow_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True,
thumbnail=get_thumb('search.png'), url=host + 'buscar')]
return itemlist
def flixmovies(item):
logger.info()
itemlist = [item.clone(title="Novedades", action="peliculas", text_blod=True, url=host + 'peliculas/netflix?filtro=fecha_actualizacion',
viewcontent='movie', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="peliculas", text_blod=True,
viewcontent='movie', url=host + 'peliculas/netflix?filtro=visitas', viewmode="movie_with_plot"),
item.clone(title="Recíen Agregadas", action="peliculas", text_blod=True,
viewcontent='movie', url=host + 'peliculas/netflix?filtro=fecha_creacion', viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True, extra="buscarp",
thumbnail=get_thumb('search.png'), url=host + 'peliculas/netflix')]
return itemlist
def flixtvshow(item):
logger.info()
itemlist = [item.clone(title="Novedades", action="series", text_blod=True, url=host + 'series/netflix?filtro=fecha_actualizacion',
viewcontent='tvshow', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="series", text_blod=True,
viewcontent='tvshow', url=host + 'series/netflix?filtro=visitas', viewmode="movie_with_plot"),
item.clone(title="Recíen Agregadas", action="series", text_blod=True,
viewcontent='tvshow', url=host + 'series/netflix?filtro=fecha_creacion', viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True, extra="buscars",
thumbnail=get_thumb('search.png'), url=host + 'series/netflix')]
return itemlist
@@ -130,9 +152,8 @@ def p_portipo(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
# logger.info(data)
action = ''
patron = '<li class="item"><a href="([^"]+)" class="category">.*?' # url
patron = '<li class="item"><a href="([^"]+)" class="category">.*?' # url
patron += '<div class="[^<]+<img class="[^"]+" src="/([^"]+)"></div><div class="[^"]+">([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
@@ -140,9 +161,9 @@ def p_portipo(item):
action = 'peliculas'
elif item.extra == 'serie':
action = 'series'
itemlist.append(item.clone(action = action,
title = scrapedtitle,
url = scrapedurl,
itemlist.append(item.clone(action=action,
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail
))
itemlist.sort(key=lambda it: it.title)
@@ -154,10 +175,9 @@ def peliculas(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<img class="posterentrada" src="/([^"]+)".*?' # img
patron += '<a href="([^"]+)">.*?' # url
patron += '<p class="description_poster">.*?\(([^<]+)\)</p>.*?' # year
patron += '<p class="description_poster">.*?\(([^<]+)\)</p>.*?' # year
patron += '<div class="Description"> <div>([^<]+)</div>.*?' # plot
patron += '<strong>([^<]+)</strong></h4>' # title
@@ -168,12 +188,13 @@ def peliculas(item):
item.plot = plot
itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
infoLabels={"year":year}, thumbnail=host+scrapedthumbnail,
infoLabels={"year": year}, thumbnail=host + scrapedthumbnail,
url=scrapedurl, title=scrapedtitle, plot=plot))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
pagination = scrapertools.find_single_match(data, '<li><a href="([^"]+)" rel="next">')
pagination = scrapertools.find_single_match(
data, '<li><a href="([^"]+)" rel="next">')
if pagination:
itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
@@ -201,13 +222,6 @@ def search(item, texto):
return []
pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
if pagination:
itemlist.append(Item(channel=item.channel, action="sub_search",
title="» Siguiente »", url=pagination, thumbnail=get_thumb("next.png")))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
@@ -241,19 +255,19 @@ def series(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<img class="portada" src="/([^"]+)"><[^<]+><a href="([^"]+)".*?' # img, url
patron += 'class="link-title"><h2>([^<]+)</h2>' # title
patron = '<img class="portada" src="/([^"]+)"><[^<]+><a href="([^"]+)".*?'
patron += 'class="link-title"><h2>([^<]+)</h2>' # title
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie',
url=scrapedurl, thumbnail=host+scrapedthumbnail,
url=scrapedurl, thumbnail=host + scrapedthumbnail,
contentSerieName=scrapedtitle, show=scrapedtitle,
action="temporadas", contentType='tvshow'))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
pagination = scrapertools.find_single_match(data, '<li><a href="([^"]+)" rel="next">')
pagination = scrapertools.find_single_match(
data, '<li><a href="([^"]+)" rel="next">')
if pagination:
itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=pagination,
@@ -264,10 +278,8 @@ def series(item):
def temporadas(item):
logger.info()
itemlist = []
from core import jsontools
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<img class="posterentrada" src="/([^"]+)" alt="\w+\s*(\w+).*?'
patron += 'class="abrir_temporada" href="([^"]+)">' # img, season
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -275,20 +287,21 @@ def temporadas(item):
if len(matches) > 1:
for scrapedthumbnail, temporada, url in matches:
new_item = item.clone(action="episodios", season=temporada, url=url,
thumbnail=host+scrapedthumbnail, extra='serie')
thumbnail=host + scrapedthumbnail, extra='serie')
new_item.infoLabels['season'] = temporada
new_item.extra = ""
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
i.title = "%s. %s" % (
i.infoLabels['season'], i.infoLabels['tvshowtitle'])
if i.infoLabels['title']:
# If the season has its own name, append it to the item title
i.title += " - %s" % (i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the season has its own poster, use it instead of the show's
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: it.title)
# itemlist.sort(key=lambda it: it.title)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
@@ -304,26 +317,25 @@ def episodios(item):
from core import jsontools
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
# logger.info(data)
post_link = '%sentradas/abrir_temporada' % host
token = scrapertools.find_single_match(data, 'data-token="([^"]+)">')
token = scrapertools.find_single_match(data, 'data-token="([^"]+)">')
data_t = scrapertools.find_single_match(data, '<a data-s="[^"]+" data-t="([^"]+)"')
data_s = scrapertools.find_single_match(data, '<a data-s="([^"]+)" data-t="[^"]+"')
post= {'t':data_t, 's':data_s, '_token':token}
post = {'t': data_t, 's': data_s, '_token': token}
post = urllib.urlencode(post)
new_data = httptools.downloadpage(post_link, post=post).data
# json_data = jsontools.load(new_data)
# logger.info(new_data)
patron = '"nepisodio":"([^"]+)",[^,]+,"ntemporada":"([^"]+)".*?"url_directa":"([^"]+)",.*?"titulo":"([^"]+)",'
json_data = jsontools.load(new_data)
matches = re.compile(patron, re.DOTALL).findall(new_data)
for episode, season, scrapedurl, scrapedname in matches:
scrapedurl = scrapedurl.replace('\\', '')
logger.info('###name%s' % scrapedname)
for element in json_data['data']['episodios']:
scrapedname = element['titulo']
episode = element['metas_formateadas']['nepisodio']
season = element['metas_formateadas']['ntemporada']
scrapedurl = element['url_directa']
if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
continue
title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
title = "%sx%s: %s" % (season, episode.zfill(
2), scrapertools.unescape(scrapedname))
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
contentType="episode", extra='serie')
if 'infoLabels' not in new_item:
@@ -338,7 +350,8 @@ def episodios(item):
for i in itemlist:
if i.infoLabels['title']:
# If the episode has its own name, add it to the item title
i.title = "%sx%s: %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
i.title = "%sx%s: %s" % (
i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the episode has its own image, use it as the thumbnail
i.thumbnail = i.infoLabels['poster_path']
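The rewritten episodios() above stops regex-scraping the AJAX response and walks the parsed JSON instead. A compact sketch of that flow; the POST field names and JSON keys come from the hunk, while find(), HOST and the injected download() callable are stand-ins for the sketch.

import json
import re

HOST = 'https://www.pelisplay.tv/'  # assumed host for the sketch

def find(text, pattern):
    m = re.search(pattern, text)
    return m.group(1) if m else ''

def list_episodes(page_html, download):
    # Scrape the CSRF token plus the t/s ids off the season page, POST them to
    # entradas/abrir_temporada, then read the episode list from the JSON.
    # download(url, post_dict) stands in for httptools.downloadpage(...).data.
    post = {'t': find(page_html, 'data-t="([^"]+)"'),
            's': find(page_html, 'data-s="([^"]+)"'),
            '_token': find(page_html, 'data-token="([^"]+)"')}
    payload = json.loads(download(HOST + 'entradas/abrir_temporada', post))
    for ep in payload['data']['episodios']:
        metas = ep['metas_formateadas']
        yield '%sx%s: %s' % (metas['ntemporada'], metas['nepisodio'].zfill(2),
                             ep['titulo'])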
@@ -355,22 +368,20 @@ def episodios(item):
def findvideos(item):
logger.info()
from lib import generictools
from core import jsontools
import urllib
import base64
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
patron = 'data-player="([^"]+)"[^>]+>([^<]+)</div>.*?' # data-player, servername
patron += '<td class="[^"]+">([^<]+)</td><td class="[^"]+">([^<]+)</td>' # quality, lang
patron = 'data-player="([^"]+)"[^>]+>([^<]+)</div>.*?'
patron += '<td class="[^"]+">([^<]+)</td><td class="[^"]+">([^<]+)</td>'
matches = re.compile(patron, re.DOTALL).findall(data)
for data_player, servername, quality, lang in matches:
post_link = '%sentradas/procesar_player' % host
token = scrapertools.find_single_match(data, 'data-token="([^"]+)">')
post= {'data':data_player, 'tipo':'videohost', '_token':token}
token = scrapertools.find_single_match(data, 'data-token="([^"]+)">')
post = {'data': data_player, 'tipo': 'videohost', '_token': token}
post = urllib.urlencode(post)
new_data = httptools.downloadpage(post_link, post=post).data
json_data = jsontools.load(new_data)
@@ -378,12 +389,13 @@ def findvideos(item):
if 'pelisplay.tv/embed/' in url:
new_data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(new_data, '"file":"([^"]+)",').replace('\\', '')
url = scrapertools.find_single_match(
new_data, '"file":"([^"]+)",').replace('\\', '')
elif 'fondo_requerido' in url:
link = scrapertools.find_single_match(url, '=(.*?)&fondo_requerido').partition('&')[0]
post_link = '%sprivate/plugins/gkpluginsphp.php' % host
post= {'link':link}
post = {'link': link}
post = urllib.urlencode(post)
new_data2 = httptools.downloadpage(post_link, post=post).data
url = scrapertools.find_single_match(new_data2, '"link":"([^"]+)"').replace('\\', '')
@@ -391,14 +403,15 @@ def findvideos(item):
lang = lang.lower().strip()
idioma = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
'castellano': '[COLOR green](CAST)[/COLOR]',
'subtitulado': '[COLOR red](VOS)[/COLOR]'}
'subtitulado': '[COLOR red](VOSE)[/COLOR]'}
if lang in idioma:
lang = idioma[lang]
title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (servername.title(), quality, lang)
itemlist.append(item.clone(channel=__channel__, title=title, action='play', language=lang, quality=quality, url=url))
title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
servername.title(), quality, lang)
itemlist.append(item.clone(channel=__channel__, title=title,
action='play', language=lang, quality=quality, url=url))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
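findvideos() above resolves two awkward link shapes before handing anything to servertools. A sketch of just that resolution step; download(url, post=None) stands in for httptools.downloadpage(...).data, and the plugin URL is assumed from the host used in the channel.

import re

PLUGIN_URL = 'https://www.pelisplay.tv/private/plugins/gkpluginsphp.php'  # assumed

def resolve_player(url, download):
    # pelisplay's own /embed/ pages hide the file in a '"file":"..."' literal.
    if 'pelisplay.tv/embed/' in url:
        m = re.search(r'"file":"([^"]+)"', download(url))
        return m.group(1).replace('\\', '') if m else url
    # 'fondo_requerido' links must be re-posted to the gkplugins helper.
    if 'fondo_requerido' in url:
        m = re.search(r'=(.*?)&fondo_requerido', url)
        link = m.group(1).partition('&')[0] if m else ''
        m = re.search(r'"link":"([^"]+)"', download(PLUGIN_URL, {'link': link}))
        return m.group(1).replace('\\', '') if m else url
    return url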

View File

@@ -15,7 +15,9 @@ host = 'http://www.perfectgirls.net'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Top" , action="peliculas", url=host + "/top/3days/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -38,7 +40,7 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li class="additional_list__item"><a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
@@ -54,17 +56,25 @@ def peliculas(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="list__item_link"><a href="([^"]+)" title="([^"]+)">.*?data-original="([^"]+)".*?<time>([^"]+)</time>'
patron = '<div class="list__item_link"><a href="([^"]+)" title="([^"]+)">.*?'
patron += 'data-original="([^"]+)".*?'
patron += '<time>(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,time in matches:
plot = ""
contentTitle = scrapedtitle
title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle
scrapedhd = scrapertools.find_single_match(time, '<div class="hd">([^"]+)</div>')
if scrapedhd == 'HD':
time = scrapertools.find_single_match(time, '([^"]+)</time>')
title = "[COLOR yellow]" + time + "[/COLOR] " + "[COLOR red]" + scrapedhd + "[/COLOR] " + scrapedtitle
else:
time = scrapertools.find_single_match(time, '([^"]+)</time>')
title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle
scrapedthumbnail = "http:" + scrapedthumbnail
url = urlparse.urljoin(item.url,scrapedurl)
year = ""
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=scrapedthumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=scrapedthumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page = scrapertools.find_single_match(data, '<a class="btn_wrapper__btn" href="([^"]+)">Next</a></li>')
if next_page:
next_page = urlparse.urljoin(item.url, next_page)
@@ -72,11 +82,11 @@ def peliculas(item):
return itemlist
def findvideos(item):
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<source src="([^"]+)" res="\d+" label="([^"]+)"'
patron = '<source src="([^"]+)" res="\d+" label="([^"]+)" type="video/mp4" default/>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
itemlist.append(item.clone(action="play", title=scrapedtitle, fulltitle = item.title, url=scrapedurl))

View File

@@ -0,0 +1,16 @@
{
"id": "pornboss",
"name": "pornboss",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://pornboss.org/wp-content/uploads/2018/10/cropped-bottom-180x180.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,82 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://pornboss.org'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/category/movies/"))
itemlist.append( Item(channel=item.channel, title=" categorias" , action="categorias", url=host + "/category/movies/"))
itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/category/clips/"))
itemlist.append( Item(channel=item.channel, title=" categorias" , action="peliculas", url=host + "/category/clips/"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
if item.url == host + "/category/movies/":
data = scrapertools.get_match(data,'>Movies</a>(.*?)</ul>')
else:
data = scrapertools.get_match(data,'>Clips</a>(.*?)</ul>')
patron = '<a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<article id="post-\d+".*?<img class="center cover" src="([^"]+)" alt="([^"]+)".*?<blockquote>.*?<a href=\'([^\']+)\''
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedthumbnail,scrapedtitle,scrapedurl in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
if next_page_url!="":
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist
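peliculas() above still fetches through scrapertools.cachePage, while later hunks in this commit migrate callers to httptools.downloadpage(...).data (spankbang, added below, keeps the old call too). A drop-in shim under that one assumption, should cachePage go away:

from core import httptools

def cachePage(url):
    # Compatibility shim: same call shape as scrapertools.cachePage, routed
    # through httptools as the rest of this commit now does.
    return httptools.downloadpage(url).data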

View File

@@ -0,0 +1,15 @@
{
"id": "pornrewind",
"name": "pornrewind",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://www.pornrewind.com/static/images/logo-light-pink.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,76 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://www.pornrewind.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/videos/?sort_by=post_date"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="peliculas", url=host + "/videos/?sort_by=rating"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url=host + "/videos/?sort_by=video_viewed"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search/%s/" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a class="thumb-categories" href="([^"]+)" title="([^"]+)">.*?'
patron += '<img class="lazyload" data-src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a class="thumb" href="([^"]+)" title="([^"]+)">.*?'
patron += '<img class="lazyload" data-src="([^"]+)".*?'
patron += '<span>(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
next_page = scrapertools.find_single_match(data, '<li class="direction"><a href="([^"]+)" data-ajax="pagination">')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
return itemlist
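The listing above ends with the same two-step pagination idiom the other new channels use; distilled into a helper, with the selector copied from peliculas() and urlparse as imported at the top of the file:

import re
import urlparse  # Python 2, as in the channel

def next_page_url(data, current_url):
    # Absolute URL of the next page, or None when the listing is exhausted.
    m = re.search('<li class="direction"><a href="([^"]+)" data-ajax="pagination">', data)
    return urlparse.urljoin(current_url, m.group(1)) if m else None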

View File

@@ -195,6 +195,7 @@ def findvideos(item):
action = "play",
language = idio[datos["audio"]],
quality = cali[datos["quality"]],
server = "",
title = titulo,
url = url1
))

View File

@@ -168,6 +168,7 @@ def findvideos(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for url, language in matches:
url = url.replace('&#038;','&')
data = httptools.downloadpage(url, follow_redirects=False, headers={'Referer':item.url}, only_headers=True)
url = data.headers['location']
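The hunk above asks httptools not to follow the redirect and reads the target out of the Location header itself. A standalone sketch of the same trick with httplib, assuming only that the link answers with the usual 30x; the HEAD keeps it cheap, mirroring only_headers=True (swap in 'GET' if a host rejects HEAD):

import httplib  # Python 2, as elsewhere in this commit
import urlparse

def final_location(url, referer):
    parts = urlparse.urlsplit(url)
    conn_cls = httplib.HTTPSConnection if parts.scheme == 'https' else httplib.HTTPConnection
    conn = conn_cls(parts.netloc)
    path = parts.path or '/'
    if parts.query:
        path += '?' + parts.query
    conn.request('HEAD', path, headers={'Referer': referer})
    return conn.getresponse().getheader('location', url)  # 30x target, else the url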

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
@@ -32,6 +32,8 @@ def mainlist(item):
itemlist.append(
Item(channel=item.channel, action="lista", title="Series", url=host, thumbnail=thumb_series, page=0))
itemlist.append(
Item(channel=item.channel, action="lista", title="Live Action", url=host+"/liveaction", thumbnail=thumb_series, page=0))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -45,9 +47,12 @@ def lista(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<a href="([^"]+)" '
patron += 'class="link">.+?<img src="([^"]+)".*?'
if item.title == "Series":
patron += 'class="link">.+?<img src="([^"]+)".*?'
else:
patron += 'class="link-la">.+?<img src="([^"]+)".*?'
patron += 'title="([^"]+)">'
if item.url==host:
if item.url==host or item.url==host+"/liveaction":
a=1
else:
num=(item.url).split('-')
@@ -150,25 +155,24 @@ def findvideos(item):
_sa = scrapertools.find_single_match(data, 'var _sa = (true|false);')
_sl = scrapertools.find_single_match(data, 'var _sl = ([^;]+);')
sl = eval(_sl)
#buttons = scrapertools.find_multiple_matches(data, '<button href="" class="selop" sl="([^"]+)">([^<]+)</button>')
#for id, title in buttons:
new_url = golink(0, _sa, sl)
data = httptools.downloadpage(new_url).data
_x0x = scrapertools.find_single_match(data, 'var x0x = ([^;]+);')
x0x = eval(_x0x)
url = resolve(x0x[4], base64.b64decode(x0x[1]))
if 'download' in url:
url = url.replace('download', 'preview')
title = '%s'
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language='latino',
#buttons = scrapertools.find_multiple_matches(data, '<button href="" class="selop" sl="([^"]+)">')
buttons = [0,1,2]
for id in buttons:
new_url = golink(int(id), _sa, sl)
data_new = httptools.downloadpage(new_url).data
_x0x = scrapertools.find_single_match(data_new, 'var x0x = ([^;]+);')
try:
x0x = eval(_x0x)
url = resolve(x0x[4], base64.b64decode(x0x[1]))
if 'download' in url:
url = url.replace('download', 'preview')
title = '%s'
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language='latino',
infoLabels=item.infoLabels))
except Exception as e:
logger.info(e)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
@@ -210,4 +214,4 @@ def resolve(value1, value2):
lista[j] = k
reto += chr(ord(value2[i]) ^ lista[(lista[m] + lista[j]) % 256])
return reto
return reto
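The new buttons loop simply tries the three mirror ids (0-2) and logs per-mirror failures instead of aborting, and the tail of resolve() above looks like the XOR step of a textbook RC4 keystream. For reference, a self-contained RC4 sketch; this is the standard algorithm, not a claim about the parts of resolve() this hunk does not show:

def rc4(key, data):
    S = list(range(256))
    j = 0
    for i in range(256):                    # key-scheduling (KSA)
        j = (j + S[i] + ord(key[i % len(key)])) % 256
        S[i], S[j] = S[j], S[i]
    out = []
    i = j = 0
    for ch in data:                         # keystream + XOR (PRGA)
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]
        out.append(chr(ord(ch) ^ S[(S[i] + S[j]) % 256]))
    return ''.join(out)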

View File

@@ -0,0 +1,16 @@
{
"id": "spankbang",
"name": "spankbang",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://static.spankbang.com/static_desktop/Images/logo_desktop@2xv2.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,136 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://spankbang.xxx'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url= host + "/new_videos/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="peliculas", url=host + "/wall-note-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url= host + "/wall-main-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas largos" , action="peliculas", url= host + "/wall-time-1.html"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search-%s-1.html" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '&nbsp;<a href="([^"]+)" class="link1">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = scrapedurl.replace(".html", "_date.html")
scrapedurl = host +"/" + scrapedurl
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
# data = httptools.downloadpage(item.url).data
data = scrapertools.cachePage(item.url)
# <div class="video-item" data-id="4652797">
# <a href="/2rq4d/video/yenlomfc" class="thumb ">
# <img src="//static.spankbang.com/static_desktop/Images/blank.png" data-src="//cdnthumb3.spankbang.com/250/4/6/4652797-t6.jpg" alt="yenlomfc" class="cover lazyload has_mp4" />
# <span class="play fa fa-play-circle-o fa-3x"></span>
# <span class="i-len"><i class="fa fa-clock-o"></i> 73</span>
# </a>
# <span class="i-wl" onclick="add_wl(4652797, this)" title="Add to watch later"><i class="fa fa-clock-o"></i><strong>Watch later</strong></span>
# <span class="i-fav" onclick="add_fav(4652797, this)" title="Add to favorites"><i class="fa fa-heart"></i><strong>Favorite</strong></span>
# <span class="i-flag" onclick="show_flag(4652797)" title="Report"><i class="fa fa-flag"></i><strong>Report</strong></span>
# <div class="inf">yenlomfc</div>
# <ul>
# <li>Hace 11 minutos</li>
# <li><i class="fa fa-eye"></i> 60</li>
# <li><i class="fa fa-thumbs-o-up"></i> 100%</li>
# </ul>
# </div>
# <div class="video-item" data-id="4652795">
# <a href="/2rq4b/video/penny+underbust+playstation+modeling" class="thumb ">
# <img src="//static.spankbang.com/static_desktop/Images/blank.png" data-src="//cdnthumb1.spankbang.com/250/4/6/4652795-t6.jpg" alt="Penny Underbust Playstation Modeling" class="cover lazyload " />
# <span class="play fa fa-play-circle-o fa-3x"></span>
# <span class="i-hd">1080p</span>
# <span class="i-len"><i class="fa fa-clock-o"></i> 3</span>
# </a>
# <span class="i-wl" onclick="add_wl(4652795, this)" title="Add to watch later"><i class="fa fa-clock-o"></i><strong>Watch later</strong></span>
# <span class="i-fav" onclick="add_fav(4652795, this)" title="Add to favorites"><i class="fa fa-heart"></i><strong>Favorite</strong></span>
# <span class="i-flag" onclick="show_flag(4652795)" title="Report"><i class="fa fa-flag"></i><strong>Report</strong></span>
# <div class="inf">Penny Underbust Playstation Modeling</div>
# <ul>
# <li>Hace 12 minutos</li>
# <li><i class="fa fa-eye"></i> 99</li>
# <li><i class="fa fa-thumbs-o-up"></i> 100%</li>
# </ul>
# </div>
patron = '<div class="video-item" data-id="\d+">.*?'
patron += '<a href="([^"]+)".*?'
patron += 'data-src="([^"]+)" alt="([^"]+)".*?'
patron += '<i class="fa fa-clock-o"></i>(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
# http://cdnthumb1.spankbang.com/250/4/6/4652755-t6.jpg
thumbnail = "http:" + scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
# <li class="next"><a href="/new_videos/2/">&raquo;</a></li>
next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)">')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'servervideo = \'([^\']+)\'.*?'
patron += 'path = \'([^\']+)\'.*?'
patron += 'filee = \'([^\']+)\'.*?'
matches = scrapertools.find_multiple_matches(data, patron)
for servervideo,path,filee in matches:
scrapedurl = servervideo + path + "56ea912c4df934c216c352fa8d623af3" + filee
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist
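play() above stitches the stream URL together from three JS variables plus a fixed hex segment. The same extraction as one helper; the pattern and the constant are copied from the function, the rest is a sketch:

import re

def direct_urls(data):
    # Rebuild servervideo + path + <token> + filee for every triple on the page.
    pattern = (r"servervideo = '([^']+)'.*?"
               r"path = '([^']+)'.*?"
               r"filee = '([^']+)'")
    return [server + path + '56ea912c4df934c216c352fa8d623af3' + filee
            for server, path, filee in re.findall(pattern, data, re.DOTALL)]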

View File

@@ -0,0 +1,17 @@
{
"id": "streamingporn",
"name": "streamingporn",
"active": true,
"adult": false,
"language": ["*"],
"thumbnail": "http://streamingporn.xyz/wp-content/uploads/2017/06/streamingporn.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,105 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://streamingporn.xyz'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/category/movies/"))
itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/category/stream/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'PaySites(.*?)<li id="menu-item-28040"')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li id="menu-item-\d+".*?<a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<a href="#">Categories</a>(.*?)<li id="menu-item-30919')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li id="menu-item-\d+".*?<a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="entry-featuredImg">.*?<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+)">'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
url = scrapedurl
title = scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<div class="loadMoreInfinite"><a href="(.*?)" >Load More')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

View File

@@ -0,0 +1,17 @@
{
"id": "streamporno",
"name": "streamporno",
"active": true,
"adult": false,
"language": ["*"],
"thumbnail": "http://pornstreams.eu/wp-content/uploads/2015/12/faviiconeporn.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://streamporno.eu'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li id="menu-item-.*?<a href="([^"]+)">([^"]+)</a>'
if item.title == "Categorias":
itemlist.append( Item(channel=item.channel, title="Big Tits" , action="peliculas", url=host + "/?s=big+tits"))
patron = '<li class="cat-item.*?<a href="([^"]+)" >([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<article id=.*?<a href="([^"]+)" title="([^"]+)">.*?src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">&raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist

View File

@@ -14,3 +14,4 @@
]
}

View File

@@ -41,7 +41,7 @@ def search(item, texto):
def catalogo(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<div class="vidcountSp">(\d+)</div>.*?<a class="categoryTitle channelTitle" href="([^"]+)" title="([^"]+)">.*?data-original="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for cantidad,scrapedurl,scrapedtitle,scrapedthumbnail in matches:
@@ -62,7 +62,7 @@ def categorias(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
if item.title=="PornStars" :
data = scrapertools.get_match(data,'</i> Hall Of Fame Pornstars</h2>(.*?)</section>')
data = scrapertools.get_match(data,'</i> Hall Of Fame Pornstars</h1>(.*?)</section>')
patron = '<a class="thumb" href="([^"]+)">.*?<img src="([^"]+)".*?<div class="vidcountSp">(.*?)</div>.*?<a class="categoryTitle".*?>([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches:
@@ -84,7 +84,7 @@ def categorias(item):
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class=\'thumb no_ajax\' href=\'(.*?)\'.*?data-original=\'(.*?)\' alt="([^"]+)"><div class=\'videoDuration\'>([^<]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
@@ -106,7 +106,7 @@ def peliculas(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<meta itemprop="contentUrl" content="([^"]+)" />'
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:

View File

@@ -0,0 +1,16 @@
{
"id": "tubedupe",
"name": "tubedupe",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://tubedupe.com/apple-touch-icon-180x180-precomposed.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,106 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://tubedupe.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="peliculas", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Modelos" , action="categorias", url=host + "/models/?sort_by=model_viewed"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/channels/?sort_by=cs_viewed"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/?sort_by=avg_videos_popularity"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search/?q=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if item.title == "Categorias" or "Canal" :
patron = '<a href="([^"]+)" class="list-item" title="([^"]+)">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<var class="duree">([^"]+) </var>'
else:
patron = '<div class="block-pornstar">.*?<a href="([^"]+)" title="([^"]+)" >.*?src="([^"]+)".*?<div class="col-lg-4-fixed nb-videos">.*?<br>(\d+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page = scrapertools.find_single_match(data, '<li class="active">.*?<a href="([^"]+)" title="Page')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="categorias", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="block-video">.*?'
patron += '<a href="([^"]+)" class="[^"]+" title="([^"]+)">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<var class="duree">(.*?)</var>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
next_page = scrapertools.find_single_match(data, '<li class="active">.*?<a href="([^"]+)" title="Page')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data, 'video_alt_url3: \'([^\']+)\'')
if scrapedurl == "" :
scrapedurl = scrapertools.find_single_match(data, 'video_alt_url2: \'([^\']+)\'')
if scrapedurl == "" :
scrapedurl = scrapertools.find_single_match(data, 'video_alt_url: \'([^\']+)\'')
if scrapedurl == "" :
scrapedurl = scrapertools.find_single_match(data, 'video_url: \'([^\']+)\'')
itemlist.append(Item(channel=item.channel, action="play", title=scrapedurl, fulltitle=item.title, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist
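play() above walks the quality fallbacks through four cascading ifs; the same selection reads better as a loop (sketch, with the variable names and quoting taken from the hunk):

import re

def pick_video_url(data):
    # First populated video_* variable wins, highest quality first.
    for var in ('video_alt_url3', 'video_alt_url2', 'video_alt_url', 'video_url'):
        m = re.search(var + r": '([^']+)'", data)
        if m:
            return m.group(1)
    return ''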

View File

@@ -0,0 +1,77 @@
{
"id": "tvpelis",
"name": "TvPelis",
"active": true,
"adult": false,
"language": ["lat", "cast", "*"],
"thumbnail": "http://www.tvpelis.tv/wp-content/themes/tvpelistv3/images/logo.png",
"banner": "",
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
}
]
}

View File

@@ -0,0 +1,374 @@
# -*- coding: utf-8 -*-
# -*- Channel TvPelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://www.tvpelis.tv/'
IDIOMAS = {'Latino': 'LAT', 'latino': 'LAT', 'Español':'CAST', 'castellano': 'CAST', 'Vose':'VOSE', 'vose':'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['xdrive', 'bitertv', 'okru']
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
logger.debug(data)
return data
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Películas", action="movies_menu",
thumbnail=get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title="Series", action="list_all", url=host+'genero/series/',
thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, title="Documentales", action="list_all", url=host + 'genero/documentales/',
thumbnail=get_thumb('documental', auto=True)))
# itemlist.append(Item(channel=item.channel, title="Latino", action="list_all", url=host + 'genero/latino/',
# thumbnail=get_thumb('lat', auto=True)))
#
# itemlist.append(Item(channel=item.channel, title="VOSE", action="list_all", url=host + 'genero/vose/',
# thumbnail=get_thumb('vose', auto=True)))
#
# itemlist.append(Item(channel=item.channel, title="Generos", action="section",
# thumbnail=get_thumb('genres', auto=True)))
#
# itemlist.append(Item(channel=item.channel, title="Por Años", action="section",
# thumbnail=get_thumb('year', auto=True)))
itemlist.append(Item(channel=item.channel, title='Buscar', action="search", url=host + '?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def movies_menu(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host,
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Castellano", action="list_all", url=host + 'genero/castellano/',
thumbnail=get_thumb('cast', auto=True)))
itemlist.append(Item(channel=item.channel, title="Latino", action="list_all", url=host + 'genero/latino/',
thumbnail=get_thumb('lat', auto=True)))
itemlist.append(Item(channel=item.channel, title="VOSE", action="list_all", url=host + 'genero/vose/',
thumbnail=get_thumb('vose', auto=True)))
itemlist.append(Item(channel=item.channel, title="Hindú", action="list_all", url=host + 'genero/hindu/',
thumbnail=get_thumb('hindu', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="section",
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Por Años", action="section",
thumbnail=get_thumb('year', auto=True)))
return itemlist
def list_all(item):
logger.info()
itemlist = []
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data,
"<div id='z1'><section><div id='main'><div class='breadcrumbs'>(.*?)</ul>")
logger.debug(data)
patron = 'article id=.*?<a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)".*?'
patron += 'class="selectidioma">(.*?)class="fixyear".*?class="genero">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, lang_data, type in matches:
url = scrapedurl
lang = get_language(lang_data)
year = scrapertools.find_single_match(scrapedtitle, '(\d{4})')
scrapedtitle = scrapertools.find_single_match(scrapedtitle, '([^\(]+)\(?').strip()
#scrapedtitle = scrapedtitle.replace('Latino','')
scrapedtitle = re.sub('latino|español|sub|audio','', scrapedtitle.lower()).capitalize()
if not config.get_setting('unify'):
title = '%s %s' % (scrapedtitle, lang)
else:
title = scrapedtitle
thumbnail = 'https:'+scrapedthumbnail
new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, language = lang,
infoLabels={'year':year})
logger.debug(type)
if 'series' not in type.lower():
new_item.contentTitle = scrapedtitle
new_item.action = 'findvideos'
else:
new_item.contentSerieName = scrapedtitle
new_item.action = 'seasons'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
if itemlist != []:
next_page = scrapertools.find_single_match(full_data, '<link rel="next" href="([^"]+)"')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>', url=next_page))
return itemlist
def section(item):
logger.info()
itemlist = []
data=get_source(host)
if item.title == 'Generos':
data = scrapertools.find_single_match(data, '<h2>Categorias de Peliculas</h2>(.*?)</ul>')
patron = 'href="([^"]+)"> <em>Peliculas de </em>([^<]+)<span>'
if item.title == 'Por Años':
data = scrapertools.find_single_match(data, '>Filtrar por A&ntilde;o</option>(.*?)</select>')
patron = 'value="([^"]+)">Peliculas del A&ntilde;o (\d{4})<'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel, title=title.strip(), url=url, action='list_all'))
return itemlist
def seasons(item):
logger.info()
itemlist=[]
all_seasons = []
data=get_source(item.url)
patron='Temporada \d+'
matches = re.compile(patron, re.DOTALL).findall(data)
action = 'episodesxseasons'
if len(matches) == 0:
matches.append('1')
action = 'aios'
infoLabels = item.infoLabels
for season in matches:
season = season.lower().replace('temporada','')
infoLabels['season']=season
title = 'Temporada %s' % season
if title not in all_seasons:
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action=action,
infoLabels=infoLabels))
all_seasons.append(title)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def aios(item):
logger.info()
itemlist = []
data=get_source(item.url)
patron='href="([^"]+)" rel="bookmark"><i class="fa icon-chevron-sign-right"></i>.*?Capitulo (?:00|)(\d+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedepisode in matches:
infoLabels['episode'] = scrapedepisode
url = item.url+scrapedurl
title = '%sx%s - Episodio %s' % (infoLabels['season'], infoLabels['episode'], infoLabels['episode'])
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', type=item.type,
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist = []
data=get_source(item.url)
patron='<a href="([^"]+)".*?</i>.*?Temporada %s, Episodio (\d+) - ([^<]+)<' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedepisode, scrapedtitle in matches:
infoLabels['episode'] = scrapedepisode
url = scrapedurl
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', type=item.type,
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def get_language(lang_data):
logger.info()
language = []
lang_list = scrapertools.find_multiple_matches(lang_data, '<em class="bandera sp([^"]+)"')
for lang in lang_list:
if not lang in IDIOMAS:
lang = 'vose'
lang = IDIOMAS[lang]
if lang not in language:
language.append(lang)
return language
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<div id="([^"]+)".?class="tab_part.*?">.?<iframe src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
patron = 'class="(rep)".*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, player in matches:
if 'ok.ru' in player:
url = 'http:' + player
elif 'rutube' in player:
url = 'http:' + player + "|%s" % item.url
elif 'http' not in player:
hidden_data = get_source('%s%s' % (host, player))
url = scrapertools.find_single_match(hidden_data, '<iframe src="([^"]+)"')
else:
url = player
lang = scrapertools.find_single_match(data, '<li rel="%s">([^<]+)</li>' % option)
if lang.lower() in ['online', 'trailer']:
continue
if lang in IDIOMAS:
lang = IDIOMAS[lang]
if not config.get_setting('unify'):
title = ' [%s]' % lang
else:
title = ''
if url != '':
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=lang,
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library", extra="findvideos",
contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
itemlist = []
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
try:
return list_all(item)
except:
itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host
elif categoria == 'latino':
item.url = host + 'filter?language=2'
elif categoria == 'castellano':
item.url = host + 'filter?language=1'
elif categoria == 'infantiles':
item.url = host + 'genre/25/infantil'
elif categoria == 'terror':
item.url = host + 'genre/15/terror'
item.pages=3
itemlist = list_all(item)
        if itemlist and itemlist[-1].title == 'Siguiente >>>':
            itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
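The findvideos above pairs each hidden player iframe with its language tab through a shared option id: the <li rel="optionN"> label names the language of the <div id="optionN"> iframe. A minimal, runnable sketch of that pairing with the same regexes over made-up HTML (the snippet, option ids and URLs are illustrative, not taken from the real site):

# -*- coding: utf-8 -*-
import re

# HTML hipotetico con la estructura que asume el scraper
sample = ('<li rel="option1">Latino</li><li rel="option2">Online</li>'
          '<div id="option1" class="tab_part"> <iframe src="http://ok.ru/videoembed/1"></iframe></div>'
          '<div id="option2" class="tab_part"> <iframe src="http://example.com/player"></iframe></div>')

patron = '<div id="([^"]+)".?class="tab_part.*?">.?<iframe src="([^"]+)"'
for option, player in re.compile(patron, re.DOTALL).findall(sample):
    # la pestaña <li rel="..."> con el mismo id lleva la etiqueta de idioma
    lang = re.search('<li rel="%s">([^<]+)</li>' % option, sample).group(1)
    if lang.lower() in ['online', 'trailer']:  # pestañas que no son enlaces de video
        continue
    print('%s -> %s [%s]' % (option, player, lang))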

View File

@@ -10,27 +10,10 @@ from core import tmdb
from core.item import Item
from channels import filtertools, autoplay
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://www.ultrapeliculashd.com'
tgenero = {"ACCIÓN": "https://s3.postimg.cc/y6o9puflv/accion.png,",
"ANIMACIÓN": "https://s13.postimg.cc/5on877l87/animacion.png",
"AVENTURA": "https://s10.postimg.cc/6su40czih/aventura.png",
"CIENCIA FICCIÓN": "https://s9.postimg.cc/diu70s7j3/cienciaficcion.png",
"COMEDIA": "https://s7.postimg.cc/ne9g9zgwb/comedia.png",
"CRIMEN": "https://s4.postimg.cc/6z27zhirx/crimen.png",
"DRAMA": "https://s16.postimg.cc/94sia332d/drama.png",
"ESTRENOS": "https://s21.postimg.cc/fy69wzm93/estrenos.png",
"FAMILIA": "https://s7.postimg.cc/6s7vdhqrf/familiar.png",
"FANTASÍA": "https://s13.postimg.cc/65ylohgvb/fantasia.png",
"GUERRA": "https://s4.postimg.cc/n1h2jp2jh/guerra.png",
"INFANTIL": "https://s23.postimg.cc/g5rmazozv/infantil.png",
"MISTERIO": "https://s1.postimg.cc/w7fdgf2vj/misterio.png",
"ROMANCE": "https://s15.postimg.cc/fb5j8cl63/romance.png",
"SUSPENSO": "https://s13.postimg.cc/wmw6vl1cn/suspenso.png",
"TERROR": "https://s7.postimg.cc/yi0gij3gb/terror.png"
}
thumbletras = {'#': 'https://s32.postimg.cc/drojt686d/image.png',
'a': 'https://s32.postimg.cc/llp5ekfz9/image.png',
'b': 'https://s32.postimg.cc/y1qgm1yp1/image.png',
@@ -81,31 +64,27 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title="Todas",
action="lista",
thumbnail='https://s18.postimg.cc/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png',
thumbnail=get_thumb('all', auto=True),
url=host + '/movies/'
))
itemlist.append(Item(channel=item.channel, title="Generos",
action="generos",
url=host,
thumbnail='https://s3.postimg.cc/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png'
thumbnail=get_thumb('genres', auto=True)
))
itemlist.append(Item(channel=item.channel, title="Alfabetico",
action="seccion",
url=host,
thumbnail='https://s17.postimg.cc/fwi1y99en/a-z.png',
fanart='https://s17.postimg.cc/fwi1y99en/a-z.png',
thumbnail=get_thumb('alphabet', auto=True),
extra='alfabetico'
))
itemlist.append(Item(channel=item.channel, title="Buscar",
action="search",
url=host + '/?s=',
thumbnail='https://s30.postimg.cc/pei7txpa9/buscar.png',
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
thumbnail=get_thumb('search', auto=True)
))
autoplay.show_option(item.channel, itemlist)
@@ -168,8 +147,6 @@ def generos(item):
for scrapedurl, scrapedtitle in matches:
thumbnail = ''
fanart = ''
if scrapedtitle in tgenero:
thumbnail = tgenero[scrapedtitle]
title = scrapedtitle
url = scrapedurl
if scrapedtitle not in ['PRÓXIMAMENTE', 'EN CINE']:
@@ -221,56 +198,46 @@ def alpha(item):
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def findvideos(item):
from lib import jsunpack
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = '<div id=(option.*?) class=play.*?<iframe.*?'
patron += 'rptss src=(.*?) (?:width.*?|frameborder.*?) allowfullscreen><\/iframe>'
matches = re.compile(patron, re.DOTALL).findall(data)
full_data = get_source(item.url)
patron = '<div id="([^"]+)" class="play-box-iframe.*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(full_data)
for option, video_url in matches:
language = scrapertools.find_single_match(data, '#%s>.*?-->(.*?)(?:\s|<)' % option)
language = scrapertools.find_single_match(full_data, '"#%s">.*?-->(.*?)(?:\s|<)' % option)
if 'sub' in language.lower():
language = 'SUB'
language = IDIOMAS[language]
if 'ultrapeliculashd' in video_url:
new_data = httptools.downloadpage(video_url).data
new_data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", new_data)
if 'drive' not in video_url:
quality= '1080p'
packed = scrapertools.find_single_match(new_data, '<script>(eval\(.*?)eval')
unpacked = jsunpack.unpack(packed)
url = scrapertools.find_single_match(unpacked, 'file:(http.?:.*?)\}')
else:
quality= '1080p'
url = scrapertools.find_single_match(new_data, '</div><iframe src=([^\s]+) webkitallowfullscreen')
elif 'stream' in video_url and 'streamango' not in video_url:
data = httptools.downloadpage('https:'+video_url).data
if not 'iframe' in video_url:
new_url=scrapertools.find_single_match(data, 'iframe src="(.*?)"')
new_data = httptools.downloadpage(new_url).data
url= ''
try:
url, quality = scrapertools.find_single_match(new_data, 'file:.*?(?:\"|\')(https.*?)(?:\"|\'),'
'label:.*?(?:\"|\')(.*?)(?:\"|\'),')
except:
pass
if url != '':
headers_string = '|Referer=%s' % url
url = url.replace('download', 'preview')+headers_string
data = httptools.downloadpage(video_url, follow_redirects=False, headers={'Referer': item.url}).data
sub = scrapertools.find_single_match(new_data, 'file:.*?"(.*?srt)"')
new_item = (Item(title=item.title, url=url, quality=quality, subtitle=sub, server='directo',
language = language))
itemlist.append(new_item)
else:
url = video_url
quality = 'default'
if 'hideload' in video_url:
quality = ''
new_id = scrapertools.find_single_match(data, "var OLID = '([^']+)'")
new_url = 'https://www.ultrapeliculashd.com/hideload/?ir=%s' % new_id[::-1]
data = httptools.downloadpage(new_url, follow_redirects=False, headers={'Referer': video_url}).headers
url = data['location']+"|%s" % video_url
elif 'd.php' in video_url:
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
quality = '1080p'
packed = scrapertools.find_single_match(data, '<script>(eval\(.*?)eval')
unpacked = jsunpack.unpack(packed)
url = scrapertools.find_single_match(unpacked, '"file":("[^"]+)"')
elif 'drive' in video_url:
quality = '1080p'
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
url = scrapertools.find_single_match(data, 'src="([^"]+)"')
if not config.get_setting("unify"):
title = ' [%s] [%s]' % (quality, language)

View File

@@ -0,0 +1,89 @@
{
"id": "vi2",
"name": "vi2",
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://i.postimg.cc/0Qy9wf8b/vi2.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vos",
"torrent"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE",
"VO"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - Terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,337 @@
# -*- coding: utf-8 -*-
# -*- Channel Vi2.co -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import base64
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'Latino': 'LAT', 'Español':'CAST', 'Subtitulado': 'VOSE', 'VO': 'VO'}
list_language = IDIOMAS.values()
list_quality = ['Full HD 1080p',
'HDRip',
'DVDScreener',
'720p',
'Ts Screener hq',
'HD Real 720p',
'DVDRip',
'BluRay-1080p',
'BDremux-1080p']
list_servers = [
'directo',
'openload',
'rapidvideo',
'jawcloud',
'cloudvideo',
'upvid',
'vevio',
'gamovideo'
]
host = 'http://vi2.co'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title='Peliculas', action='select_menu', type='peliculas',
thumbnail= get_thumb('movies', auto=True)))
# itemlist.append(Item(channel=item.channel, title='Series', url=host+'serie', action='select_menu', type='series',
# thumbnail= get_thumb('tvshows', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def select_menu(item):
logger.info()
itemlist=[]
url = host + '/%s/es/' % item.type
itemlist.append(Item(channel=item.channel, title='Streaming', action='sub_menu',
thumbnail=get_thumb('all', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title='Torrent', action='sub_menu',
thumbnail=get_thumb('all', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title='Generos', action='section', url=url,
thumbnail=get_thumb('genres', auto=True), type='all'))
itemlist.append(Item(channel=item.channel, title='Por Año', action='section', url=url,
thumbnail=get_thumb('year', auto=True), type='all'))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=url + 'ajax/1/?q=',
thumbnail=get_thumb("search", auto=True), type=item.type))
return itemlist
def sub_menu(item):
logger.info()
itemlist = []
url = host + '/%s/es/ajax/1/' % item.type
link_type = item.title.lower()
if link_type == 'streaming':
link_type = 'flash'
movies_options = ['Todas', 'Castellano', 'Latino', 'VOSE']
tv_options = ['Ultimas', 'Ultimas Castellano', 'Ultimas Latino', 'Ultimas VOSE']
if item.type == 'peliculas':
title = movies_options
thumb_1 = 'all'
else:
thumb_1 = 'last'
title = tv_options
itemlist.append(Item(channel=item.channel, title=title[0], url=url+'?q=%s' % link_type,
action='list_all', thumbnail=get_thumb(thumb_1, auto=True), type=item.type,
link_type=link_type))
itemlist.append(Item(channel=item.channel, title=title[1],
url=url + '?q=%s+espanol' % link_type, action='list_all',
thumbnail=get_thumb('cast', auto=True), type=item.type, send_lang='Español',
link_type=link_type))
itemlist.append(Item(channel=item.channel, title=title[2],
url=url + '?q=%s+latino' % link_type, action='list_all',
thumbnail=get_thumb('lat', auto=True), type=item.type, send_lang='Latino',
link_type=link_type))
itemlist.append(Item(channel=item.channel, title=title[3],
url=url + '?q=%s+subtitulado' % link_type, action='list_all',
thumbnail=get_thumb('vose', auto=True), type=item.type, send_lang='VOSE',
link_type=link_type))
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def section(item):
logger.info()
itemlist=[]
excluded = ['latino', 'español', 'subtitulado', 'v.o.', 'streaming', 'torrent']
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data, 'toptags-container(.*?)<div class="android-more-section">')
patron = 'href="([^"]+)">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
url = host+scrapedurl.replace('/?','/ajax/1/?')
if (item.title=='Generos' and title.lower() not in excluded and not title.isdigit()) or (item.title=='Por Año' and title.isdigit()):
itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all', type=item.type))
return itemlist
def list_all(item):
from core import jsontools
logger.info()
itemlist = []
listed =[]
quality=''
infoLabels = {}
json_data= jsontools.load(get_source(item.url))
data = json_data['render']
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
#if item.type == 'peliculas':
patron = '<img class="cover".*?src="([^"]+)" data-id="\d+" '
patron +='alt="Ver ([^\(]+)(.*?)">'
patron += '<div class="mdl-card__menu"><a class="clean-link" href="([^"]+)">'
patron += '.*?<span class="link-size">(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumbnail, scrapedtitle, extra_info, scrapedurl, size in matches:
        quality = ''  # se reinicia en cada iteración para no arrastrar la calidad del item anterior
        if item.send_lang != '':
            lang = item.send_lang
        else:
            lang = ''
        year = '-'
extra_info = extra_info.replace('(', '|').replace('[','|').replace(')','').replace(']','')
extra_info = extra_info.split('|')
for info in extra_info:
info = info.strip()
if 'Rip' in info or '1080' in info or '720' in info or 'Screener' in info:
quality = info
if 'ingl' in info.lower():
info = 'VO'
if info in IDIOMAS:
lang = info
elif info.isdigit():
year = info
if lang in IDIOMAS:
lang = IDIOMAS[lang]
title = '%s' % scrapedtitle.strip()
if not config.get_setting('unify'):
if year.isdigit():
title = '%s [%s]' % (title, year)
if quality != '':
title = '%s [%s]' % (title, quality)
if lang != '':
title = '%s [%s]' % (title, lang)
thumbnail = host+scrapedthumbnail
url = host+scrapedurl
if item.type == 'series':
season, episode = scrapertools.find_single_match(scrapedtitle, '(\d+)x(\d+)')
infoLabels['season'] = season
infoLabels['episode'] = episode
else:
infoLabels['year'] = year
if title not in listed:
new_item = Item(channel=item.channel,
title=title,
url=url,
action='findvideos',
thumbnail=thumbnail,
type=item.type,
language = lang,
quality=quality,
link_type=item.link_type,
torrent_data= size,
infoLabels = infoLabels
)
if item.type == 'peliculas' or item.type == 'all':
new_item.contentTitle = scrapedtitle
else:
scrapedtitle = scrapedtitle.split(' - ')
new_item.contentSerieName = scrapedtitle[0]
itemlist.append(new_item)
listed.append(title)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
itemlist.sort(key=lambda it: it.title)
# Paginación
if json_data['next']:
actual_page = scrapertools.find_single_match(item.url, 'ajax/(\d+)/')
next_page =int(actual_page) + 1
url_next_page = item.url.replace('ajax/%s' % actual_page, 'ajax/%s' % next_page)
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, type=item.type,
action='list_all', send_lang=item.send_lang))
return itemlist
def findvideos(item):
logger.info()
import base64
itemlist = []
server = ''
data = get_source(item.url)
pre_url = scrapertools.find_single_match(data, 'class="inside-link" href="([^"]+)".*?<button type="button"')
data = get_source(host+pre_url)
patron = 'data-video="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
lang = item.language
quality = item.quality
for url in matches:
title = ''
link_type = ''
server = ''
url = base64.b64decode(url)
if 'torrent' in url:
if item.link_type == 'torrent' or item.type == 'all':
server = 'torrent'
link_type = 'torrent'
title = ' [%s]' % item.torrent_data
        else:
link_type = 'flash'
if link_type == item.link_type.lower() or item.type == 'all':
itemlist.append(Item(channel=item.channel, url=url, title='%s'+title, action='play', server=server,
language=lang, quality=quality, infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
itemlist = sorted(itemlist, key=lambda it: it.language)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return list_all(item)
else:
return []
def newest(categoria):
logger.info()
item = Item()
try:
        if categoria in ['peliculas']:
            item.url = host + '/ver/'
        elif categoria == 'infantiles':
            item.url = host + '/genero/animacion/'
        elif categoria == 'terror':
            item.url = host + '/genero/terror/'
        elif categoria == 'documentales':
            # corregido: antes apuntaba al género terror; el slug 'documental' es una suposición
            item.url = host + '/genero/documental/'
itemlist = list_all(item)
        if itemlist and itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -0,0 +1,17 @@
{
"id": "xxxdan",
"name": "xxxdan",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://s0.cdn3x.com/xxxdan/i/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,107 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://xxxdan.com'
#NO SE REPRODUCE EL VIDEO QUE ENCUENTRA
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/newest"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/popular30"))
itemlist.append( Item(channel=item.channel, title="Dururacion" , action="peliculas", url=host + "/longest"))
itemlist.append( Item(channel=item.channel, title="HD" , action="peliculas", url=host + "/channel30/hd"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/channels"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search?query=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<h3>CLIPS</h3>(.*?)<h3>FILM</h3>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li><a href="([^"]+)" title="">.*?<span class="videos-count">([^"]+)</span><span class="title">([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,cantidad,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" rel="tag".*?title="([^"]+)".*?data-original="([^"]+)".*?<span class="score">(\d+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedurl = scrapedurl.replace("channel", "channel30")
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li><figure>\s*<a href="([^"]+)" class="img\s*" title="([^"]+)".*?data-original="([^"]+)".*?<time datetime="\w+">([^"]+)</time>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches:
url = scrapedurl
contentTitle = scrapedtitle
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">&rarr;</a>')
if next_page_url!="":
next_page_url = next_page_url.replace("http://xxxdan.com/","")
next_page_url = "/" + next_page_url
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
media_url = scrapertools.find_single_match(data, 'src:\'([^\']+)\'')
media_url = media_url.replace("https","http")
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=media_url,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist

View File

@@ -6,7 +6,6 @@
"language": ["*"],
"thumbnail": "http://yuuk.net/wp-content/uploads/2018/06/yuuk_net_logo.png",
"banner": "",
],
"categories": [
"adult"
],

View File

@@ -17,7 +17,7 @@ def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/list-genres/"))
itemlist.append( Item(channel=item.channel, title="Buscar" , action="search"))
return itemlist
@@ -39,11 +39,13 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
itemlist.append( Item(channel=item.channel, action="peliculas", title="Big Tits" , url="http://yuuk.net/?s=big+tit" , folder=True) )
patron = 'menu-item-object-category"><a href="([^"]+)">.*?</style>([^"]+)</a>'
itemlist.append( Item(channel=item.channel, title="Censored" , action="peliculas", url=host + "/category/censored/"))
itemlist.append( Item(channel=item.channel, title="Uncensored" , action="peliculas", url=host + "/category/uncensored/"))
patron = '<li><a href="([^"]+)" title="[^"]+"><span>([^"]+)</span><span>([^"]+)</span></a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
for scrapedurl,scrapedtitle,cantidad in matches:
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
@@ -54,11 +56,12 @@ def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="featured-wrap clearfix">.*?<a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
patron = '<div class="featured-wrap clearfix">.*?<a href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?>#([^"]+) Full HD JAV</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
for scrapedurl,scrapedtitle,scrapedthumbnail,calidad in matches:
scrapedplot = ""
scrapedtitle = "[COLOR red]" + calidad + "[/COLOR] " + scrapedtitle
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<li><a rel=\'nofollow\' href=\'([^\']+)\' class=\'inactive\'>Next')
if next_page_url!="":

View File

@@ -415,9 +415,11 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
season_episode = scrapertools.get_season_and_episode(e.title)
# Si se ha marcado la opción de url de emergencia, se añade ésta a cada episodio después de haber ejecutado Findvideos del canal
if e.emergency_urls and isinstance(e.emergency_urls, dict): del e.emergency_urls #Borramos trazas anterioires
if e.emergency_urls and isinstance(e.emergency_urls, dict): del e.emergency_urls #Borramos trazas anteriores
json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower()) #Path del .json del episodio
if emergency_urls_stat == 1 and not e.emergency_urls and e.contentType == 'episode': #Guardamos urls de emergencia?
if not silent:
p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title) #progress dialog
if json_path in ficheros: #Si existe el .json sacamos de ahí las urls
if overwrite: #pero solo si se se sobrescriben los .json
json_epi = Item().fromjson(filetools.read(json_path)) #Leemos el .json
@@ -433,6 +435,8 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
if e.emergency_urls: del e.emergency_urls
emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo
elif emergency_urls_stat == 3 and e.contentType == 'episode': #Actualizamos urls de emergencia?
if not silent:
p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title) #progress dialog
e = emergency_urls(e, channel, json_path) #generamos las urls
if e.emergency_urls: #Si ya tenemos urls...
emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo

Binary file not shown (After, Size: 764 KiB)

Binary file not shown (Before, Size: 978 KiB)

View File

@@ -49,9 +49,15 @@ def connect(url):
# logger.info("Url: %s" % url)
global remote
server_name, server_ip, share_name, path, user, password, domain = parse_url(url)
#Da problemas asumir que la sesión está abierta. Si se abrió pero ha caducado, dará error. Mejor conectar siempre
"""
if not remote or not remote.sock or not server_name == remote.remote_name:
remote = SMBConnection(user, password, domain, server_name)
remote.connect(server_ip, 139)
"""
remote = SMBConnection(user, password, domain, server_name)
remote.connect(ip=server_ip, timeout=20)
return remote, share_name, path

View File

@@ -26,7 +26,7 @@ def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
class UnshortenIt(object):
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net'
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net|briskgram\.net'
_linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
_adfocus_regex = r'adfoc\.us'
_lnxlu_regex = r'lnx\.lu'

Binary file not shown (After, Size: 81 KiB)

View File

@@ -151,6 +151,10 @@ def render_items(itemlist, parent_item):
# Si el item no contiene categoria, le ponemos la del item padre
if item.category == "":
item.category = parent_item.category
        # Si title no existe, lo iniciamos como str, para evitar errores "NoneType"
if not item.title:
item.title = ''
# Si el item no contiene fanart, le ponemos el del item padre
if item.fanart == "":
@@ -210,7 +214,7 @@ def render_items(itemlist, parent_item):
if item.fanart:
fanart = item.fanart
else:
fanart = os.path.join(config.get_runtime_path(), "fanart1.jpg")
fanart = os.path.join(config.get_runtime_path(), "fanart-xmas.jpg")
# Creamos el listitem
#listitem = xbmcgui.ListItem(item.title)

View File

@@ -4,19 +4,23 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(?s)https://youtube.googleapis.com.*?docid=([0-9a-zA-Z-_]+)",
"pattern": "(?s)https://youtube.googleapis.com.*?docid=([A-z0-9-_=]+)",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
"pattern": "(?s)http://docs.google.com/get_video_info.*?docid=([0-9a-zA-Z-_]+)",
"pattern": "(?s)http://docs.google.com/get_video_info.*?docid=([A-z0-9-_=]+)",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
"pattern": "(?s)https://(?:docs|drive).google.com/file/d/([^/]+)/preview",
"pattern": "https://drive.google.com/uc\\?id=([A-z0-9-_=]+)",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
"pattern": "(?s)https://(lh.).googleusercontent.com/([0-9a-zA-Z-_=]+)",
"pattern": "(?s)https://(?:docs|drive).google.com/file/d/([^/]+)/(?:preview|edit)",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
"pattern": "(?s)https://(lh.).googleusercontent.com/([A-z0-9-_=]+)",
"url": "https://\\1.googleusercontent.com/\\2"
}
]
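For context: entries like these are consumed by the addon's server-detection step, which scans page HTML with every "pattern" and rewrites each match into a canonical URL through the backreferences in "url". A simplified, self-contained sketch of that mechanism (find_video_urls and the inline iframe are illustrative, not the real servertools code):

# -*- coding: utf-8 -*-
import re

patterns = [
    {"pattern": r"(?s)https://(?:docs|drive).google.com/file/d/([^/]+)/(?:preview|edit)",
     "url": r"http://docs.google.com/get_video_info?docid=\1"},
]

def find_video_urls(data):
    urls = []
    for p in patterns:
        for match in re.finditer(p["pattern"], data):
            urls.append(match.expand(p["url"]))  # \1 se sustituye por el docid capturado
    return urls

print(find_video_urls('<iframe src="https://drive.google.com/file/d/abc123/preview">'))
# -> ['http://docs.google.com/get_video_info?docid=abc123']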

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(jawcloud.co/embed-([A-z0-9]+))",
"pattern": "(jawcloud.co/(?:embed-|)([A-z0-9]+))",
"url": "https://\\1.html"
}
]

View File

@@ -7,6 +7,9 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "The file you were looking for could not be found" in data:
return False, "[jawcloud] El archivo ha ido borrado"
return True, ""

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://embed.mystream.to/(\\w+)",
"url": "https://embed.mystream.to/\\1"
}
]
},
"free": true,
"id": "mystream",
"name": "mystream",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://i.postimg.cc/t43grQdh/mystream1.png"
}

View File

@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector mystream By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from core import scrapertools
from lib.aadecode import decode as aadecode
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[mystream] El archivo no existe o ha sido borrado"
if "<title>video is no longer available" in data.data:
return False, "[mystream] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium = False, user = "", password = "", video_password = ""):
logger.info("url=" + page_url)
video_urls = []
headers = {'referer': page_url}
data = httptools.downloadpage(page_url, headers=headers).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
code = scrapertools.find_single_match(data, '(?s)<script>\s*゚ω゚(.*?)</script>').strip()
text_decode = aadecode(code)
matches = scrapertools.find_multiple_matches(text_decode, "'src', '([^']+)'")
for url in matches:
video_urls.append(['mystream [mp4]',url])
return video_urls
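The connector above relies on the page hiding its sources inside AAEncoded ('゚ω゚') JavaScript; once lib.aadecode turns that back into plain JS, the player URLs sit in ordinary 'src' assignments. A toy example of that final extraction step (the decoded snippet is invented):

# -*- coding: utf-8 -*-
import re

# fragmento hipotetico, tal y como quedaria despues de aadecode
text_decode = "player.setAttribute('src', 'https://cdn.example.com/video.mp4');"
for url in re.findall(r"'src', '([^']+)'", text_decode):
    print(['mystream [mp4]', url])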

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(http://rutube.ru/play/embed/[a-zA-Z0-9]+.p=[a-zA-Z0-9-]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "rutube",
"name": "rutube",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://www.cubancouncil.com/uploads/project_images/rutube_branding_black.png.648x0_q90_replace_alpha.jpg"
}

View File

@@ -0,0 +1,46 @@
# -*- coding: utf-8 -*-
# -*- Server Rutube -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
from core import jsontools
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = get_source(page_url)
if "File was deleted" in data or "File Not Found" in data:
return False, "[Rutube] El video ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
import urllib
video_urls = []
referer = ''
id = ''
if "|" in page_url:
page_url = page_url.replace('?', '|')
page_url, id, referer = page_url.split("|", 2)
header = {'referer':referer}
referer = urllib.urlencode(header)
"http://rutube.ru/api/play/options/10531822/?format=json&sqr4374_compat=1&no_404=true&referer=http%3A%2F%2Frutube.ru%2Fplay%2Fembed%2F10531822%3Fp%3DeDk8m91H0UBPOCUuFicFbQ&p=eDk8m91H0UBPOCUuFicFbQ"
base_link = page_url.replace("/play/embed/", "/api/play/options/")
new_link = base_link + '/?format=json&sqr4374_compat=1&no_404=true&%s&%s' % (referer, id)
data = httptools.downloadpage(new_link).data
json_data = jsontools.load(data)
video_urls.append(['Rutube', json_data['video_balancer']['m3u8']])
return video_urls
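A quick illustration of the URL rewriting performed above, using the example URL already embedded in the code (the trailing referer value is made up; the channel appends it after a '|'):

page_url = 'http://rutube.ru/play/embed/10531822?p=eDk8m91H0UBPOCUuFicFbQ|http://ejemplo.com/pagina'
page_url = page_url.replace('?', '|')             # queda base|p=...|referer
page_url, p_param, referer = page_url.split('|', 2)
print(page_url.replace('/play/embed/', '/api/play/options/'))
# -> http://rutube.ru/api/play/options/10531822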

View File

@@ -3,13 +3,11 @@
# Conector UpVID By Alfa development Group
# --------------------------------------------------------
import re
import re, base64
from core import httptools
from core import scrapertools
from platformcode import logger
import re, base64
from lib.aadecode import decode as aadecode
from platformcode import logger
def test_video_exists(page_url):