@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.7.31" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.8" provider-name="Alfa Addon">
     <requires>
         <import addon="xbmc.python" version="2.1.0"/>
         <import addon="script.module.libtorrent" optional="true"/>
@@ -19,14 +19,13 @@
     </assets>
     <news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
-¤ maxipelis24 ¤ cuevana3 ¤ pelisplusco
-¤ mejortorrent ¤ newpct1
+¤ maxipelis24 ¤ cinecalidad ¤ inkapelis
+¤ seriesmetro ¤ inkaseries

[COLOR green][B]Novedades[/B][/COLOR]
-¤ Mundopelis ¤ thevideobee ¤ tusfiles
-¤ vup
+¤ Siska ¤ xxxfreeinhd

-¤ Agradecimientos a @mac12m99 y @chivmalev por colaborar con ésta versión
+¤ Agradecimientos a @chivmalev por colaborar con ésta versión.

    </news>
    <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

@@ -46,7 +46,7 @@ def categorias(item):
         scrapedthumbnail = "https:" + scrapedthumbnail
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">»</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -57,7 +57,7 @@ def categorias(item):
 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     patron = '<div class="item">.*?'
     patron += '<a href="([^"]+)" title="(.*?)">.*?'

@@ -72,7 +72,7 @@ def lista(item):
         thumbnail = "https:" + scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
-                             plot=plot, contentTitle = contentTitle))
+                             fanart=thumbnail, plot=plot, contentTitle = contentTitle))
     next_page = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">»</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -82,7 +82,7 @@ def lista(item):

 def play(item):
     logger.info()
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     itemlist = servertools.find_video_items(data=data)
     for videoitem in itemlist:
         videoitem.title = item.fulltitle

@@ -70,7 +70,7 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
-                             contentTitle = scrapedtitle))
+                             fanart=thumbnail, contentTitle = scrapedtitle))
     next_page = scrapertools.find_single_match(data, '<span class="text16">\d+</span> <a href="..([^"]+)"')
     if next_page:
         next_page = urlparse.urljoin(item.url,next_page)

@@ -62,14 +62,14 @@ def categorias(item):
         scrapedplot = ""
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=scrapedplot) )
-    return itemlist
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
+    return sorted(itemlist, key=lambda i: i.title)


 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     patron = '<div class="alsoporn_prev">.*?'
     patron += '<a href="([^"]+)">.*?'

@@ -82,7 +82,8 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
-                             plot=plot, contentTitle = scrapedtitle))
+                             fanart=thumbnail, plot=plot, contentTitle = scrapedtitle))
+
     next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" target="_self"><span class="alsoporn_page">NEXT</span></a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -93,7 +94,7 @@ def lista(item):
 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     scrapedurl = scrapertools.find_single_match(data,'<iframe frameborder=0 scrolling="no" src=\'([^\']+)\'')
     data = scrapertools.cachePage(scrapedurl)
     scrapedurl1 = scrapertools.find_single_match(data,'<iframe src="(.*?)"')

@@ -49,7 +49,7 @@ def catalogo(item):
         scrapedplot = ""
         scrapedthumbnail = ""
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                             thumbnail=scrapedthumbnail, plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -71,8 +71,8 @@ def categorias(item):
         scrapedtitle = scrapedtitle + " (" + cantidad + ")"
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=scrapedplot) )
-    return itemlist
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
+    return sorted(itemlist, key=lambda i: i.title)


 def lista(item):
@@ -91,7 +91,7 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
-                             contentTitle = title))
+                             fanart=thumbnail, contentTitle = title))
     next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -32,7 +32,7 @@ def mainlist(item):

     itemlist = list()

-    itemlist.append(Item(channel=item.channel, action="lista", title="Series", contentTitle="Series", url=host+"/lista-de-anime.php",
+    itemlist.append(Item(channel=item.channel, action="lista", title="Series", contentTitle="Series", url=host+"/catalogo.php?g=&t=series&o=0",
                          thumbnail=thumb_series, range=[0,19]))
     itemlist.append(Item(channel=item.channel, action="lista", title="Películas", contentTitle="Películas", url=host+"/catalogo.php?g=&t=peliculas&o=0",
                          thumbnail=thumb_series, range=[0,19] ))

@@ -143,7 +143,7 @@ def start(itemlist, item):
     # 2: Solo servidores
     # 3: Solo calidades
     # 4: No ordenar
-    if (settings_node['custom_servers'] and settings_node['custom_quality']) or get_setting('autoplay'):
+    if (settings_node['custom_servers'] and settings_node['custom_quality']):
         priority = settings_node['priority']  # 0: Servidores y calidades o 1: Calidades y servidores
     elif settings_node['custom_servers']:
         priority = 2  # Solo servidores

@@ -254,7 +254,10 @@ def start(itemlist, item):
     autoplay_list.sort(key=lambda orden: orden['indice_quality'])

     # Se prepara el plan b, en caso de estar activo se agregan los elementos no favoritos al final
-    plan_b = settings_node['plan_b']
+    try:
+        plan_b = settings_node['plan_b']
+    except:
+        plan_b = True
     text_b = ''
     if plan_b:
         autoplay_list.extend(autoplay_b)

@@ -321,7 +324,7 @@ def start(itemlist, item):
                 platformtools.play_video(videoitem, autoplay=True)
             except:
                 pass
-
+            sleep(3)
             try:
                 if platformtools.is_playing():
                     PLAYED = True
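
Note: the plan_b hunk above guards against settings saved by older versions that lack the 'plan_b' key, defaulting to True. Assuming settings_node behaves like a plain dict, the same default could be written more idiomatically as:

    # Equivalent sketch (assumption: settings_node is a regular dict)
    plan_b = settings_node.get('plan_b', True)  # fall back to True when the key is missing

The try/except form in the patch also swallows other lookup errors, which may be deliberate given how Kodi persists settings.
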
@@ -50,7 +50,7 @@ def categorias(item):
         scrapedthumbnail = "http:" + scrapedthumbnail
         scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/latest/"
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     return itemlist


@@ -69,7 +69,7 @@ def lista(item):
         thumbnail = "https:" + scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
-                             plot=plot, contentTitle = scrapedtitle))
+                             fanart=thumbnail, plot=plot, contentTitle = scrapedtitle))
     next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next" title="Next">Next</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -49,8 +49,8 @@ def categorias(item):
         scrapedtitle = scrapedtitle + " (" + cantidad + ")"
         scrapedplot = ""
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=scrapedplot) )
-    return itemlist
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
+    return sorted(itemlist, key=lambda i: i.title)


 def lista(item):
@@ -68,7 +68,7 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
-                             contentTitle = scrapedtitle, fanart=scrapedthumbnail))
+                             contentTitle = scrapedtitle, fanart=thumbnail))
     if item.extra:
         next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
         if next_page:

@@ -311,6 +311,34 @@ def findvideos(item):
                  'Mega': '',
                  'MediaFire': ''}
     dec_value = scrapertools.find_single_match(data, 'String\.fromCharCode\(parseInt\(str\[i\]\)-(\d+)\)')

+    torrent_link = scrapertools.find_single_match(data, '<a href="/protect/v\.php\?i=([^"]+)"')
+    if torrent_link != '':
+        import urllib
+        base_url = '%s/protect/v.php' % host
+        post = {'i': torrent_link, 'title': item.title}
+        post = urllib.urlencode(post)
+        headers = {'Referer': item.url}
+        protect = httptools.downloadpage(base_url + '?' + post, headers=headers).data
+        url = scrapertools.find_single_match(protect, 'value="(magnet.*?)"')
+        server = 'torrent'
+
+        title = item.contentTitle + ' (%s)' % server
+        quality = 'default'
+        language = IDIOMAS[lang]
+
+        new_item = Item(channel=item.channel,
+                        action='play',
+                        title=title,
+                        fulltitle=item.contentTitle,
+                        url=url,
+                        language=language,
+                        thumbnail=item.thumbnail,
+                        quality=quality,
+                        server=server
+                        )
+        itemlist.append(new_item)
+
     for video_cod, server_id in matches:
         if server_id not in ['Mega', 'MediaFire', 'Trailer', '']:
             video_id = dec(video_cod, dec_value)

@@ -321,25 +349,14 @@ def findvideos(item):
             if server_id == 'TVM':
                 server = 'thevideome'
                 url = server_url[server_id] + video_id + '.html'
-            elif server_id == 'BitTorrent':
-                import urllib
-                base_url = '%s/protect/v.php' % host
-                post = {'i':video_id, 'title':item.title}
-                post = urllib.urlencode(post)
-                headers = {'Referer':item.url}
-                protect = httptools.downloadpage(base_url+'?'+post, headers=headers).data
-                url = scrapertools.find_single_match(protect, 'value="(magnet.*?)"')
-                server = 'torrent'
             else:
                 url = server_url[server_id] + video_id
             title = item.contentTitle + ' (%s)' % server
             quality = 'default'

             if server_id not in ['Mega', 'MediaFire', 'Trailer']:
-                if server != 'torrent':
-                    language = IDIOMAS[lang]
-                else:
-                    language = [IDIOMAS[lang], 'vose']
-
+                language = [IDIOMAS[lang], 'vose']
                 if url not in duplicados:
                     new_item = Item(channel=item.channel,
                                     action='play',
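
Note: the new torrent branch reuses dec_value, the per-page offset that the site's JavaScript applies via String.fromCharCode(parseInt(str[i]) - N). The channel's dec() helper (not shown in this diff) presumably reverses that obfuscation. A minimal sketch, assuming the encoded string is a comma-separated list of integers; the real token separator may differ:

    # Hypothetical reimplementation of dec(); illustrative only.
    def dec(encoded, offset):
        # Undo the JS encoding: each token is a character code shifted up by `offset`.
        return ''.join(chr(int(token) - int(offset)) for token in encoded.split(','))

    # dec('119,119,119', 2) -> 'uuu' (made-up values)
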
@@ -47,7 +47,7 @@ def catalogo(item):
         scrapedplot = ""
         scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/movies"
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail , plot=scrapedplot) )
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -68,7 +68,7 @@ def categorias(item):
         scrapedtitle = scrapedtitle
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     return itemlist


@@ -86,7 +86,7 @@ def lista(item):
         plot = ""
         year = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
-                             contentTitle = title, infoLabels={'year':year} ))
+                             fanart=thumbnail, contentTitle = title, infoLabels={'year':year} ))
     next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

plugin.video.alfa/channels/community.json (new file, 33 lines)
@@ -0,0 +1,33 @@
+{
+    "id": "community",
+    "name": "Community",
+    "active": true,
+    "adult": false,
+    "language": ["cast", "lat"],
+    "thumbnail": "",
+    "banner": "",
+    "fanart": "",
+    "categories": [
+        "direct",
+        "movie",
+        "tvshow",
+        "vo"
+    ],
+    "settings": [
+        {
+            "id": "filterlanguages",
+            "type": "list",
+            "label": "Mostrar enlaces del canal en idioma...",
+            "default": 3,
+            "enabled": true,
+            "visible": true,
+            "lvalues": [
+                "No Filtrar",
+                "LAT",
+                "CAST",
+                "VO",
+                "VOSE"
+            ]
+        }
+    ]
+}

plugin.video.alfa/channels/community.py (new file, 299 lines)
@@ -0,0 +1,299 @@
+# -*- coding: utf-8 -*-
+# -*- Channel Community -*-
+# -*- Created for Alfa-addon -*-
+# -*- By the Alfa Develop Group -*-
+
+import re
+import urllib
+import os
+
+from core import httptools
+from core import scrapertools
+from core import servertools
+from core import jsontools
+from channelselector import get_thumb
+from core import tmdb
+from core.item import Item
+from platformcode import logger, config, platformtools
+from channels import autoplay
+from channels import filtertools
+
+list_data = {}
+
+list_language = ['LAT', 'CAST', 'VO', 'VOSE']
+list_servers = ['directo']
+list_quality = ['SD', '720', '1080', '4k']
+
+
+def mainlist(item):
+    logger.info()
+
+    path = os.path.join(config.get_data_path(), 'community_channels.json')
+    if not os.path.exists(path):
+        with open(path, "w") as file:
+            file.write('{"channels":{}}')
+            file.close()
+    autoplay.init(item.channel, list_servers, list_quality)
+
+    return show_channels(item)
+
+
+def show_channels(item):
+    logger.info()
+    itemlist = []
+
+    context = [{"title": "Eliminar este canal",
+                "action": "remove_channel",
+                "channel": "community"}]
+
+    path = os.path.join(config.get_data_path(), 'community_channels.json')
+    file = open(path, "r")
+    json = jsontools.load(file.read())
+
+    itemlist.append(Item(channel=item.channel, title='Agregar un canal', action='add_channel', thumbnail=get_thumb('add.png')))
+
+    for key, channel in json['channels'].items():
+
+        if 'poster' in channel:
+            poster = channel['poster']
+        else:
+            poster = ''
+
+        itemlist.append(Item(channel=item.channel, title=channel['channel_name'], url=channel['path'],
+                             thumbnail=poster, action='show_menu', channel_id = key, context=context))
+    return itemlist
+
+
+def load_json(item):
+    logger.info()
+
+    if item.url.startswith('http'):
+        json_file = httptools.downloadpage(item.url).data
+    else:
+        json_file = open(item.url, "r").read()
+
+    json_data = jsontools.load(json_file)
+
+    return json_data
+
+
+def show_menu(item):
+    global list_data
+    logger.info()
+    itemlist = []
+
+    json_data = load_json(item)
+
+    if "menu" in json_data:
+        for option in json_data['menu']:
+            itemlist.append(Item(channel=item.channel, title=option['title'], action='show_menu', url=option['link']))
+        autoplay.show_option(item.channel, itemlist)
+        return itemlist
+
+    if "movies_list" in json_data:
+        item.media_type='movies_list'
+    elif "tvshows_list" in json_data:
+        item.media_type = 'tvshows_list'
+    elif "episodes_list" in json_data:
+        item.media_type = 'episodes_list'
+
+    return list_all(item)
+
+    return itemlist
+
+
+def list_all(item):
+    logger.info()
+
+    itemlist = []
+    media_type = item.media_type
+    json_data = load_json(item)
+    for media in json_data[media_type]:
+
+        quality, language, plot, poster = set_extra_values(media)
+
+        title = media['title']
+        title = set_title(title, language, quality)
+
+        new_item = Item(channel=item.channel, title=title, quality=quality,
+                        language=language, plot=plot, thumbnail=poster)
+
+        if 'movies_list' in json_data:
+            new_item.url = media
+            new_item.contentTitle = media['title']
+            new_item.action = 'findvideos'
+            if 'year' in media:
+                new_item.infoLabels['year'] = media['year']
+        else:
+            new_item.url = media['seasons_list']
+            new_item.contentSerieName = media['title']
+            new_item.action = 'seasons'
+
+        itemlist.append(new_item)
+
+    tmdb.set_infoLabels(itemlist, seekTmdb=True)
+    return itemlist
+
+
+def seasons(item):
+    logger.info()
+    itemlist = []
+    infoLabels = item.infoLabels
+    list_seasons = item.url
+    for season in list_seasons:
+        infoLabels['season'] = season['season']
+        title = 'Temporada %s' % season['season']
+        itemlist.append(Item(channel=item.channel, title=title, url=season['link'], action='episodesxseason',
+                             contentSeasonNumber=season['season'], infoLabels=infoLabels))
+
+    tmdb.set_infoLabels(itemlist, seekTmdb=True)
+    itemlist = sorted(itemlist, key=lambda i: i.title)
+
+    return itemlist
+
+
+def episodesxseason(item):
+    logger.info()
+
+    itemlist = []
+    json_data = load_json(item)
+    infoLabels = item.infoLabels
+
+    season_number = infoLabels['season']
+    for episode in json_data['episodes_list']:
+        episode_number = episode['number']
+        infoLabels['season'] = season_number
+        infoLabels['episode'] = episode_number
+
+        title = '%sx%s - Episodio %s' % (season_number, episode_number, episode_number)
+
+        itemlist.append(Item(channel=item.channel, title=title, url=episode, action='findvideos',
+                             contentEpisodeNumber=episode_number, infoLabels=infoLabels))
+
+    tmdb.set_infoLabels(itemlist, seekTmdb=True)
+    return itemlist
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+
+    for url in item.url['links']:
+        quality, language, plot, poster = set_extra_values(url)
+        title = ''
+        title = set_title(title, language, quality)
+
+        itemlist.append(Item(channel=item.channel, title='%s'+title, url=url['url'], action='play', quality=quality,
+                             language=language, infoLabels = item.infoLabels))
+
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
+
+    # Requerido para FilterTools
+    itemlist = filtertools.get_links(itemlist, item, list_language)
+
+    # Requerido para AutoPlay
+    autoplay.start(itemlist, item)
+
+    return itemlist
+
+
+def add_channel(item):
+    logger.info()
+    import xbmc
+    import xbmcgui
+    channel_to_add = {}
+    json_file = ''
+    result = platformtools.dialog_select('Agregar un canal', ['Desde archivo local', 'Desde URL'])
+    if result == -1:
+        return
+    if result==0:
+        file_path = xbmcgui.Dialog().browseSingle(1, 'Alfa - (Comunidad)', 'files')
+        try:
+            channel_to_add['path'] = file_path
+            json_file = jsontools.load(open(file_path, "r").read())
+            channel_to_add['channel_name'] = json_file['channel_name']
+        except:
+            pass
+
+    elif result==1:
+        url = platformtools.dialog_input("", 'Ingresa la URL del canal', False)
+        try:
+            channel_to_add['path'] = url
+            json_file = jsontools.load(httptools.downloadpage(url).data)
+        except:
+            pass
+
+    if len(json_file) == 0:
+        return
+    if "episodes_list" in json_file:
+        platformtools.dialog_ok('Alfa', 'No es posible agregar este tipo de canal')
+        return
+    channel_to_add['channel_name'] = json_file['channel_name']
+    path = os.path.join(config.get_data_path(), 'community_channels.json')
+
+    community_json = open(path, "r")
+    community_json = jsontools.load(community_json.read())
+    id = len(community_json['channels']) + 1
+    community_json['channels'][id]=(channel_to_add)
+
+    with open(path, "w") as file:
+        file.write(jsontools.dump(community_json))
+        file.close()
+
+    platformtools.dialog_notification('Alfa', '%s se ha agregado' % json_file['channel_name'])
+    return
+
+
+def remove_channel(item):
+    logger.info()
+    import xbmc
+    import xbmcgui
+    path = os.path.join(config.get_data_path(), 'community_channels.json')
+
+    community_json = open(path, "r")
+    community_json = jsontools.load(community_json.read())
+
+    id = item.channel_id
+    to_delete = community_json['channels'][id]['channel_name']
+    del community_json['channels'][id]
+    with open(path, "w") as file:
+        file.write(jsontools.dump(community_json))
+        file.close()
+
+    platformtools.dialog_notification('Alfa', '%s ha sido eliminado' % to_delete)
+    platformtools.itemlist_refresh()
+    return
+
+
+def set_extra_values(dict):
+    logger.info()
+    quality = ''
+    language = ''
+    plot = ''
+    poster = ''
+
+    if 'quality' in dict and dict['quality'] != '':
+        quality = dict['quality'].upper()
+    if 'language' in dict and dict['language'] != '':
+        language = dict['language'].upper()
+    if 'plot' in dict and dict['plot'] != '':
+        plot = dict['plot']
+    if 'poster' in dict and dict['poster'] != '':
+        poster = dict['poster']
+
+    return quality, language, plot, poster
+
+
+def set_title(title, language, quality):
+    logger.info()
+
+    if not config.get_setting('unify'):
+        if quality != '':
+            title += ' [%s]' % quality
+        if language != '':
+            if not isinstance(language, list):
+                title += ' [%s]' % language.upper()
+            else:
+                title += ' '
+                for lang in language:
+                    title += '[%s]' % lang.upper()
+
+    return title.capitalize()
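
For reference, a minimal channel file that show_channels/list_all/findvideos above would accept might look like this (field names are taken from the parser code; the channel name, titles, and URL are placeholders):

    {
        "channel_name": "Mi canal",
        "movies_list": [
            {
                "title": "Una película",
                "year": "2017",
                "quality": "720",
                "language": "lat",
                "links": [
                    {"url": "https://example.com/video.mp4", "quality": "720", "language": "lat"}
                ]
            }
        ]
    }

A series channel would instead carry a "tvshows_list" whose entries hold a "seasons_list" of {"season", "link"} objects, each season link resolving to a JSON with an "episodes_list".
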
@@ -59,7 +59,7 @@ def lista(item):
         plot = scrapertools.find_single_match(match,'<p class="summary">(.*?)</p>')
         thumbnail = scrapertools.find_single_match(match,'<img src="([^"]+)"')
         itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
-                             thumbnail=thumbnail, plot=plot, viewmode="movie") )
+                             fanart=thumbnail, thumbnail=thumbnail, plot=plot, viewmode="movie") )
     next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="siguiente">')
     if next_page!="":
         itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )

@@ -79,8 +79,8 @@ def mainpage(item):
     itemlist = []
     data1 = httptools.downloadpage(item.url).data
     data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
-    patron_sec='<ul id="main_header".+?>(.+?)<\/ul><\/div>'
-    patron='<a href="([^"]+)">([^"]+)<\/a>'#scrapedurl, #scrapedtitle
+    patron_sec='<divclass=head-main-nav>(.+?)peliculas\/>'
+    patron='<ahref=([^"]+)>([^"]+)<\/a>'#scrapedurl, #scrapedtitle
     data = scrapertools.find_single_match(data1, patron_sec)
     matches = scrapertools.find_multiple_matches(data, patron)
     if item.title=="Géneros" or item.title=="Categorías":

@@ -111,8 +111,8 @@ def lista(item):
             '<div id="archive-content" class="animation-2 items">(.*)<a href=\'')
     else:
         data_lista = scrapertools.find_single_match(data,
-            '<div class="items">(.+?)<\/div><\/div><div class=.+?>')
-    patron = '<img src="([^"]+)" alt="([^"]+)">.+?<a href="([^"]+)">.+?<div class="texto">(.+?)<\/div>'
+            '<divclass=items><article(.+?)<\/div><\/article><\/div>')
+    patron = '<imgsrc=([^"]+) alt="([^"]+)">.+?<ahref=([^"]+)><divclass=see>.+?<divclass=texto>(.+?)<\/div>'
     matches = scrapertools.find_multiple_matches(data_lista, patron)
     for scrapedthumbnail,scrapedtitle, scrapedurl, scrapedplot in matches:
         if item.title=="Peliculas Animadas":

@@ -133,13 +133,15 @@ def episodios(item):
     itemlist = []
     infoLabels = {}
     data = httptools.downloadpage(item.url).data
-    patron = '(?s)<ul class="episodios">(.+?)<span>Compartido'
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    logger.info(data)
+    patron = '<divid=episodes (.+?)<\/div><\/div><\/div>'
     data_lista = scrapertools.find_single_match(data,patron)
     contentSerieName = item.title
-    patron_caps = 'href="([^"]+)".*?'
-    patron_caps += 'src="([^"]+)".*?'
-    patron_caps += 'numerando">([^<]+).*?'
-    patron_caps += 'episodiotitle">.*?>([^<]+)'
+    patron_caps = 'href=(.+?)><imgalt=".+?" '
+    patron_caps += 'src=([^"]+)><\/a>.*?'
+    patron_caps += 'numerando>([^<]+).*?'
+    patron_caps += 'episodiotitle>.*?>([^<]+)<\/a>'
     matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
     for scrapedurl, scrapedthumbnail, scrapedtempepi, scrapedtitle in matches:
         tempepi=scrapedtempepi.split(" - ")

@@ -148,7 +150,7 @@ def episodios(item):
         title="{0}x{1} - ({2})".format(tempepi[0], tempepi[1].zfill(2), scrapedtitle)
         item.infoLabels["season"] = tempepi[0]
         item.infoLabels["episode"] = tempepi[1]
-        itemlist.append(item.clone(thumbnail=scrapedthumbnail,
+        itemlist.append(item.clone(#thumbnail=scrapedthumbnail,
                                    action="findvideos", title=title, url=scrapedurl))
     if config.get_videolibrary_support() and len(itemlist) > 0:
         itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + contentSerieName + " a la videoteca[/COLOR]", url=item.url,
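
Note: scrapedtempepi carries season and episode in one string such as "1 - 5". A quick check of the formatting logic above, with an illustrative scraped value:

    tempepi = "1 - 5".split(" - ")   # -> ['1', '5']
    "{0}x{1} - ({2})".format(tempepi[0], tempepi[1].zfill(2), "Título")   # -> '1x05 - (Título)'
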
@@ -6,6 +6,7 @@ import urlparse
 from core import scrapertools
 from core.item import Item
 from platformcode import logger
+from core import httptools


 def mainlist(item):
@@ -18,7 +19,7 @@ def mainlist(item):
     # ------------------------------------------------------
     # Descarga la página
     # ------------------------------------------------------
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     # logger.info(data)

     # ------------------------------------------------------

@@ -68,7 +69,7 @@ def play(item):
     logger.info()
     itemlist = []
     # Descarga la página
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     logger.info(data)

     # Extrae las películas

@@ -69,7 +69,7 @@ def lista(item):
         url="https:" + scrapedurl
         thumbnail="https:" + scrapedthumbnail
         itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url, thumbnail=thumbnail,
-                             plot=scrapedplot) )
+                             fanart=thumbnail, plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<li class="float-xs-right"><a href=\'([^\']+)\' title=\'Pagina \d+\'>')
     if next_page == "":
         next_page = scrapertools.find_single_match(data,'<li><a href=\'([^\']+)\' title=\'Pagina \d+\'>»</a>')

@@ -57,8 +57,8 @@ def lista(item):
     for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
         plot = ""
         url = urlparse.urljoin(item.url,scrapedurl)
-        itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url, thumbnail=scrapedthumbnail,
-                             plot=plot, contentTitle = scrapedtitle) )
+        itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url,
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=plot, contentTitle = scrapedtitle) )
     next_page = scrapertools.find_single_match(data, '<div class="naviright"><a href="([^"]+)">Siguiente »</a>')
     if next_page:
         next_page = urlparse.urljoin(item.url, next_page)

@@ -18,6 +18,7 @@ def mainlist(item):
     itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/popular/"))
     itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top/"))
     itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/channels/"))
+    itemlist.append( Item(channel=item.channel, title="PornStar" , action="categorias", url=host + "/pornstars/"))
     itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
     itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
     return itemlist

@@ -50,7 +51,13 @@ def categorias(item):
         scrapedtitle = scrapedtitle.replace("movies", "") + " (" + cantidad + ")"
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
+    next_page_url = scrapertools.find_single_match(data,'<li itemprop="url" class="current">.*?<a href="([^"]+)"')
+    if next_page_url!="":
+        next_page_url = urlparse.urljoin(item.url,next_page_url)
+        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
+    if "/categories/" in item.url:
+        itemlist = sorted(itemlist, key=lambda i: i.title)
     return itemlist


@@ -69,7 +76,7 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
-                             plot=plot, contentTitle = contentTitle) )
+                             fanart=thumbnail, plot=plot, contentTitle = contentTitle) )
     next_page_url = scrapertools.find_single_match(data,'<li itemprop="url" class="current">.*?<a href="([^"]+)"')
     if next_page_url!="":
         next_page_url = urlparse.urljoin(item.url,next_page_url)

@@ -48,7 +48,7 @@ def categorias(item):
         scrapedplot = ""
         scrapedtitle = scrapedtitle + " (" + cantidad + ")"
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail , plot=scrapedplot) )
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     return itemlist


@@ -59,11 +59,14 @@ def lista(item):
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     patron = '<a href="([^"]+)" itemprop="url">.*?'
     patron += '<img src="([^"]+)" alt="([^"]+)">.*?'
-    patron += '<span itemprop="duration" class="length">(.*?)</span>'
+    patron += '<span itemprop="duration" class="length">(.*?)</span>(.*?)<span class="thumb-info">'
     matches = re.compile(patron,re.DOTALL).findall(data)
-    for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
+    for scrapedurl,scrapedthumbnail,scrapedtitle,duracion,calidad in matches:
         url = scrapedurl
-        title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
+        if ">HD<" in calidad:
+            title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " +scrapedtitle
+        else:
+            title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
         contentTitle = scrapedtitle
         thumbnail = scrapedthumbnail
         plot = ""

@@ -8,7 +8,7 @@ from core.item import Item
 from platformcode import config, logger
 from core import httptools

-
+# BLOQUEO ESET INTERNET SECURITY
 def mainlist(item):
     logger.info()
     itemlist = []

@@ -30,7 +30,7 @@ def categorias(item):
         scrapedplot = ""
         scrapedthumbnail = ""
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                             thumbnail=scrapedthumbnail, plot=scrapedplot) )
     return itemlist


@@ -47,7 +47,7 @@ def lista(item):
     for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
         plot = ""
         itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail , plot=plot , viewmode="movie") )
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=plot , viewmode="movie") )
     next_page = scrapertools.find_single_match(data,'<a href="([^"]+)">Next')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -52,7 +52,7 @@ def categorias(item):
 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     patron = '<a class="thumb tco1" href="([^"]+)">.*?'
     patron += 'src="([^"]+)".*?'
     patron += 'alt="([^"]+)".*?'

@@ -69,7 +69,7 @@ def lista(item):
         thumbnail = scrapedthumbnail + "|Referer=%s" %host
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
-                             plot=plot, contentTitle = contentTitle))
+                             fanart=thumbnail, plot=plot, contentTitle = contentTitle))
     next_page = scrapertools.find_single_match(data,'<a class="bgco2 tco3" rel="next" href="([^"]+)">></a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -16,6 +16,7 @@ def mainlist(item):
     itemlist = []
     itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host))
     itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-raped/"))
+    itemlist.append( Item(channel=item.channel, title="Modelos" , action="categorias", url=host + "/models/most-popular/"))
     itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
     itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
     return itemlist

@@ -39,25 +40,32 @@ def categorias(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    patron = '<li class="thumb thumb-category">.*?'
+    patron = '<li class="thumb thumb-\w+">.*?'
     patron += '<a href="([^"]+)">.*?'
-    patron += '<img class="lazy" data-original="([^"]+)">.*?'
-    patron += '<div class="name">([^"]+)</div>.*?'
-    patron += '<div class="count">(\d+)</div>'
+    patron += '<img class="lazy" data-original="([^"]+)".*?'
+    patron += '<div class="title">(.*?)</a>'
     matches = re.compile(patron,re.DOTALL).findall(data)
-    for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
+    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
         scrapedplot = ""
-        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
+        title = scrapertools.find_single_match(scrapedtitle,'<div class="text">([^<]+)<')
+        if "/categories/" in item.url:
+            cantidad = scrapertools.find_single_match(scrapedtitle,'<div class="count">(\d+)</div>')
+            scrapedtitle = scrapertools.find_single_match(scrapedtitle,'<div class="name">([^<]+)</div>')
+            title = scrapedtitle + " (" + cantidad + ")"
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
-        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail , plot=scrapedplot) )
+        itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl,
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<li class="pagination-next"><a href="([^"]+)">')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
+        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
     return itemlist


 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     patron = '<div class="thumb">.*?'
     patron += '<a href="([^"]+)".*?'

@@ -72,7 +80,7 @@ def lista(item):
         plot = ""
         year = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
-                             plot=plot, contentTitle = contentTitle))
+                             fanart=thumbnail, plot=plot, contentTitle = contentTitle))
     next_page = scrapertools.find_single_match(data,'<li class="pagination-next"><a href="([^"]+)">')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -83,7 +91,7 @@ def lista(item):
 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     patron = '<meta property="og:video" content="([^"]+)"'
     matches = scrapertools.find_multiple_matches(data, patron)
     for scrapedurl in matches:

@@ -77,11 +77,12 @@ def lista(item):
     patron += '<img src="([^"]+)"'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
-        title = scrapedtitle
+        calidad = scrapertools.find_single_match(scrapedtitle, '(\(.*?\))')
+        title = "[COLOR yellow]" + calidad + "[/COLOR] " + scrapedtitle.replace( "%s" % calidad, "")
         thumbnail = scrapedthumbnail.replace("jpg#", "jpg")
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
-                             plot=plot, fulltitle=title) )
+                             fanart=thumbnail, plot=plot, fulltitle=title) )
     next_page = scrapertools.find_single_match(data, '<div class="nav-previous"><a href="([^"]+)"')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -45,7 +45,7 @@ def categorias(item):
         scrapedplot = ""
         scrapedtitle = scrapedtitle + " (" + cantidad + ")"
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail , plot=scrapedplot) )
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next">Next page »</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -56,7 +56,7 @@ def categorias(item):
 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     patron = '<div class="video-thumb"><a href="([^"]+)" class="title".*?>([^"]+)</a>.*?'
     patron += '<span class="time">([^<]+)</span>.*?'

@@ -69,7 +69,7 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
-                             plot=plot, contentTitle = contentTitle))
+                             fanart=thumbnail, plot=plot, contentTitle = contentTitle))
     next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next">Next page »</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -80,7 +80,7 @@ def lista(item):
 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     scrapedurl = scrapertools.find_single_match(data,'<source data-fluid-hd src="([^"]+)/?br=\d+"')
     if scrapedurl=="":
         scrapedurl = scrapertools.find_single_match(data,'<source src="([^"]+)/?br=\d+"')

@@ -270,6 +270,7 @@ def entradas(item):
     item.text_color = color2
     # Descarga la página
     data = httptools.downloadpage(item.url).data
+    data = re.sub("\n", "", data)
     if "valores" in item and item.valores:
         itemlist.append(item.clone(action="", title=item.valores, text_color=color4))

@@ -295,7 +296,6 @@ def entradas(item):
     # Extrae las entradas
     if item.extra == "Novedades":
         data2 = data.split("<h2>Últimas Películas Agregadas y Actualizadas</h2>", 1)[1]
-
         entradas = scrapertools.find_multiple_matches(data2, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')
     else:
         entradas = scrapertools.find_multiple_matches(data, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')

@@ -381,6 +381,7 @@ def findvideos(item):

     # Descarga la pagina
     data = httptools.downloadpage(item.url).data
+    data = re.sub("\n", "", data)
     sinopsis = scrapertools.find_single_match(data, '<h2>Sinopsis</h2>.*?>(.*?)</p>')
     item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
     # Busca en tmdb si no se ha hecho antes

@@ -409,8 +410,8 @@ def findvideos(item):
         if server == "Ul":
             server = "Uploaded"
         title = "%s [%s][%s]" % (server, idioma, calidad)
-        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, language=idioma, quality=calidad,
-                             server=server, infoLabels=item.infoLabels))
+        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, language=idioma,
+                             quality=calidad, server=server, infoLabels=item.infoLabels))

     patron = 'id="(embed[0-9]*)".*?<div class="calishow">(.*?)<.*?src="([^"]+)"'
     matches = scrapertools.find_multiple_matches(data, patron)

@@ -420,8 +421,8 @@ def findvideos(item):
         title = "Directo"
         idioma = scrapertools.find_single_match(data, 'href="#%s".*?>([^<]+)<' % id_embed)
         title = "%s [%s][%s]" % (title.capitalize(), idioma, calidad)
-        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, language=idioma, quality=calidad,
-                             server=server))
+        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, language=idioma,
+                             quality=calidad, server=server))
     # Requerido para FilterTools

     itemlist = filtertools.get_links(itemlist, item, list_language)

@@ -25,7 +25,7 @@ list_servers = ['openload', 'gamovideo', 'streamplay', 'flashx', 'streamito', 's
 def get_source(url):
     logger.info()
     data = httptools.downloadpage(url).data
-    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
     return data

 def mainlist(item):

@@ -51,8 +51,8 @@ def list_all(item):

     itemlist = []
     data = get_source(item.url)
-    data1 = scrapertools.find_single_match(data, '<div class=col-md-80 lado2(.*?)</div></div></div>')
-    patron = '<a class=poster href=(.*?) title=(.*?)> <img.*?src=(.*?) alt'
+    data1 = scrapertools.find_single_match(data, '<div class="col-md-80 lado2"(.*?)</div></div></div>')
+    patron = '<a class="poster" href="([^"]+)" title="([^"]+)"><img.*?src="([^"]+)"'
     matches = re.compile(patron, re.DOTALL).findall(data1)

     for scrapedurl, scrapedtitle, scrapedthumbnail in matches:

@@ -70,7 +70,7 @@ def list_all(item):

     if itemlist != []:
         actual_page_url = item.url
-        next_page = scrapertools.find_single_match(data, '<li><a href=([^ ]+)><span aria-hidden=true>»</span>')
+        next_page = scrapertools.find_single_match(data, '<li><a href="([^"]+)"><span aria-hidden="true">»</span>')
         if next_page != '':
             itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>', url=next_page,
                                  thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'))

@@ -83,7 +83,7 @@ def seasons(item):

     data = get_source(item.url)

-    patron = '</span> Temporada (\d+) </a>'
+    patron = '</span>Temporada (\d+)</a>'

     matches = re.compile(patron, re.DOTALL).findall(data)
     infoLabels = item.infoLabels

@@ -118,9 +118,9 @@ def episodesxseason(item):
     data = get_source(item.url)

     if item.extra1 != 'library':
-        patron = '<tr><td>.*?<a href=([^\s]+) title=Temporada %s, Episodio (\d+.*?)>' % item.contentSeasonNumber
+        patron = '<tr><td>.*?<a href="([^"]+)" title="Temporada %s, Episodio (\d+.*?)>' % item.contentSeasonNumber
     else:
-        patron = '<tr><td>.*?<a href=([^\s]+) title=Temporada \d+, Episodio (\d+.*?)>'
+        patron = '<tr><td>.*?<a href="([^"]+)" title=Temporada \d+, Episodio (\d+.*?)>'

     matches = re.compile(patron, re.DOTALL).findall(data)
     infoLabels = item.infoLabels

@@ -148,7 +148,7 @@ def genres(item):
     itemlist = []
     norep = []
     data = get_source(item.url)
-    patron = '<a href=([^>]+)><span.*?<i>(.*?)</i>.*?>(.*?)</b>'
+    patron = '<a href="([^"]+)"><span.*?<i>([^<])</i>.*?>(.*?)</b>'
     matches = re.compile(patron, re.DOTALL).findall(data)

     for scrapedurl, scrapedtitle, cantidad in matches:

@@ -167,7 +167,7 @@ def findvideos(item):

     itemlist = []
     data = get_source(item.url)
-    patron = '<td><a ([^\s]+) class=btn.*?style=margin:.*?<span>.*?</span></td><td>(.*?)</td><td>.*?</td>'
+    patron = '<td><a href="([^"]+)" class="btn.*?style="margin:.*?<span>.*?</span></td><td>(.*?)</td><td>.*?</td>'
     matches = re.compile(patron, re.DOTALL).findall(data)

     for url, language in matches:
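
Note: get_source() previously stripped double quotes from the downloaded page, so every pattern in this file matched unquoted attributes (href=..., class=btn). With the quote removed from the re.sub character class, attributes keep their quoting, and all the patterns above switch to the quoted href="([^"]+)" form. An illustration with a made-up tag (the &nbsp; token here mirrors the reconstructed regex):

    html = '<a class="poster" href="/movie/1">'
    re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', '', html)  # old behaviour: '<a class=poster href=/movie/1>'
    re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', '', html)    # new behaviour: quotes survive
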
@@ -10,6 +10,7 @@ from core import httptools

 host = 'http://javl.in'

+# BLOQUEO ANTIVIRUS

 def mainlist(item):
     logger.info()

@@ -27,7 +27,7 @@ def mainlist(item):
         fanart = ''

         itemlist.append(
-            Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=fanart))
+            Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail))

     # Paginacion
     title = ''

@@ -50,7 +50,7 @@ def categorias(item):
         scrapedplot = ""
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail , plot=scrapedplot) )
-    return itemlist
+    return sorted(itemlist, key=lambda i: i.title)


 def lista(item):

@@ -51,7 +51,7 @@ def categorias(item):
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail , plot=scrapedplot) )

-    return itemlist
+    return sorted(itemlist, key=lambda i: i.title)


 def lista(item):
@@ -67,7 +67,7 @@ def lista(item):
         scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle
         scrapedurl = "http://xxx.justporno.tv/embed/" + scrapedurl
         itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail , plot=scrapedplot) )
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )

     if item.extra:
         next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(.*?)>')

plugin.video.alfa/channels/maxipelis24.py (4 changed lines; Normal file → Executable file)
@@ -12,7 +12,7 @@ from core.item import Item
 from platformcode import config, logger
 from channelselector import get_thumb

-host = "https://maxipelis24.tv"
+host = "https://maxipelis24.live"

 IDIOMAS = {'Latino': 'Latino', 'Sub':'VOSE', 'Subtitulado': 'VOSE', 'Español': 'CAST', 'Castellano':'CAST'}
 list_language = IDIOMAS.values()

@@ -135,7 +135,7 @@ def findvideos(item):
                            action='play', language=IDIOMAS[idioma], infoLabels=item.infoLabels)
             itemlist.append(new_item)

-        if 'maxipelis24.tv/hideload/?' in link:
+        if '/hideload/?' in link:
             id_letter = scrapertools.find_single_match(link, '?(\w)d')
             id_type = '%sd' % id_letter
             ir_type = '%sr' % id_letter

@@ -64,7 +64,7 @@ def peliculas(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
-                             plot=plot, contentTitle=contentTitle))
+                             fanart=thumbnail, plot=plot, contentTitle=contentTitle))
     next_page_url = scrapertools.find_single_match(data,'<a href=\'([^\']+)\' class="next">Next >></a>')
     if next_page_url!="":
         next_page_url = urlparse.urljoin(item.url,next_page_url)

@@ -8,15 +8,15 @@ from core.item import Item
 from core import servertools
 from core import httptools

-host = 'https://www.muchoporno.xxx'
+host = 'https://www.pornburst.xxx'

 def mainlist(item):
     logger.info()
     itemlist = []
-    itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host))
+    itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/page3.html"))
     itemlist.append( Item(channel=item.channel, title="Pornstars" , action="categorias", url=host + "/pornstars/"))
-    #itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/sitios/"))
-    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categorias/"))
+    itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/sites/"))
+    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
     itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
     return itemlist

@@ -39,11 +39,12 @@ def categorias(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    if "/sitios/" in item.url:
-        patron = '<div class="muestra-escena muestra-canales">.*?href="(.*?)">.*?'
-        patron += 'src="(.*?)".*?'
-        patron += '<a title="(.*?)".*?'
-        patron += '</span> (.*?) videos</span>'
+    if "/sites/" in item.url:
+        patron = '<div class="muestra-escena muestra-canales">.*?'
+        patron += 'href="([^"]+)">.*?'
+        patron += 'src="([^"]+)".*?'
+        patron += '<a title="([^"]+)".*?'
+        patron += '</span> (\d+) videos</span>'
     if "/pornstars/" in item.url:
         patron = '<a class="muestra-escena muestra-pornostar" href="([^"]+)">.*?'
         patron += 'src="([^"]+)".*?'

@@ -62,8 +63,8 @@ def categorias(item):
         scrapedtitle = scrapedtitle + cantidad
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail , plot=scrapedplot) )
-    next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)">Siguiente</a></li>')
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) )
+    next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)"')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
         itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )

@@ -74,7 +75,7 @@ def categorias(item):
 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     patron = '<a class="muestra-escena"\s*href="([^"]+)".*?'
     patron += 'data-stats-video-name="([^"]+)".*?'

@@ -89,8 +90,8 @@ def lista(item):
         plot = ""
         year = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
-                             plot=plot, contentTitle = contentTitle))
-    next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)">Siguiente</a></li>')
+                             fanart=thumbnail, plot=plot, contentTitle = contentTitle))
+    next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)"')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
         itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )

@@ -100,7 +101,7 @@ def lista(item):
 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     patron = '<source src="([^"]+)" type="video/mp4"'
     matches = scrapertools.find_multiple_matches(data, patron)
     for scrapedurl in matches:

@@ -72,7 +72,7 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
-                             plot=plot, contentTitle=title))
+                             fanart=thumbnail, plot=plot, contentTitle=title))
     # <li class='active'><a class=''>1</a></li><li><a rel='nofollow' class='page larger' href='https://pandamovies.pw/movies/page/2'>
     next_page = scrapertools.find_single_match(data, '<li class=\'active\'>.*?href=\'([^\']+)\'>')
     if next_page == "":

@@ -6,37 +6,26 @@ import urlparse
 from core import scrapertools
 from core.item import Item
 from platformcode import logger
+from core import httptools


 def mainlist(item):
     logger.info()

     itemlist = []
     if item.url == "":
         item.url = "http://www.peliculaseroticas.net/"

-    # Descarga la página
-    data = scrapertools.cachePage(item.url)
-
-    # Extrae las entradas de la pagina seleccionada
-    patron = '<div class="post"[^<]+'
-    patron += '<a href="([^"]+)">([^<]+)</a[^<]+'
-    patron += '<hr[^<]+'
-    patron += '<a[^<]+<img src="([^"]+)"'
-
+    data = httptools.downloadpage(item.url).data
+    patron = '<div class="post">.*?'
+    patron += '<a href="([^"]+)">([^<]+)</a>.*?'
+    patron += '<img src="([^"]+)"'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    itemlist = []

     for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
         url = urlparse.urljoin(item.url, scrapedurl)
         title = scrapedtitle.strip()
         thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
         plot = ""

-        # Añade al listado
-        itemlist.append(
-            Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
-                 plot=plot, viewmode="movie", folder=True))
-
+        itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
+                             fanart=thumbnail, thumbnail=thumbnail, plot=plot, viewmode="movie"))
     # Extrae la marca de siguiente página
     if item.url == "http://www.peliculaseroticas.net/":
         next_page_url = "http://www.peliculaseroticas.net/cine-erotico/2.html"

@@ -44,8 +33,6 @@ def mainlist(item):
         current_page = scrapertools.find_single_match(item.url, "(\d+)")
         next_page = int(current_page) + 1
         next_page_url = "http://www.peliculaseroticas.net/cine-erotico/" + str(next_page) + ".html"

-    itemlist.append(
-        Item(channel=item.channel, action="mainlist", title=">> Página siguiente", url=next_page_url, folder=True))
-
+    itemlist.append( Item(channel=item.channel, action="mainlist", title=">> Página siguiente", url=next_page_url))
     return itemlist

@@ -35,7 +35,8 @@ def lista(item):
if duration:
scrapedtitle += " (%s)" % duration

itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail))
itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))

# Extrae la marca de siguiente página
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)"')

@@ -70,7 +70,7 @@ def peliculas(item):
scrapedthumbnail = "http:" + scrapedthumbnail
url = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=scrapedthumbnail,
plot=plot, contentTitle = title))
fanart=scrapedthumbnail, plot=plot, contentTitle = title))
next_page = scrapertools.find_single_match(data, '<a class="btn_wrapper__btn" href="([^"]+)">Next</a></li>')
if next_page:
next_page = urlparse.urljoin(item.url, next_page)

@@ -10,14 +10,16 @@ from core import httptools

host = 'https://www.porn300.com'

#BLOQUEO ANTIVIRUS STREAMCLOUD

def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/es/videos/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/es/mas-vistos/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/es/mas-votados/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/es/canales/"))
itemlist.append( Item(channel=item.channel, title="Pornstars" , action="categorias", url=host + "/es/pornostars/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/es/canales/?page=1"))
itemlist.append( Item(channel=item.channel, title="Pornstars" , action="categorias", url=host + "/es/pornostars/?page=1"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/es/categorias/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -56,8 +58,11 @@ def categorias(item):
scrapedtitle = scrapedtitle + " (" + cantidad +")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/?sort=latest"
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
if next_page=="":
if "/?page=1" in item.url:
next_page=urlparse.urljoin(item.url,"/?page=2")
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
@@ -67,7 +72,7 @@ def categorias(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a itemprop="url" href="([^"]+)" data-video-id="\d+" title="([^"]+)">.*?'
patron += '<img itemprop="thumbnailUrl" src="([^"]+)".*?'
@@ -81,7 +86,7 @@ def lista(item):
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle) )
fanart=thumbnail, plot=plot, contentTitle = contentTitle) )
next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -91,11 +96,10 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<source src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl in matches:
url = scrapedurl
itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
for url in matches:
itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
return itemlist

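
Note the fallback porn300's categorias hunk adds: the first /?page=1 listing does not always emit a <link rel="next"> tag, so the code assumes a second page exists and builds it by hand. A condensed sketch of that paginator logic (the helper name is illustrative, not part of the patch):

import urlparse
from core import scrapertools

def next_page_url(data, url):
    # Prefer the canonical <link rel="next"> tag when the page provides it
    next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
    if next_page == "" and "/?page=1" in url:
        # First page may omit the tag; assume page 2 exists
        next_page = "/?page=2"
    if next_page != "":
        return urlparse.urljoin(url, next_page)
    return ""
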
@@ -59,16 +59,16 @@ def categorias(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<article id=post-\d+.*?'
patron += '<img class="center cover" src=([^"]+) alt="([^"]+)".*?'
patron += '<blockquote>.*?<a href=(.*?) target=_blank>'
patron += '<blockquote><p> <a href=(.*?) target=_blank'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedthumbnail,scrapedtitle,scrapedurl in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<a class=nextpostslink rel=next href=(.*?)>')
if next_page!="":
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
@@ -77,6 +77,7 @@ def lista(item):

def play(item):
logger.info()
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.title = item.title

@@ -52,7 +52,7 @@ def lista(item):
scrapedplot = ""
scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle
itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot))
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot))
next_page = scrapertools.find_single_match(data, '<nav id="page_nav"><a href="(.*?)"')
if next_page != "":
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))

@@ -2,6 +2,8 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
import base64

from core import scrapertools
from core import servertools
from core.item import Item
@@ -10,6 +12,7 @@ from core import httptools

host = 'http://www.pornhive.tv/en'


def mainlist(item):
logger.info()
itemlist = []
@@ -66,22 +69,25 @@ def lista(item):
title = scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title, url=scrapedurl, thumbnail=thumbnail,
plot=plot, contentTitle=title))
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title, url=scrapedurl, thumbnail=thumbnail,
fanart=thumbnail, plot=plot, contentTitle=title))
next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" data-ci-pagination-page="\d+" rel="next">Next ›')
if next_page != "" :
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist


def play(item):
def findvideos(item):
logger.info()
itemlist = servertools.find_video_items(data=item.url)
data = scrapertools.cachePage(item.url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videochannel=item.channel
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = ';extra_urls\[\d+\]=\'([^\']+)\''
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl in matches:
scrapedurl = base64.b64decode(scrapedurl)
itemlist.append(item.clone(action="play", title="%s", url=scrapedurl))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return itemlist

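
pornhive's play() becomes findvideos() here: instead of handing the page URL to servertools.find_video_items, the new code decodes the base64-encoded extra_urls[n] entries embedded in the page's JavaScript and lets get_servers_itemlist resolve a server for each decoded link. Condensed, the whole flow in that hunk is:

import re, base64
from core import httptools
from core import servertools

def findvideos(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Player URLs sit in the page script as extra_urls[0]='...', extra_urls[1]='...'
    for encoded in re.findall(r";extra_urls\[\d+\]='([^']+)'", data):
        itemlist.append(item.clone(action="play", title="%s", url=base64.b64decode(encoded)))
    # The "%s" placeholder is filled in with the detected server name
    return servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
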
@@ -50,8 +50,8 @@ def categorias(item):
else:
url = urlparse.urljoin(item.url, scrapedurl + "?o=cm")
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, fanart=item.fanart,
thumbnail=scrapedthumbnail))
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url,
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail))
itemlist.sort(key=lambda x: x.title)
return itemlist

@@ -73,7 +73,7 @@ def peliculas(item):
title += ' [HD]'
url = urlparse.urljoin(item.url, url)
itemlist.append(
Item(channel=item.channel, action="play", title=title, url=url, fanart=item.fanart, thumbnail=thumbnail))
Item(channel=item.channel, action="play", title=title, url=url, fanart=thumbnail, thumbnail=thumbnail))
if itemlist:
# Paginador
patron = '<li class="page_next"><a href="([^"]+)"'
@@ -88,7 +88,7 @@ def peliculas(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '"defaultQuality":true,"format":"mp4","quality":"\d+","videoUrl":"(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl in matches:
@@ -96,3 +96,4 @@ def play(item):
itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
return itemlist

@@ -45,7 +45,7 @@ def categorias(item):
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist

@@ -63,7 +63,7 @@ def lista(item):
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = title))
fanart=thumbnail, plot=plot, contentTitle = title))
next_page = scrapertools.find_single_match(data, '<li class="direction"><a href="([^"]+)" data-ajax="pagination">')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)

@@ -10,6 +10,7 @@ from core import httptools

host = 'http://qwertty.net'


def mainlist(item):
logger.info()
itemlist = []
@@ -64,7 +65,7 @@ def lista(item):
scrapedplot = ""
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)">Next</a>')
if next_page=="":
next_page = scrapertools.find_single_match(data,'<li><a class="current">.*?<li><a href=\'([^\']+)\' class="inactive">')
@@ -77,10 +78,11 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data,'<meta itemprop="embedURL" content="([^"]+)"')
url = url.replace("pornhub.com/embed/", "pornhub.com/view_video.php?viewkey=")
data = scrapertools.cachePage(url)
data = httptools.downloadpage(url).data
# data = scrapertools.cachePage(url) https://www.spankwire.com/EmbedPlayer.aspx?ArticleId=14049072
if "xvideos" in url :
scrapedurl = scrapertools.find_single_match(data,'setVideoHLS\(\'([^\']+)\'')
if "pornhub" in url :

@@ -46,11 +46,12 @@ def catalogo(item):
scrapedplot = ""
scrapedtitle = scrapedtitle + " [COLOR yellow]" + cantidad + "[/COLOR] "
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page_url = scrapertools.find_single_match(data,'<a id="wp_navNext" class="js_pop_page" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
return itemlist

def categorias(item):
@@ -58,22 +59,30 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="category_item_wrapper">.*?<a href="([^"]+)".*?data-thumb_url="([^"]+)".*?alt="([^"]+)".*?<span class="category_count">\s+([^"]+) Videos'
patron = '<div class="category_item_wrapper">.*?'
patron += '<a href="([^"]+)".*?'
patron += 'data-src="([^"]+)".*?'
patron += 'alt="([^"]+)".*?'
patron += '<span class="category_count">([^"]+) Videos'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
cantidad = cantidad.strip()
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist


def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<img id="img_.*?data-path="([^"]+)".*?<span class="duration">(.*?)</a>.*?<a title="([^"]+)" href="([^"]+)">'
patron = '<img id="img_.*?data-path="([^"]+)".*?'
patron += '<span class="duration">(.*?)</a>.*?'
patron += '<a title="([^"]+)" href="([^"]+)">'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedthumbnail,duration,scrapedtitle,scrapedurl in matches:
url = urlparse.urljoin(item.url,scrapedurl)
@@ -82,23 +91,25 @@ def peliculas(item):
duration = scrapertools.find_single_match(duration, 'HD</span>(.*?)</span>')
title = "[COLOR yellow]" + duration + "[/COLOR] " + "[COLOR red]" + scrapedhd + "[/COLOR] " + scrapedtitle
else:
duration = duration.replace("<span class=\"vr-video\">VR</span>", "")
title = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle
title = title.replace(" </span>", "").replace("&nbsp;", "")
scrapedthumbnail = scrapedthumbnail.replace("{index}.", "1.")
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=scrapedthumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
if not "/premium/" in url:
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url,
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=plot, contentTitle = title) )
next_page_url = scrapertools.find_single_match(data,'<a id="wp_navNext" class="js_pop_page" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append(item.clone(action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
return itemlist


def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '"defaultQuality":true,"format":"",.*?"videoUrl"\:"([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl in matches:

@@ -29,6 +29,7 @@ def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
logger.debug(data)
return data

def mainlist(item):
@@ -59,10 +60,10 @@ def list_all(item):
itemlist = []

data = get_source(item.url)
patron = '<div class="post-thumbnail"><a href="([^"]+)" title="([^"]+)">.*?data-lazy-src="([^"]+)"'
patron = '<div class="post-thumbnail"><a href="([^"]+)".*?src="([^"]+)".*?title=".*?">([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
scrapedtitle = scrapedtitle.lower().replace('enlace permanente a', '').capitalize()
contentSerieName = scrapedtitle

@@ -13,25 +13,25 @@ host = "https://www.serviporno.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, action="videos", title="Útimos videos", url= host))
itemlist.append(
Item(channel=item.channel, action="videos", title="Más vistos", url="http://www.serviporno.com/mas-vistos/"))
itemlist.append(
Item(channel=item.channel, action="videos", title="Más votados", url="http://www.serviporno.com/mas-votados/"))
itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias",
url="http://www.serviporno.com/categorias/"))
itemlist.append(
Item(channel=item.channel, action="chicas", title="Chicas", url="http://www.serviporno.com/pornstars/"))
itemlist.append(
Item(channel=item.channel, action="search", title="Buscar", url="http://www.serviporno.com/search/?q="))
itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos",
url=host + "/ajax/homepage/?page=1", last= host))
itemlist.append(Item(channel=item.channel, action="videos", title="Más vistos",
url=host + "/ajax/most_viewed/?page=1", last= host + "/mas-vistos/"))
itemlist.append(Item(channel=item.channel, action="videos", title="Más votados",
url=host + "/ajax/best_rated/?page=1", last= host + "/mas-votados/"))
itemlist.append(Item(channel=item.channel, action="categorias", title="Canal",
url=host + "/ajax/list_producers/?page=1", last= host + "/sitios/"))
itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias", url= host + "/categorias/"))
itemlist.append(Item(channel=item.channel, action="chicas", title="Chicas",
url=host + "/ajax/list_pornstars/?page=1", last= host + "/pornstars/"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar", last=""))
return itemlist


def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.url = host + '/ajax/new_search/?q=%s&page=1' % texto
try:
return videos(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
@@ -42,29 +42,39 @@ def search(item, texto):
return []


def get_last_page(url):
logger.info()
data = httptools.downloadpage(url).data
last_page= int(scrapertools.find_single_match(data,'data-ajax-last-page="(\d+)"'))
return last_page


def videos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data

patron = '(?s)<div class="wrap-box-escena">.*?'
patron += '<div class="box-escena">.*?'
patron += '<a\s*href="([^"]+)".*?'
patron += 'data-stats-video-name="([^"]+)".*?'
patron += '<img\s*src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for url, title, thumbnail in matches:
patron += '<img\s*src="([^"]+)".*?'
patron += '<div class="duracion">([^"]+) min</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for url, title, thumbnail,duration in matches:
title = "[COLOR yellow]" + duration + "[/COLOR] " + title
url = urlparse.urljoin(item.url, url)
itemlist.append(Item(channel=item.channel, action='play', title=title, url=url, thumbnail=thumbnail))

# Paginador
patron = '<a href="([^<]+)">Siguiente »</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) > 0:
url = "http://www.serviporno.com" + matches[0]
itemlist.append(
Item(channel=item.channel, action="videos", title="Página Siguiente", url=url, thumbnail="", folder=True))

itemlist.append(Item(channel=item.channel, action='play', title=title, url=url, thumbnail=thumbnail, fanart=thumbnail))
# Paginador "Página Siguiente >>"
current_page = int(scrapertools.find_single_match(item.url, "/?page=(\d+)"))
if not item.last_page:
last_page = get_last_page(item.last)
else:
last_page = int(item.last_page)
if current_page < last_page:
next_page = "?page=" + str(current_page + 1)
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(Item(channel=item.channel, action="videos", title="Página Siguiente >>", text_color="blue",
url=next_page, thumbnail="", last_page=last_page))
return itemlist


@@ -72,18 +82,28 @@ def chicas(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)

patron = '<div class="box-chica">.*?'
patron += '<a href="([^"]+)" title="">.*?'
patron += '<img class="img" src=\'([^"]+)\' width="175" height="150" border=\'0\' alt="[^"]+"/>.*?'
patron += '</a>[^<]{1}<h4><a href="[^"]+" title="">([^"]+)</a></h4>.*?'
patron += '<img class="img" src=\'([^"]+)\' width="175" height="150" border=\'0\' alt="[^"]+" />.*?'
patron += '<h4><a href="[^"]+" title="">([^"]+)</a></h4>.*?'
patron += '<a class="total-videos" href="[^"]+" title="">([^<]+)</a>'

matches = re.compile(patron, re.DOTALL).findall(data)
for url, thumbnail, title, videos in matches:
url = urlparse.urljoin("http://www.serviporno.com", url)
last = urlparse.urljoin(item.url, url)
url= last.replace("/pornstar", "/ajax/show_pornstar") + "?page=1"
title = title + " (" + videos + ")"
itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, thumbnail=thumbnail, plot=""))
itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, last=last, thumbnail=thumbnail, fanart=thumbnail))
# Paginador "Página Siguiente >>"
current_page = int(scrapertools.find_single_match(item.url, "/?page=(\d+)"))
if not item.last_page:
last_page = get_last_page(item.last)
else:
last_page = int(item.last_page)
if current_page < last_page:
next_page = "?page=" + str(current_page + 1)
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(Item(channel=item.channel, action="chicas", title="Página Siguiente >>", text_color="blue",
url=next_page, thumbnail="", last_page=last_page))
return itemlist


@@ -91,16 +111,25 @@ def categorias(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)

patron = '<div class="wrap-box-escena">.*?'
patron += '<div class="cat box-escena">.*?'
patron += '<a href="([^"]+)"><img src="([^"]+)" alt="Webcam" height="150" width="175" border=0 /></a>.*?'
patron += '<h4><a href="[^"]+">([^<]+)</a></h4>'

patron = '<div class="wrap-box-escena.*?'
patron += 'href="([^"]+)"><img src="([^"]+)".*?'
patron += '<h4.*?<a href="[^"]+">([^<]+)</a></h4>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, thumbnail, title in matches:
url = urlparse.urljoin(item.url, url)
itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, thumbnail=thumbnail, plot=""))
last = urlparse.urljoin(item.url, url)
url= last.replace("/videos-porno", "/ajax/show_category").replace("/sitio","/ajax/show_producer") + "?page=1"
itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, last=last, thumbnail=thumbnail, plot=""))
# Paginador "Página Siguiente >>"
current_page = int(scrapertools.find_single_match(item.url, "/?page=(\d+)"))
if not item.last_page:
last_page = get_last_page(item.last)
else:
last_page = int(item.last_page)
if current_page < last_page:
next_page = "?page=" + str(current_page + 1)
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(Item(channel=item.channel, action="categorias", title="Página Siguiente >>", text_color="blue",
url=next_page, thumbnail="", last_page=last_page))
return itemlist


@@ -113,3 +142,4 @@ def play(item):
Item(channel=item.channel, action="play", server="directo", title=item.title, url=url, thumbnail=item.thumbnail,
plot=item.plot, folder=False))
return itemlist

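
serviporno now drives every listing through the site's /ajax/ endpoints, which return bare item markup with no "next" link. The patch therefore reads data-ajax-last-page once from the plain HTML page (item.last, via get_last_page above) and caches it on the item as last_page, then builds ?page=N+1 links until the limit. The paginator repeated in videos(), chicas() and categorias() condenses to roughly this (helper name illustrative, not part of the patch):

import urlparse
from core import scrapertools

def add_next_page(itemlist, item, action):
    # The page number is carried in the ajax URL itself (?page=N)
    current_page = int(scrapertools.find_single_match(item.url, "page=(\d+)"))
    if not item.last_page:
        # One extra request against the HTML page resolves the total page count
        last_page = get_last_page(item.last)  # defined in the hunk above
    else:
        last_page = int(item.last_page)
    if current_page < last_page:
        next_page = urlparse.urljoin(item.url, "?page=" + str(current_page + 1))
        itemlist.append(item.clone(action=action, title="Página Siguiente >>", text_color="blue",
                                   url=next_page, last_page=last_page))
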
@@ -14,8 +14,8 @@ host = 'http://sexgalaxy.net'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Ultimos", action="lista", url=host + "/new-releases/"))
itemlist.append(Item(channel=item.channel, title="Peliculas", action="lista", url=host + "/full-movies/"))
itemlist.append(Item(channel=item.channel, title="Videos", action="lista", url=host + "/new-releases/"))
itemlist.append(Item(channel=item.channel, title="Canales", action="canales", url=host))
itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
@@ -81,10 +81,10 @@ def lista(item):
if calidad:
scrapedtitle = "[COLOR red]" + calidad + "[/COLOR] " + scrapedtitle
itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, plot=scrapedplot))
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, plot=scrapedplot))
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)"')
if next_page != "":
itemlist.append(item.clone(action="lista", title="Next page >>", text_color="blue", url=next_page))
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
return itemlist

@@ -19,7 +19,7 @@ def mainlist(item):
itemlist.append( Item(channel=item.channel, title="SexMUSIC" , action="lista", url=host + "/topics/sexo-music-videos/"))
itemlist.append( Item(channel=item.channel, title="Xshows" , action="lista", url=host + "/xshows/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
# itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist

@@ -42,30 +42,17 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
if item.title == "Canal" :
data = scrapertools.get_match(data,'>Best Porn Studios</a>(.*?)</ul>')
data = scrapertools.get_match(data,'>Adult Porn Parodies</a></li>(.*?)</ul>')
else:
data = scrapertools.get_match(data,'<div class="nav-wrap">(.*?)<ul class="sub-menu">')
itemlist.append( Item(channel=item.channel, action="lista", title="Big tit", url="https://sexofilm.com/?s=big+tits"))

patron = '<a href="(.*?)".*?>(.*?)</a>'
patron = '<a href="([^<]+)">([^<]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl) )
return itemlist


def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<div class="nav-wrap">(.*?)<ul class="sub-menu">')
patron = '<a href="(.*?)">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl) )
return itemlist

def anual(item):
logger.info()
itemlist = []
@@ -83,9 +70,9 @@ def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="post-thumbnail.*?<a href="([^"]+)" title="(.*?)".*?src="([^"]+)"'
patron = '<div class="post-thumbnail.*?<a href="([^"]+)".*?src="([^"]+)".*?title="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
plot = ""
title = scrapedtitle.replace(" Porn DVD", "").replace("Permalink to ", "").replace(" Porn Movie", "")
itemlist.append(item.clone(action="play", title=title, url=scrapedurl, thumbnail=scrapedthumbnail,

plugin.video.alfa/channels/siska.json (new file)
@@ -0,0 +1,15 @@
{
"id": "siska",
"name": "siska",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.siska.tv/images/siska.png?50",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

plugin.video.alfa/channels/siska.py (new file)
@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools

host = 'http://www.siska.tv/'


def mainlist(item):
logger.info()
itemlist = []

itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "newVideo.php?language=en"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "MostViewed.php?views=month&language=en"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "Channel.php?language=en"))

itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "index.php?category=1&language=en"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist


def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "search.php?q=%s&language=en&search=Search" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []


def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
data = scrapertools.get_match(data,'<div id="content">(.*?)<div class="maincat">')
patron = '<a href="(.*?)".*?'
patron += '<img src="(.*?)".*?alt="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle.replace("Watch Channel ", "")
url = urlparse.urljoin(item.url,scrapedurl)
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=url,
thumbnail=thumbnail , plot=scrapedplot) )
return itemlist


def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
if "catID=" in item.url:
patron = '<li><h3><a href="([^"]+)">.*?'
patron += '<img src="([^"]+)" class="imgt" alt="([^"]+)".*?'
patron += '<div class="time">(.*?)</div>'
else:
patron = '<li><h3><a href=\'([^\']+)\'>.*?'
patron += '<img src=\'([^\']+)\' class=\'imgt\' alt=\'(.*?)\'.*?'
patron += '<div class=\'time\'>(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches:
scrapedtime = scrapedtime.replace("Duration: ", "").replace(" : ", ":")
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
plot = ""
itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = scrapedtitle))
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"><span>Next')
if next_page == "":
next_page = scrapertools.find_single_match(data, '<a href=\'([^\']+)\' title=\'Next Page\'>')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
url=next_page) )
return itemlist

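
siska's lista() keeps two nearly identical patterns only because the site quotes attributes with " on catID pages and ' everywhere else. A quote-agnostic alternative (sketch only, not part of the patch) captures the opening quote and backreferences it:

import re

# data as downloaded in lista() above
patron = r"<li><h3><a href=(['\"])([^'\"]+)\1>.*?<img src=(['\"])([^'\"]+)\3"
for q1, scrapedurl, q2, scrapedthumbnail in re.compile(patron, re.DOTALL).findall(data):
    pass  # url and thumbnail captured regardless of the quoting style
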
@@ -1,7 +1,7 @@
{
"id": "spankwire",
"name": "spankwire",
"active": true,
"active": false,
"adult": true,
"language": ["*"],
"thumbnail": "https://cdn1-static-spankwire.spankcdn.net/apple-touch-icon-precomposed.png",

@@ -38,7 +38,7 @@ def videos(item):
logger.info()
itemlist = []
data = scrapertools.downloadpageGzip(item.url)
patron = '<div class="item-block item-normal col" >.*?'
patron = '<div class="item-block item-normal col".*?'
patron += '<a href="([^"]+)" title="([^"]+)">.*?'
patron += 'data-src="([^"]+)".*?'
patron += '</span> ([^"]+)<'

@@ -67,7 +67,7 @@ def catalogo(item):
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li><a class="pag-next" href="(.*?)">Next ></a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -78,19 +78,24 @@ def lista(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
data = scrapertools.get_match(data,'class="thumbs-container">(.*?)<div class="clearfix">')
patron = '<p class="btime">([^"]+)</p>.*?href="([^"]+)".*?src="([^"]+)".*?title="([^"]+)">'
patron = '<p class="btime">([^"]+)</p>.*?'
patron += '>(.*?)<img width=.*?'
patron += '="([^"]+)" class="thumb.*?'
patron += 'title="([^"]+)".*?'
patron += 'href="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for duracion,scrapedurl,scrapedthumbnail,scrapedtitle in matches:
for duracion,calidad,scrapedthumbnail,scrapedtitle,scrapedurl in matches:
url = scrapedurl
contentTitle = scrapedtitle
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
if ">HD<" in calidad:
title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
fanart=scrapedthumbnail, plot=plot, contentTitle = contentTitle))
fanart=scrapedthumbnail, plot=plot, contentTitle = scrapedtitle))
next_page = scrapertools.find_single_match(data,'<li><a class="pag-next" href="(.*?)">Next ></a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -101,7 +106,7 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<video src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:

@@ -69,7 +69,11 @@ def mainlist(item):
itemlist.append(Item(channel=__channel__, title="Caseros", url=host + '/hd',
action="videos", viewmode="movie_with_plot", viewcontent='homemade',
thumbnail=get_thumb("channels_adult.png")))

itemlist.append(Item(channel=__channel__, title="PornStar", action="catalogo",
url=host + '/pornstars/', viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))

itemlist.append(Item(channel=__channel__, title="Categorías", action="categorias",
url=host + '/categories/', viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))
@@ -100,28 +104,45 @@ def search(item, texto):
def videos(item):
logger.info()
itemlist = []

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<a class="[^"]+" href="([^"]+)">' # url
patron += '<img id="[^"]+".*?src="([^"]+)".*?' # img
patron += '<span class="title">([^<]+)</span>.*?' # title
patron += '<span class="duration">([^<]+)</span>' # time
patron += '<span class="duration"(.*?)</a>' # time
matches = scrapertools.find_multiple_matches(data, patron)

for scrapedurl, scrapedthumbnail, scrapedtitle, time in matches:
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtime in matches:
time = scrapertools.find_single_match(scrapedtime, '>([^<]+)</span>')
title = "[%s] %s" % (time, scrapedtitle)

if ">HD<" in scrapedtime:
title = "[COLOR yellow]" + time + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle
itemlist.append(Item(channel=item.channel, action='play', title=title, thumbnail=scrapedthumbnail,
url=host + scrapedurl, contentTile=scrapedtitle, fanart=scrapedthumbnail))

paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace('amp;', '')

if paginacion:
itemlist.append(Item(channel=item.channel, action="videos",
thumbnail=thumbnail % 'rarrow',
title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
return itemlist


def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li class="pornstars">.*?<a href="([^"]+)".*?'
patron += '<img src="([^"]+)" alt="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
itemlist.append(Item(channel=item.channel, action="videos", url=url, title=scrapedtitle, fanart=scrapedthumbnail,
thumbnail=scrapedthumbnail, viewmode="movie_with_plot"))
paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace('amp;', '')
if paginacion:
itemlist.append(Item(channel=item.channel, action="catalogo",
thumbnail=thumbnail % 'rarrow',
title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
return itemlist


@@ -133,9 +154,7 @@ def categorias(item):
# logger.info(data)
patron = 'class="checkHomepage"><a href="([^"]+)".*?' # url
patron += '<span class="count">([^<]+)</span>' # title, vids

matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, vids in matches:
scrapedtitle = scrapedurl.replace('/categories/', '').replace('-', ' ').title()
title = "%s (%s)" % (scrapedtitle, vids.title())
@@ -144,7 +163,6 @@ def categorias(item):
itemlist.append(Item(channel=item.channel, action="videos", fanart=thumbnail,
title=title, url=url, thumbnail=thumbnail,
viewmode="movie_with_plot", folder=True))

return itemlist


@@ -153,5 +171,5 @@ def play(item):
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, '"quality":"[^"]+","videoUrl":"([^"]+)"').replace('\\', '')
itemlist.append(item.clone(url=url, title=item.contentTile))

return itemlist

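
The .replace('amp;', '') chained after both rel="next" lookups above is a hand-rolled unescape: the href comes out of the HTML as ...?page=2&amp;..., and dropping the literal "amp;" after each "&" restores the bare "&". The general-purpose Python 2 equivalent would be (sketch, not part of the patch):

import HTMLParser

raw_href = '/videos?page=2&amp;order=latest'  # example of an escaped href
href = HTMLParser.HTMLParser().unescape(raw_href)  # '/videos?page=2&order=latest'
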
@@ -49,7 +49,7 @@ def categorias(item):
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li><a class="pag-next" href="([^"]+)"><ins>Next</ins></a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -60,7 +60,7 @@ def categorias(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = 'href="([^"]+)"\s*class="th-video.*?'
patron += '<img src="([^"]+)".*?'
@@ -85,7 +85,7 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<video src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:

@@ -56,17 +56,39 @@ def lista(item):
# Descarga la página
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
# <article id='post-id-40630' class='video-index ceil'>
# <div class='thumb-wrapp'>
# <a href='http://www.vidz78.com/2019/03/22/deux-blacks-tbm-pour-julia-30ans/189/' class='thumb' style='background-image:url("https://pp.userapi.com/c855416/v855416475/ab7f/utBev5x7QuA.jpg")'>
# <div class='overlay'></div>
# <div class='vl'>
# <div class="hd">HD</div> <div class="duration">36:28</div> </div>
# </a>
# </div>
# <div class='info-card'>
# <h6><a class='hp' href='http://www.vidz78.com/2019/03/22/deux-blacks-tbm-pour-julia-30ans/189/'>Jacquieetmicheltv - Deux blacks TBM pour Julia, 30ans !</a></h6>
# <time class="video-date" datetime="2019-03-22T10:32:46+00:00">Mar 22, 2019</time>
# <span> / 5.1k views</span>
# </div>
# </article>
# Extrae las entradas de la pagina seleccionada
patron = "<a href='.*?.' class='thumb' style='background-image:url\(\"([^\"]+)\"\).*?.<h6><a class='hp' href='([^']+)'>(.*?)</a></h6>"
patron = "<a href='.*?.' class='thumb' style='background-image:url\(\"([^\"]+)\"\).*?"
patron += "<div class=\"hd\">(.*?)</div>.*?"
patron += "<div class=\"duration\">(.*?)</div>.*?"
patron += "<h6><a class='hp' href='([^']+)'>(.*?)</a></h6>"
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []

for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
for scrapedthumbnail, scrapedhd, duration, scrapedurl, scrapedtitle in matches:
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
url = urlparse.urljoin(item.url, scrapedurl)
title = scrapedtitle.strip()

scrapedtitle = scrapedtitle.strip()
title = "[COLOR yellow]" + duration + "[/COLOR] " + "[COLOR red]" +scrapedhd+ "[/COLOR] "+scrapedtitle
# Añade al listado
itemlist.append(Item(channel=item.channel, action="play", title=title, thumbnail=thumbnail, fanart=thumbnail,
fulltitle=title, url=url,

@@ -56,7 +56,7 @@ def catalogo(item):
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedurl = host + scrapedurl
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<a class="next" href="([^"]+)">')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -84,7 +84,7 @@ def categorias(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="video">.*?'
patron += '<a href="([^"]+)".*?'
@@ -92,11 +92,12 @@ def lista(item):
patron += '<img src="([^"]+)" alt="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,time,scrapedthumbnail,scrapedtitle in matches:
scrapedtitle = scrapedtitle.replace(", ", " & ").replace("(", "(").replace(")", ")")
title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl,
thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle = title))
fanart=thumbnail, thumbnail=thumbnail, plot=plot, contentTitle = title))
next_page = scrapertools.find_single_match(data,'<a class="next.*?title="Next Page" href="([^"]+)">')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -107,7 +108,7 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<source src="([^"]+)" type="video/mp4" label="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl,scrapedtitle in matches:

@@ -15,9 +15,9 @@ host = 'https://watchpornfree.ws'
def mainlist(item):
logger.info("")
itemlist = []
itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/clips-scenes"))
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/movies"))
itemlist.append( Item(channel=item.channel, title="Parodia" , action="lista", url=host + "/category/parodies-hd"))
itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/clips-scenes"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Año" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
@@ -37,8 +37,7 @@ def search(item, texto):
logger.error("%s" % line)
return []

# <li class="cat-item cat-item-6"><a href="https://watchpornfree.ws/category/all-girl" >All Girl</a> (2,777)
# </li>

def categorias(item):
logger.info("")
itemlist = []
@@ -62,13 +61,13 @@ def categorias(item):
def lista(item):
logger.info("")
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<article class="TPost B">.*?<a href="([^"]+)">.*?src="([^"]+)".*?<div class="Title">([^"]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next »</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)

@@ -31,7 +31,7 @@ def categorias(item):
scrapedplot = ""
scrapedthumbnail = host + scrapedthumbnail
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist

@@ -60,7 +60,7 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
scrapedurl = scrapedurl.replace("pornhub.com/embed/", "pornhub.com/view_video.php?viewkey=")
data = httptools.downloadpage(scrapedurl).data

plugin.video.alfa/channels/xxxfreeinhd.json (new file)
@@ -0,0 +1,15 @@
{
"id": "xxxfreeinhd",
"name": "xxxfreeinhd",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://watchxxxfreeinhd.com/wp-content/uploads/logo2015%20(1).jpg",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

93
plugin.video.alfa/channels/xxxfreeinhd.py
Normal file
93
plugin.video.alfa/channels/xxxfreeinhd.py
Normal file
@@ -0,0 +1,93 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
|
||||
host = 'https://watchxxxfreeinhd.com'
|
||||
|
||||
|
||||
def mainlist(item):
    logger.info()
    itemlist = []

    itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/?filtre=date&cat=0"))
    itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/?display=tube&filtre=views"))
    itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/?display=tube&filtre=rate"))

    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    # leading slash added here: host carries no trailing slash
    item.url = host + "/search.php?q=%s&language=en&search=Search" % texto
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data)
    patron = '<noscript>.*?src="([^"]+)".*?'
    patron += '<a href="([^"]+)" title="([^"]+)".*?'
    patron += '<span class="nb_cat border-radius-5">(\d+) videos</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedthumbnail,scrapedurl,scrapedtitle,cantidad in matches:
        scrapedplot = ""
        title = scrapedtitle + " (" + cantidad + ")"
        itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl,
                              thumbnail=scrapedthumbnail , plot=scrapedplot) )
    return itemlist


def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data)
    patron = '<li class="border-radius-5 box-shadow">.*?'
    patron += '<img width="\d+" height="\d+" src="([^"]+)" class=.*?'
    patron += '<a href="([^"]+)" title="([^"]+)">.*?'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
        title = scrapedtitle
        thumbnail = scrapedthumbnail + "|https://watchxxxfreeinhd.com/"
        plot = ""
        itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
                              thumbnail=thumbnail, plot=plot, fanart=scrapedthumbnail, contentTitle = scrapedtitle))
    next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)"')
    if next_page:
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
                              url=next_page) )
    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    data = scrapertools.get_match(data,'<div class="video-embed">(.*?)</div>')
    patron = '<noscript>.*?<iframe src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url in matches:
        itemlist.append(item.clone(action="play", title = "%s", url=url ))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    return itemlist

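One detail in lista() above: the thumbnail gets a bare URL appended after a pipe. Kodi's pipe suffix is normally a set of Header=Value pairs, so if the intent is to send a Referer with the image request, the usual form would be the sketch below (an assumption on my part; the bare-URL suffix in the new file likely has no effect):

    def with_referer(url, referer="https://watchxxxfreeinhd.com/"):
        # Kodi accepts extra HTTP headers on art/video URLs as "url|Header=Value"
        return "%s|Referer=%s" % (url, referer)
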
@@ -16,7 +16,7 @@ def mainlist(item):
    itemlist.append( Item(channel=item.channel, title="Nuevas", action="lista", url=host + "/browse/time/"))
    itemlist.append( Item(channel=item.channel, title="Mas Vistas", action="lista", url=host + "/browse/views/"))
    itemlist.append( Item(channel=item.channel, title="Mejor valorada", action="lista", url=host + "/top_rated/"))
    itemlist.append( Item(channel=item.channel, title="Canal", action="categorias", url=host + "/channels/rating/"))
    itemlist.append( Item(channel=item.channel, title="Canal", action="categorias", url=host + "/channels/most_popular/"))
    itemlist.append( Item(channel=item.channel, title="Pornstars", action="catalogo", url=host + "/pornstars/most_popular/"))
    itemlist.append( Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/alphabetical/"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
@@ -52,7 +52,7 @@ def catalogo(item):
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        scrapedurl = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
    next_page = scrapertools.find_single_match(data,'<div class="currentPage".*?<a href="([^"]+)"')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
@@ -82,7 +82,7 @@ def categorias(item):
        title = scrapedtitle + " (" + cantidad +")"
        scrapedurl = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl,
                             thumbnail=thumbnail, fanart=thumbnail, plot=scrapedplot) )
                             fanart=thumbnail, thumbnail=thumbnail, plot=scrapedplot) )
    next_page = scrapertools.find_single_match(data,'<div class="currentPage".*?<a href="([^"]+)"')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
@@ -93,7 +93,7 @@ def categorias(item):
def lista(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<a href="([^"]+)" class=\'video-box-image\'.*?'
    patron += 'data-original="([^"]+)".*?'
@@ -108,7 +108,7 @@ def lista(item):
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
                              fanart=thumbnail, plot=plot, contentTitle = contentTitle))
    next_page = scrapertools.find_single_match(data,'<div class="currentPage".*?<a href="([^"]+)"')
    next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)"')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
@@ -118,7 +118,7 @@ def lista(item):
def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    patron = 'page_params.video.mediaDefinition =.*?"videoUrl":"([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:
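The videoUrl captured by the pattern above comes out of a JavaScript object literal, so its slashes are typically JSON-escaped. A hedged sketch of the unescaping step usually needed before handing the URL to the player (not visible in this hunk):

    def clean_video_url(scrapedurl):
        # "https:\/\/host\/video.mp4" -> "https://host/video.mp4"
        return scrapedurl.replace("\\/", "/")
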
@@ -10,6 +10,7 @@ from core import httptools

host = 'http://yuuk.net'


def mainlist(item):
    logger.info()
    itemlist = []

@@ -87,6 +87,9 @@ def getchanneltypes(view="thumb_"):
                         channel_type=channel_type, viewmode="thumbnails",
                         thumbnail=get_thumb("channels_%s.png" % channel_type, view)))

    itemlist.append(Item(title='Comunidad', channel="community", action="mainlist", view=view,
                         category=title, channel_type="all", thumbnail=get_thumb("channels_community.png", view),
                         viewmode="thumbnails"))
    return itemlist

@@ -119,6 +122,9 @@ def filterchannels(category, view="thumb_"):
        try:
            channel_parameters = channeltools.get_channel_parameters(channel)

            if channel_parameters["channel"] == 'community':
                continue

            # si el canal no es compatible, no se muestra
            if not channel_parameters["compatible"]:
                continue

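Taken together, the two hunks above register a dedicated 'Comunidad' entry and then exclude the community channel (and any incompatible channel) from the normal per-category listing. A compact sketch of that filter, assuming channel_parameters is the dict returned by channeltools.get_channel_parameters:

    def should_list(channel_parameters):
        # 'community' is listed separately via the item added in getchanneltypes()
        if channel_parameters["channel"] == 'community':
            return False
        # incompatible channels are hidden (same check as in the hunk above)
        if not channel_parameters["compatible"]:
            return False
        return True
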
BIN  plugin.video.alfa/resources/media/themes/default/thumb_add.png  Normal file
Binary file not shown. (After: 6.8 KiB)
Binary file not shown. (After: 38 KiB)