63
plugin.video.alfa/channels/pelis123.json
Normal file
63
plugin.video.alfa/channels/pelis123.json
Normal file
@@ -0,0 +1,63 @@
|
||||
{
|
||||
"id": "pelis123",
|
||||
"name": "Pelis123",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["cast", "lat"],
|
||||
"thumbnail": "pelis123.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"movie",
|
||||
"tvshow",
|
||||
"vos"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostrar enlaces en idioma...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"No filtrar",
|
||||
"LAT",
|
||||
"ESP",
|
||||
"VOSE"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "modo_grafico",
|
||||
"type": "bool",
|
||||
"label": "Buscar información extra",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verificar si los enlaces existen",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Número de enlaces a verificar",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
} ]
|
||||
}
|
||||
|
||||
359
plugin.video.alfa/channels/pelis123.py
Normal file
359
plugin.video.alfa/channels/pelis123.py
Normal file
@@ -0,0 +1,359 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re, urllib
|
||||
|
||||
from channels import autoplay
|
||||
from platformcode import config, logger, platformtools
|
||||
from core.item import Item
|
||||
from core import httptools, scrapertools, jsontools, tmdb
|
||||
from core import servertools
|
||||
from channels import filtertools
|
||||
|
||||
|
||||
host = 'https://pelis123.tv/'


# Site language labels mapped to Alfa's canonical language codes.
IDIOMAS = {'LAT': 'LAT', 'ESP':'ESP', 'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
# Servers this channel can resolve (used by autoplay).
list_servers = ['openload', 'fembed', 'directo']
list_quality = []

__channel__='pelis123'
# Per-channel user settings declared in pelis123.json.
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
    # Fall back to extra-info lookups enabled if the setting is unreadable.
    __modo_grafico__ = True
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu: movies, series, search and settings."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)

    entries = [
        item.clone(title='Películas', action='mainlist_pelis'),
        item.clone(title='Series', action='mainlist_series'),
        item.clone(title='Buscar ...', action='search', search_type='all'),
        item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False),
    ]
    autoplay.show_option(item.channel, entries)

    return entries
|
||||
|
||||
|
||||
def mainlist_pelis(item):
    """Movies sub-menu: listings, browse-by filters and movie search."""
    logger.info()

    menu = [
        item.clone(title='Nuevas películas', action='list_all', url=host + 'film.html', search_type='movie'),
        item.clone(title='Destacadas', action='list_all', url=host + 'featured.html', search_type='movie'),
        item.clone(title='Por género', action='generos', search_type='movie'),
        item.clone(title='Por idioma', action='idiomas', search_type='movie'),
        item.clone(title='Por país', action='paises', search_type='movie'),
        item.clone(title='Por año', action='anios', search_type='movie'),
        item.clone(title='Buscar película ...', action='search', search_type='movie'),
    ]
    return menu
|
||||
|
||||
|
||||
def mainlist_series(item):
    """Series sub-menu: listing, genre filter and series search."""
    logger.info()

    return [
        item.clone(title='Nuevas series', action='list_all', url=host + 'series.html', search_type='tvshow'),
        item.clone(title='Por género', action='generos', search_type='tvshow'),
        item.clone(title='Buscar serie ...', action='search', search_type='tvshow'),
    ]
|
||||
|
||||
|
||||
def anios(item):
    """Menu of years, taken from the 'year' select on the search page."""
    logger.info()
    return extraer_opciones(item, 'year')
|
||||
|
||||
def generos(item):
    """Menu of genres, taken from the 'genre' select on the search page."""
    logger.info()
    return extraer_opciones(item, 'genre')
|
||||
|
||||
def idiomas(item):
    """Menu of languages, taken from the 'lang' select on the search page."""
    logger.info()
    return extraer_opciones(item, 'lang')
|
||||
|
||||
def paises(item):
    """Menu of countries, taken from the 'country' select on the search page."""
    logger.info()
    return extraer_opciones(item, 'country')
|
||||
|
||||
def extraer_opciones(item, select_id):
    """Build filter menu entries from one <select> control of the search page.

    select_id: name attribute of the select ('year', 'genre', 'lang', 'country').
    Returns items sorted by title; years are sorted newest first.
    """
    itemlist = []

    url = host + 'search.html'
    data = httptools.downloadpage(url).data
    # ~ logger.debug(data)
    # The listing url reuses the search endpoint with a fixed type/order.
    url += '?type=' + ('series' if item.search_type == 'tvshow' else 'movies')
    url += '&order=last_update&order_by=desc'

    bloque = scrapertools.find_single_match(data, '<select name="%s"[^>]*>(.*?)</select>' % select_id)

    matches = re.compile('<option value="([^"]+)">([^<]+)', re.DOTALL).findall(bloque)
    for valor, titulo in matches:
        itemlist.append(item.clone( title=titulo.capitalize(), url= url + '&' + select_id + '=' + valor, action='list_all' ))

    if select_id == 'year': # years in reverse order (newest first)
        return sorted(itemlist, key=lambda it: it.title, reverse=True)
    else:
        return sorted(itemlist, key=lambda it: it.title)
|
||||
|
||||
|
||||
def configuracion(item):
    """Open the channel settings dialog, then refresh the current listing."""
    result = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return result
|
||||
|
||||
|
||||
def detectar_idiomas(txt):
    """Return the language codes whose site marker appears in *txt*.

    Codes are returned in a fixed order: ESP, LAT, VOSE.
    """
    marcadores = (('Castellano', 'ESP'), ('Latino', 'LAT'), ('Subtitulado', 'VOSE'))
    return [codigo for marca, codigo in marcadores if marca in txt]
|
||||
|
||||
def detectar_idioma(txt):
    """Return the first language code detected in *txt*, or '?' if none."""
    encontrados = detectar_idiomas(txt)
    return encontrados[0] if encontrados else '?'
|
||||
|
||||
def list_all(item):
    """List movies/series from a catalogue or search-results page.

    Scrapes one grid page, keeps only entries matching item.search_type
    ('movie', 'tvshow' or 'all'), and appends a pagination item when the
    site exposes a next-page link.
    """
    logger.info()
    itemlist = []

    es_busqueda = '&q=' in item.url  # search pages need series de-duplication below

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    # Each card exposes: language tags, url, poster, title, original title,
    # quality, an episode/runtime label, and the year inside a tooltip.
    patron = '<div class="tray-item" episode-tag="([^"]+)">\s*<div class="tray-item-content">'
    patron += '\s*<a href="([^"]+)">\s*<img class="[^"]*" src="([^"]+)">'
    patron += '.*?<div class="tray-item-title">([^<]+)</div>'
    patron += '.*?<div class="tray-item-title-en">([^<]+)</div>'
    patron += '.*?<div class="tray-item-quality">([^<]+)</div>'
    patron += '.*?<div class="tray-item-episode">([^<]+)</div>'
    patron += '.*? data-original-title=".*? \((\d+)\)"'

    matches = re.compile(patron, re.DOTALL).findall(data)
    for langs, url, thumb, title, title_en, quality, episode, year in matches:
        # Rebuild the poster url from the CDN path embedded in the thumbnail.
        th = scrapertools.find_single_match(thumb, r'poster%2F(.*?)$')
        thumb = 'https://cdn.pelis123.tv/poster/' + th

        languages = detectar_idiomas(langs)

        # Movies carry a "NN MIN" runtime label; series carry an episode label.
        tipo = 'movie' if 'MIN' in episode else 'tvshow'
        if item.search_type not in ['all', tipo]: continue

        if tipo == 'tvshow':
            # Strip a trailing season marker ("Title S2") from series names.
            m = re.match('(.*?) S\d+$', title)
            if m: title = m.group(1)

        title = title.strip()
        quality = quality.strip().upper()

        titulo = title
        if len(languages) > 0:
            titulo += ' [COLOR pink][%s][/COLOR]' % ','.join(languages)
        if quality != '':
            titulo += ' [COLOR pink][%s][/COLOR]' % quality
        if item.search_type == 'all':
            titulo += ' [COLOR %s](%s)[/COLOR]' % ('red' if tipo == 'tvshow' else 'green', tipo)

        if tipo == 'movie':
            itemlist.append(item.clone( action='findvideos', url=url, title=titulo, thumbnail=thumb,
                                        contentType='movie', contentTitle=title, infoLabels={'year': year} ))
        else:
            if es_busqueda: # drop series that repeat once per season in search results
                if title in [it.contentSerieName for it in itemlist]: continue

            itemlist.append(item.clone( action='temporadas', url=url, title=titulo, thumbnail=thumb,
                                        contentType='tvshow', contentSerieName=title, infoLabels={'year': year} ))

    tmdb.set_infoLabels(itemlist)

    next_page_link = scrapertools.find_single_match(data, 'active">\d+</a>(?:\s*</div>\s*<div class="btn-group">|)\s*<a href="([^"]+)')
    if next_page_link:
        itemlist.append(item.clone( title='>> Página siguiente', url=next_page_link ))

    return itemlist
|
||||
|
||||
|
||||
def temporadas(item):
    """List the seasons of a series.

    Seasons are scraped from "Temporada N" links on the show page; when the
    url itself is a season page (it does not link to itself) that season is
    added from the url. Appends the "add to video library" action when the
    platform supports it.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    matches = re.compile('href="([^"]+)" class="[^"]*">Temporada (\d+)</a>', re.DOTALL).findall(data)
    for url, numtempo in matches:
        itemlist.append(item.clone( action='episodesxseason', title='Temporada %s' % numtempo, url = url,
                                    contentType='season', contentSeason=numtempo ))

    # A season page does not list itself; recover its number from the url.
    m = re.match('.*?-season-(\d+)-[a-z0-9A-Z]+-[a-z0-9A-Z]+\.html$', item.url)
    if m:
        itemlist.append(item.clone( action='episodesxseason', title='Temporada %s' % m.group(1), url = item.url,
                                    contentType='season', contentSeason=m.group(1) ))
    tmdb.set_infoLabels(itemlist)

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))

    return itemlist
|
||||
|
||||
|
||||
# ~ # Si una misma url devuelve los episodios de todas las temporadas, definir rutina tracking_all_episodes para acelerar el scrap en trackingtools.
|
||||
# ~ def tracking_all_episodes(item):
|
||||
# ~ return episodios(item)
|
||||
|
||||
|
||||
def episodios(item):
    """Collect every episode of the series by walking all of its seasons.

    Used by the video library ('extra="episodios"' items).
    """
    logger.info()
    episodes = []
    for season_item in temporadas(item):
        episodes.extend(episodesxseason(season_item))
    return episodes
|
||||
|
||||
|
||||
def episodesxseason(item):
    """List the episodes of one season from the show's player playlist."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    # Jump from the show page to the player page, which embeds the playlist.
    url = scrapertools.find_single_match(data, 'href="([^"]+)" action="watch"')
    data = httptools.downloadpage(url).data
    # ~ logger.debug(data)

    patron = '<div class="watch-playlist-item(?: playing|) " data-season="(\d+)" data-episode="(\d+)">'
    patron += '\s*<a href="([^"]+)"'
    patron += '.*?<img src="([^"]+)"'
    patron += '.*?<span class="watch-playlist-title">([^<]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for season, episode, url, thumb, title in matches:
        # The playlist mixes all seasons; keep only the requested one.
        if item.contentSeason and item.contentSeason != int(season):
            continue

        titulo = '%sx%s %s' % (season, episode, title)
        itemlist.append(item.clone( action='findvideos', url=url, title=titulo, thumbnail=thumb,
                                    contentType='episode', contentSeason=season, contentEpisodeNumber=episode ))

    tmdb.set_infoLabels(itemlist)

    return itemlist
|
||||
|
||||
|
||||
def detectar_server(servidor):
    """Translate the site's server label into an Alfa server id."""
    nombre = servidor.lower()
    # Generic numbered entries ("server 2", "server 3", ...) are direct links.
    if 'server ' in nombre:
        return 'directo'
    if nombre == 'fast':
        return 'fembed'
    return nombre
|
||||
|
||||
def findvideos(item):
    """Resolve the playable links of a movie or episode.

    Fetches the CSRF token from the watch page, queries the site's AJAX
    endpoint for the link list (grouped by language and server), then runs
    the standard link-check / filtertools / autoplay pipeline.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    # The links endpoint is CSRF-protected; the token is embedded in the page.
    token = scrapertools.find_single_match(data, '<meta name="csrf-token" content="([^"]+)')

    # Url shapes (movie vs episode):
    # ~ https://pelis123.tv/watch/blackkklansman-2018-ocffc-ux2.html
    # ~ https://pelis123.tv/watch/lethal-weapon-season-1-episode-18-oa06e-fds.html
    movie_id = scrapertools.find_single_match(item.url, '([a-z0-9A-Z]+-[a-z0-9A-Z]+)\.html$')
    m = re.match('.*?-episode-(\d+)-[a-z0-9A-Z]+-[a-z0-9A-Z]+\.html$', item.url)
    episode = m.group(1) if m else ''

    url = host + 'ajax/watch/list'
    post = 'movie_id=%s&episode=%s' % (movie_id, episode)
    headers = { 'X-CSRF-TOKEN': token }
    data = jsontools.load(httptools.downloadpage(url, post=post, headers=headers).data)
    # ~ logger.debug(data)
    # data['list'] maps language label -> {server label: [play urls]}.
    for idioma, enlaces in data['list'].items():
        for servidor, url in enlaces.items():
            titulo = detectar_server(servidor)
            titulo += ' [%s]' % detectar_idioma(idioma)
            titulo += item.quality
            for url_play in url:
                itemlist.append(item.clone( channel = item.channel, action = 'play', server = detectar_server(servidor),
                                            title = titulo, url = url_play,
                                            language = detectar_idioma(idioma), quality = 'HD' #, other = servidor
                                            ))

    # Required for link checking
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required for AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos' :
        itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library",
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                             extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Resolve the final video url for a selected link.

    Looks for an embedded <iframe> first, then a <source> tag; returns a
    single-item list with the resolved url, or an empty list when no usable
    url is found.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url, ignore_response_code=True).data
    # ~ logger.debug(data)

    url = scrapertools.find_single_match(data, '<iframe src="([^"]+)')
    if url == '':
        url = scrapertools.find_single_match(data, '<source src="([^"]+)')

    if 'fastproxycdn.net' in url: url = '' # host no longer exists

    # ~ logger.debug(url)
    if url != '':
        itemlist.append(item.clone(url = url))

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Channel search entry point: build the query url and list the results.

    item.search_type restricts results to 'movie', 'tvshow' or 'all'
    (defaulting to 'all'). Returns [] on any scraping error, as required
    by the global search framework.
    """
    logger.info("texto: %s" % texto)
    if item.search_type == "":
        item.search_type = 'all'
    try:
        tipo = {'tvshow': 'series', 'movie': 'movies'}.get(item.search_type, '')
        item.url = (host + 'search.html'
                    + '?type=' + tipo
                    + '&order=last_update&order_by=desc'
                    + '&q=' + texto.replace(" ", "+"))
        return list_all(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
@@ -15,11 +15,11 @@ host = 'http://pornboss.org'
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/category/movies/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/category/movies/"))
|
||||
itemlist.append( Item(channel=item.channel, title=" categorias" , action="categorias", url=host + "/category/movies/"))
|
||||
|
||||
itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/category/clips/"))
|
||||
itemlist.append( Item(channel=item.channel, title=" categorias" , action="peliculas", url=host + "/category/clips/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/clips/"))
|
||||
itemlist.append( Item(channel=item.channel, title=" categorias" , action="lista", url=host + "/category/clips/"))
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -28,7 +28,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/?s=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -51,23 +51,27 @@ def categorias(item):
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
patron = '<article id="post-\d+".*?<img class="center cover" src="([^"]+)" alt="([^"]+)".*?<blockquote>.*?<a href=\'([^\']+)\''
|
||||
patron = '<article id="post-\d+".*?'
|
||||
patron += '<img class="center cover" src="([^"]+)" alt="([^"]+)".*?'
|
||||
patron += '<blockquote>.*?<a href=\'([^\']+)\''
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
for scrapedthumbnail,scrapedtitle,scrapedurl in matches:
|
||||
scrapedplot = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
|
||||
if next_page_url!="":
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
|
||||
if next_page!="":
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
def play(item):
|
||||
|
||||
@@ -2,26 +2,23 @@
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
|
||||
host = 'http://sexofilm.com'
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/xtreme-adult-wing/adult-dvds/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Parody" , action="peliculas", url=host + "/xtreme-adult-wing/porn-parodies/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/xtreme-adult-wing/porn-clips-movie-scene/"))
|
||||
itemlist.append( Item(channel=item.channel, title="SexMUSIC" , action="peliculas", url=host + "/topics/sexo-music-videos/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Xshows" , action="peliculas", url=host + "/xshows/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/xtreme-adult-wing/adult-dvds/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Parody" , action="lista", url=host + "/xtreme-adult-wing/porn-parodies/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/xtreme-adult-wing/porn-clips-movie-scene/"))
|
||||
itemlist.append( Item(channel=item.channel, title="SexMUSIC" , action="lista", url=host + "/topics/sexo-music-videos/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Xshows" , action="lista", url=host + "/xshows/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
@@ -32,7 +29,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url =host + "/?s=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -44,11 +41,17 @@ def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = scrapertools.get_match(data,'<div class="tagcloud">(.*?)<p>')
|
||||
if item.title == "Canal" :
|
||||
data = scrapertools.get_match(data,'>Best Porn Studios</a>(.*?)</ul>')
|
||||
else:
|
||||
data = scrapertools.get_match(data,'<div class="nav-wrap">(.*?)<ul class="sub-menu">')
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title="Big tit", url="https://sexofilm.com/?s=big+tits"))
|
||||
|
||||
|
||||
patron = '<a href="(.*?)".*?>(.*?)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl) )
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -56,11 +59,11 @@ def catalogo(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = scrapertools.get_match(data,'>Best Porn Studios</a>(.*?)</ul>')
|
||||
data = scrapertools.get_match(data,'<div class="nav-wrap">(.*?)<ul class="sub-menu">')
|
||||
patron = '<a href="(.*?)">(.*?)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl) )
|
||||
return itemlist
|
||||
|
||||
def anual(item):
|
||||
@@ -72,24 +75,25 @@ def anual(item):
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<div class="post-thumbnail.*?<a href="([^"]+)" title="(.*?)".*?src="([^"]+)"'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle.replace(" Porn DVD", "").replace("Permalink to ", "").replace(" Porn Movie", "")
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">»</a>')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
plot = ""
|
||||
title = scrapedtitle.replace(" Porn DVD", "").replace("Permalink to ", "").replace(" Porn Movie", "")
|
||||
itemlist.append(item.clone(action="play", title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
|
||||
fanart=scrapedthumbnail, plot=plot) )
|
||||
next_page = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">»</a>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -103,3 +107,4 @@ def play(item):
|
||||
videoitem.thumbnail = item.thumbnail
|
||||
videoitem.channel = item.channel
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -7,21 +7,16 @@ from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
|
||||
host = 'https://es.spankbang.com'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevos", action="peliculas", url= host + "/new_videos/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas valorados", action="peliculas", url=host + "/trending_videos/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas vistos", action="peliculas", url= host + "/most_popular/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas largos", action="peliculas", url= host + "/longest_videos/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevos", action="lista", url= host + "/new_videos/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas valorados", action="lista", url=host + "/trending_videos/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas vistos", action="lista", url= host + "/most_popular/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas largos", action="lista", url= host + "/longest_videos/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
@@ -32,7 +27,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/s/%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -51,12 +46,12 @@ def categorias(item):
|
||||
scrapedplot = ""
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
|
||||
scrapedthumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl ,
|
||||
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle , url=scrapedurl ,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
@@ -70,18 +65,18 @@ def peliculas(item):
|
||||
scrapedhd = scrapertools.find_single_match(scrapedtime, '<span class="i-hd">(.*?)</span>')
|
||||
duration = scrapertools.find_single_match(scrapedtime, '<i class="fa fa-clock-o"></i>(.*?)</span>')
|
||||
if scrapedhd != '':
|
||||
title = "[COLOR yellow]" +duration+ " min[/COLOR] " + "[COLOR red]" +scrapedhd+ "[/COLOR] "+scrapedtitle
|
||||
title = "[COLOR yellow]" + duration + " min[/COLOR] " + "[COLOR red]" +scrapedhd+ "[/COLOR] "+scrapedtitle
|
||||
else:
|
||||
title = "[COLOR yellow]" + duration + " min[/COLOR] " + scrapedtitle
|
||||
thumbnail = "http:" + scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
|
||||
plot=plot, contentTitle=title ))
|
||||
fanart=thumbnail, plot=plot, contentTitle=title) )
|
||||
next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)">')
|
||||
if next_page:
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue",
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>" , text_color="blue",
|
||||
url=next_page ) )
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -7,16 +7,14 @@ from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'http://streamingporn.xyz'
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/category/movies/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/category/stream/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/category/movies/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/stream/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
@@ -28,7 +26,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/?s=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -48,8 +46,8 @@ def catalogo(item):
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl ,
|
||||
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle , url=scrapedurl ,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -67,12 +65,12 @@ def categorias(item):
|
||||
scrapedthumbnail = ""
|
||||
scrapedtitle = scrapedtitle
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl ,
|
||||
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle , url=scrapedurl ,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
@@ -82,17 +80,21 @@ def peliculas(item):
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
|
||||
url = scrapedurl
|
||||
title = scrapedtitle
|
||||
if 'HD' in scrapedtitle :
|
||||
calidad = scrapertools.find_single_match(scrapedtitle, '(\d+)p')
|
||||
title = "[COLOR red]" + "HD" +"[/COLOR] "+ scrapedtitle
|
||||
if calidad :
|
||||
title = "[COLOR red]" + "HD" + calidad +" [/COLOR] "+ scrapedtitle
|
||||
contentTitle = title
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
|
||||
plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
|
||||
fanart=scrapedthumbnail, plot=plot, contentTitle = contentTitle) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<div class="loadMoreInfinite"><a href="(.*?)" >Load More')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" ,
|
||||
text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel , action="lista" , title="Página Siguiente >>" ,
|
||||
text_color="blue", url=next_page_url) )
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -7,15 +7,13 @@ from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'http://streamporno.eu'
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
@@ -27,7 +25,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/?s=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -42,7 +40,7 @@ def categorias(item):
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<li id="menu-item-.*?<a href="([^"]+)">([^"]+)</a>'
|
||||
if item.title == "Categorias":
|
||||
itemlist.append( Item(channel=item.channel, title="Big Tits" , action="peliculas", url=host + "/?s=big+tits"))
|
||||
itemlist.append( Item(channel=item.channel, title="Big Tits" , action="lista", url=host + "/?s=big+tits"))
|
||||
patron = '<li class="cat-item.*?<a href="([^"]+)" >([^"]+)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
@@ -51,11 +49,12 @@ def categorias(item):
|
||||
scrapedthumbnail = ""
|
||||
scrapedtitle = scrapedtitle
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
@@ -68,11 +67,11 @@ def peliculas(item):
|
||||
contentTitle = title
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
|
||||
next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">»</a>')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url,
|
||||
thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle = contentTitle))
|
||||
next_page = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">»</a>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -8,25 +8,25 @@ from core import scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
|
||||
host = 'http://www.submityourflicks.com'
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="videos", title="Útimos videos", url="http://www.submityourflicks.com/",
|
||||
viewmode="movie"))
|
||||
itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
|
||||
url="http://www.submityourflicks.com/index.php?mode=search&q=%s&submit=Search"))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos", url= host))
|
||||
itemlist.append(Item(channel=item.channel, action="videos", title="Mas vistos", url= host + "/most-viewed/"))
|
||||
itemlist.append(Item(channel=item.channel, action="videos", title="Mejor valorados", url= host + "/top-rated/"))
|
||||
itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url= host))
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
tecleado = texto.replace(" ", "+")
|
||||
item.url = item.url % tecleado
|
||||
texto = texto.replace(" ", "-")
|
||||
item.url = host + "/search/%s/" % texto
|
||||
try:
|
||||
return videos(item)
|
||||
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -38,24 +38,21 @@ def videos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.downloadpageGzip(item.url)
|
||||
patron = '<div class="item-block[^<]+'
|
||||
patron += '<div class="inner-block[^<]+'
|
||||
patron += '<a href="([^"]+)" title="([^"]+)"[^<]+'
|
||||
patron += '<span class="image".*?'
|
||||
patron += '<img.*? data-src="([^"]+)"'
|
||||
patron = '<div class="item-block item-normal col" >.*?'
|
||||
patron += '<a href="([^"]+)" title="([^"]+)">.*?'
|
||||
patron += 'data-src="([^"]+)".*?'
|
||||
patron += '</span> ([^"]+)<'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
|
||||
title = scrapedtitle
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedtime in matches:
|
||||
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
|
||||
url = scrapedurl
|
||||
thumbnail = scrapedthumbnail.replace(" ", "%20")
|
||||
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
|
||||
itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
|
||||
folder=False))
|
||||
next_page_url = scrapertools.find_single_match(data, "<a href='([^']+)' class=\"next\">NEXT</a>")
|
||||
if next_page_url != "":
|
||||
url = urlparse.urljoin(item.url, next_page_url)
|
||||
itemlist.append(Item(channel=item.channel, action="videos", title=">> Página siguiente", url=url, folder=True,
|
||||
viewmode="movie"))
|
||||
fanart=thumbnail))
|
||||
next_page = scrapertools.find_single_match(data, "<a href='([^']+)' class=\"next\">NEXT</a>")
|
||||
if next_page != "":
|
||||
url = urlparse.urljoin(item.url, next_page)
|
||||
itemlist.append(Item(channel=item.channel, action="videos", title=">> Página siguiente", url=url))
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -67,3 +64,4 @@ def play(item):
|
||||
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=media_url,
|
||||
thumbnail=item.thumbnail, show=item.title, server="directo", folder=False))
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -7,18 +7,17 @@ from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'https://www.sunporno.com'
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host +"/most-recent/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/most-viewed/date-last-week/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/top-rated/date-last-week/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas largas" , action="peliculas", url=host + "/long-movies/date-last-month/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host +"/most-recent/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-viewed/date-last-week/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-rated/date-last-week/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas largas" , action="lista", url=host + "/long-movies/date-last-month/"))
|
||||
itemlist.append( Item(channel=item.channel, title="PornStars" , action="catalogo", url=host + "/pornstars/most-viewed/1/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/channels"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
@@ -29,7 +28,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/search/%s/" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -49,16 +48,39 @@ def categorias(item):
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
scrapedurl = scrapedurl + "/most-recent/"
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def catalogo(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<div class="starec">.*?'
|
||||
patron += '<a href="([^"]+)".*?'
|
||||
patron += '<img class="thumb" src="([^"]+)" alt="([^"]+)".*?'
|
||||
patron += '<p class="videos">(\d+)</p>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle, cantidad in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<li><a class="pag-next" href="(.*?)">Next ></a>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append( Item(channel=item.channel , action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
data = scrapertools.get_match(data,'<div id="mainThumbsContainer" class="thumbs-container">(.*?)<div class="clearfix">')
|
||||
data = scrapertools.get_match(data,'class="thumbs-container">(.*?)<div class="clearfix">')
|
||||
patron = '<p class="btime">([^"]+)</p>.*?href="([^"]+)".*?src="([^"]+)".*?title="([^"]+)">'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for duracion,scrapedurl,scrapedthumbnail,scrapedtitle in matches:
|
||||
@@ -67,12 +89,12 @@ def peliculas(item):
|
||||
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
|
||||
next_page_url = scrapertools.find_single_match(data,'<li><a class="pag-next" href="(.*?)">Next ></a>')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
|
||||
fanart=scrapedthumbnail, plot=plot, contentTitle = contentTitle))
|
||||
next_page = scrapertools.find_single_match(data,'<li><a class="pag-next" href="(.*?)">Next ></a>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append( Item(channel=item.channel , action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -84,7 +106,7 @@ def play(item):
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl in matches:
|
||||
scrapedurl = scrapedurl.replace("https:", "http:")
|
||||
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
|
||||
itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl,
|
||||
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -2,13 +2,11 @@
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
|
||||
host = 'http://tabooshare.com'
|
||||
|
||||
@@ -16,7 +14,7 @@ host = 'http://tabooshare.com'
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
|
||||
return itemlist
|
||||
|
||||
@@ -32,11 +30,12 @@ def categorias(item):
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
scrapedtitle = str(scrapedtitle)
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
@@ -45,13 +44,13 @@ def peliculas(item):
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle.replace(" – Free Porn Download", "")
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<span class="current">.*?<a href="(.*?)"')
|
||||
if next_page_url=="http://NaughtyPorn.net/":
|
||||
next_page_url = scrapertools.find_single_match(data,'<span class="current">.*?<a href=\'(.*?)\'')
|
||||
if next_page_url!="":
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<span class="current">.*?<a href="(.*?)"')
|
||||
if next_page=="http://NaughtyPorn.net/":
|
||||
next_page = scrapertools.find_single_match(data,'<span class="current">.*?<a href=\'(.*?)\'')
|
||||
if next_page!="":
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -7,18 +7,16 @@ from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'http://www.tryboobs.com'
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/most-popular/week/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor Valorado" , action="peliculas", url=host + "/top-rated/week/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Modelos" , action="modelos", url=host + "/models/model-viewed/1/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-popular/week/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor Valorado" , action="lista", url=host + "/top-rated/week/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Modelos" , action="categorias", url=host + "/models/model-viewed/1/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
@@ -29,7 +27,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/search/?q=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -37,44 +35,37 @@ def search(item, texto):
|
||||
return []
|
||||
|
||||
|
||||
def modelos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a href="([^"]+)" class="th-model">.*?src="([^"]+)".*?<span class="roliks"><span>(\d+)</span>.*?<span class="title">([^"]+)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<li><a class="pag-next" href="([^"]+)"><ins>Next</ins></a>')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="modelos" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a href="([^"]+)" class="th-cat">.*?<img src="([^"]+)".*?<span>(\d+)</span>.*?<span class="title">([^"]+)</span>'
|
||||
patron = '<a href="([^"]+)" class="th-[^"]+">.*?'
|
||||
patron += 'src="([^"]+)".*?'
|
||||
patron += '<span>(\d+)</span>.*?'
|
||||
patron += '<span class="title">([^"]+)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<li><a class="pag-next" href="([^"]+)"><ins>Next</ins></a>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = 'href="([^"]+)"\s*class="th-video.*?<img src="([^"]+)".*?<span class="time">([^"]+)</span>.*?<span class="title">([^"]+)</span>'
|
||||
patron = 'href="([^"]+)"\s*class="th-video.*?'
|
||||
patron += '<img src="([^"]+)".*?'
|
||||
patron += '<span class="time">([^"]+)</span>.*?'
|
||||
patron += '<span class="title">([^"]+)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,duracion,scrapedtitle in matches:
|
||||
url = scrapedurl
|
||||
@@ -82,12 +73,12 @@ def peliculas(item):
|
||||
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
|
||||
next_page_url = scrapertools.find_single_match(data,'<li><a class="pag-next" href="([^"]+)"><ins>Next</ins></a>')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title, url=url, thumbnail=thumbnail,
|
||||
fanart=thumbnail, plot=plot, contentTitle = contentTitle))
|
||||
next_page = scrapertools.find_single_match(data,'<li><a class="pag-next" href="([^"]+)"><ins>Next</ins></a>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -7,19 +7,15 @@ from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
|
||||
host = 'https://tubedupe.com'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/latest-updates/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="peliculas", url=host + "/top-rated/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url=host + "/most-popular/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/latest-updates/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="lista", url=host + "/top-rated/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/most-popular/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Modelos" , action="categorias", url=host + "/models/?sort_by=model_viewed"))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/channels/?sort_by=cs_viewed"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/?sort_by=avg_videos_popularity"))
|
||||
@@ -32,7 +28,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/search/?q=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -44,27 +40,31 @@ def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
if item.title == "Categorias" or "Canal" :
|
||||
patron = '<a href="([^"]+)" class="list-item" title="([^"]+)">.*?'
|
||||
patron += '<img src="([^"]+)".*?'
|
||||
patron += '<var class="duree">([^"]+) </var>'
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<div class="block-[^"]+">.*?'
|
||||
patron += '<a href="([^"]+)".*?title="([^"]+)".*?'
|
||||
patron += 'src="([^"]+)".*?'
|
||||
if '/models/' in item.url:
|
||||
patron += '<span class="strong">Videos</span>(.*?)</div>'
|
||||
else:
|
||||
patron = '<div class="block-pornstar">.*?<a href="([^"]+)" title="([^"]+)" >.*?src="([^"]+)".*?<div class="col-lg-4-fixed nb-videos">.*?<br>(\d+)</div>'
|
||||
patron += '<var class="duree">([^"]+) </var>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
|
||||
cantidad = cantidad.strip()
|
||||
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
|
||||
scrapedplot = ""
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data, '<li class="active">.*?<a href="([^"]+)" title="Page')
|
||||
if next_page:
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append( Item(channel=item.channel, action="categorias", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
|
||||
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page ) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
@@ -78,12 +78,12 @@ def peliculas(item):
|
||||
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
|
||||
fanart=thumbnail,plot=plot, contentTitle = title))
|
||||
next_page = scrapertools.find_single_match(data, '<li class="active">.*?<a href="([^"]+)" title="Page')
|
||||
if next_page:
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page ) )
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -2,25 +2,21 @@
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
|
||||
host = 'http://free-porn-videos.xyz'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/topics/porn-videos/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Parody" , action="peliculas", url=host + "/topics/free-porn-parodies/"))
|
||||
itemlist.append( Item(channel=item.channel, title="BigTits" , action="peliculas", url=host + "/?s=big+tit"))
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/topics/adult-movie/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Parody" , action="lista", url=host + "/topics/free-porn-parodies/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/topics/porn-videos/"))
|
||||
itemlist.append( Item(channel=item.channel, title="BigTits" , action="lista", url=host + "/?s=big+tit"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
|
||||
@@ -30,7 +26,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/?s=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -38,7 +34,7 @@ def search(item, texto):
|
||||
return []
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
@@ -48,11 +44,12 @@ def peliculas(item):
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle.replace("Permalink to Watch ", "").replace("Porn Online", "").replace("Permalink to ", "")
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , contentTitle=scrapedtitle, plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, contentTitle=scrapedtitle, plot=scrapedplot) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">»</a>')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -2,17 +2,14 @@
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
|
||||
host = 'http://www.vintagetube.club'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
@@ -41,14 +38,17 @@ def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<div class="prev prev-ct">.*?<a href="(.*?)">.*?<img src="(.*?)".*?<span class="prev-tit">(.*?)</span>'
|
||||
patron = '<div class="prev prev-ct">.*?'
|
||||
patron += '<a href="([^"]+)">.*?'
|
||||
patron += '<img src="([^"]+)".*?'
|
||||
patron += '<span class="prev-tit">([^"]+)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = str(scrapedtitle)
|
||||
scrapedurl = host + scrapedurl
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -56,21 +56,25 @@ def peliculas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<div class="prev">.*?<a href="(.*?)">.*?<img src="(.*?)">.*?<span class="prev-tit">(.*?)</span>.*?<div class="prev-dur"><span>(.*?)</span>'
|
||||
patron = '<div class="prev">.*?'
|
||||
patron += '<a href="([^"]+)">.*?'
|
||||
patron += '<img src="([^"]+)">.*?'
|
||||
patron += '<span class="prev-tit">([^"]+)</span>.*?'
|
||||
patron += '<div class="prev-dur"><span>([^"]+)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + str(scrapedtitle)
|
||||
scrapedurl = scrapedurl.replace("/xxx.php?tube=", "")
|
||||
scrapedurl = host + scrapedurl
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
current_page = scrapertools.find_single_match(data,'<li><span class="page">(.*?)</span></li>')
|
||||
next_page = int(current_page) + 1
|
||||
url = item.url
|
||||
url_page = current_page + "/"
|
||||
url = url.replace(url_page, "")
|
||||
next_page_url = url + str(next_page)+"/"
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<span class="page">.*?<a target="_self" href="([^"]+)"')
|
||||
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -80,16 +84,15 @@ def play(item):
|
||||
data = httptools.downloadpage(item.url).data
|
||||
scrapedurl = scrapertools.find_single_match(data,'<iframe frameborder=0 scrolling="no" src=\'(.*?)\'')
|
||||
if scrapedurl == "":
|
||||
scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
|
||||
scrapedurl = scrapedurl.replace ("http:", "")
|
||||
data = httptools.downloadpage("http:" + scrapedurl).data
|
||||
scrapedurl = scrapertools.find_single_match(data,'<iframe src="([^"]+)"')
|
||||
data = httptools.downloadpage(scrapedurl).data
|
||||
else:
|
||||
data = httptools.downloadpage(scrapedurl).data
|
||||
scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
|
||||
scrapedurl = scrapertools.find_single_match(data,'<iframe src="([^"]+)"')
|
||||
data = httptools.downloadpage("https:" + scrapedurl).data
|
||||
media_url = scrapertools.find_single_match(data,'<source src="(.*?)"')
|
||||
scrapedurl = scrapertools.find_single_match(data,'<source src="([^"]+)"')
|
||||
itemlist = []
|
||||
itemlist.append(Item(channel=item.channel, action="play", title=media_url, fulltitle=media_url, url=media_url,
|
||||
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.title, url=scrapedurl,
|
||||
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -2,13 +2,11 @@
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
|
||||
host = 'http://www.vintagexxxsex.com'
|
||||
|
||||
@@ -16,9 +14,9 @@ host = 'http://www.vintagexxxsex.com'
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Top" , action="peliculas", url=host + "/all-top/1/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Novedades" , action="peliculas", url=host + "/all-new/1/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/all-longest/1/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Top" , action="lista", url=host + "/all-top/1/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Novedades" , action="lista", url=host + "/all-new/1/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=host + "/all-longest/1/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
@@ -29,7 +27,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/?s=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -48,15 +46,20 @@ def categorias(item):
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
scrapedurl = host + scrapedurl
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<div class="th">.*?<a href="([^"]+)".*?<img src="([^"]+)".*?<span class="th_nm">([^"]+)</span>.*?<i class="fa fa-clock-o"></i>([^"]+)</span>'
|
||||
patron = '<div class="th">.*?'
|
||||
patron += '<a href="([^"]+)".*?'
|
||||
patron += '<img src="([^"]+)".*?'
|
||||
patron += '<span class="th_nm">([^"]+)</span>.*?'
|
||||
patron += '<i class="fa fa-clock-o"></i>([^"]+)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle,time in matches:
|
||||
contentTitle = scrapedtitle
|
||||
@@ -65,21 +68,12 @@ def peliculas(item):
|
||||
scrapedurl = host + scrapedurl
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
|
||||
|
||||
|
||||
next_page_url = scrapertools.find_single_match(data,'<li><span class="pg_nm">\d+</span></li>.*?href="([^"]+)"')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
|
||||
|
||||
# else:
|
||||
# patron = '<li><span class="pg_nm">\d+</span></li>.*?href="([^"]+)"'
|
||||
# next_page = re.compile(patron,re.DOTALL).findall(data)
|
||||
# next_page = item.url + next_page[0]
|
||||
# itemlist.append( Item(channel=item.channel, action="peliculas", title=next_page[0] , text_color="blue", url=next_page[0] ) )
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail,
|
||||
fanart=thumbnail, plot=plot, contentTitle=contentTitle))
|
||||
next_page = scrapertools.find_single_match(data,'<li><span class="pg_nm">\d+</span></li>.*?href="([^"]+)"')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -88,13 +82,13 @@ def play(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
|
||||
scrapedurl = scrapertools.find_single_match(data,'<iframe src="([^"]+)"')
|
||||
data = httptools.downloadpage(scrapedurl).data
|
||||
scrapedurl = scrapertools.find_single_match(data,'<source src="(.*?)"')
|
||||
scrapedurl = scrapertools.find_single_match(data,'<source src="([^"]+)"')
|
||||
if scrapedurl == "":
|
||||
scrapedurl = "http:" + scrapertools.find_single_match(data,'<iframe src="(.*?)"')
|
||||
scrapedurl = "http:" + scrapertools.find_single_match(data,'<iframe src="([^"]+)"')
|
||||
data = httptools.downloadpage(scrapedurl).data
|
||||
scrapedurl = scrapertools.find_single_match(data,'file: "(.*?)"')
|
||||
scrapedurl = scrapertools.find_single_match(data,'file: "([^"]+)"')
|
||||
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
|
||||
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
|
||||
return itemlist
|
||||
|
||||
@@ -16,12 +16,12 @@ host = 'https://www.vporn.com'
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Novedades" , action="peliculas", url=host + "/newest/month/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/views/month/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor Valoradas" , action="peliculas", url=host + "/rating/month/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Favoritas" , action="peliculas", url=host + "/favorites/month/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas Votada" , action="peliculas", url=host + "/votes/month/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/longest/month/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Novedades" , action="lista", url=host + "/newest/month/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/views/month/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor Valoradas" , action="lista", url=host + "/rating/month/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Favoritas" , action="lista", url=host + "/favorites/month/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas Votada" , action="lista", url=host + "/votes/month/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=host + "/longest/month/"))
|
||||
itemlist.append( Item(channel=item.channel, title="PornStar" , action="catalogo", url=host + "/pornstars/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
@@ -33,7 +33,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/search?q=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -46,18 +46,21 @@ def catalogo(item):
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<div class=\'star\'>.*?<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+)".*?<span> (\d+) Videos'
|
||||
patron = '<div class=\'star\'>.*?'
|
||||
patron += '<a href="([^"]+)">.*?'
|
||||
patron += '<img src="([^"]+)" alt="([^"]+)".*?'
|
||||
patron += '<span> (\d+) Videos'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
|
||||
scrapedurl = host + scrapedurl
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<a class="next" href="([^"]+)">')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
return itemlist
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<a class="next" href="([^"]+)">')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="catalogo", title="Next page >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -73,27 +76,31 @@ def categorias(item):
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
scrapedurl = host + scrapedurl
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<div class="video">.*?<a href="([^"]+)".*?<span class="time">(.*?)</span>.*?<img src="([^"]+)" alt="([^"]+)"'
|
||||
patron = '<div class="video">.*?'
|
||||
patron += '<a href="([^"]+)".*?'
|
||||
patron += '<span class="time">(.*?)</span>.*?'
|
||||
patron += '<img src="([^"]+)" alt="([^"]+)"'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,time,scrapedthumbnail,scrapedtitle in matches:
|
||||
title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
|
||||
next_page_url = scrapertools.find_single_match(data,'<a class="next.*?title="Next Page" href="([^"]+)">')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl,
|
||||
thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle = title))
|
||||
next_page = scrapertools.find_single_match(data,'<a class="next.*?title="Next Page" href="([^"]+)">')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Next page >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -2,13 +2,11 @@
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
|
||||
# https://playpornfree.org/ https://mangoporn.net/ https://watchfreexxx.net/ https://losporn.org/ https://xxxstreams.me/ https://speedporn.net/
|
||||
|
||||
@@ -17,9 +15,9 @@ host = 'https://watchpornfree.ws'
|
||||
def mainlist(item):
|
||||
logger.info("")
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/movies"))
|
||||
itemlist.append( Item(channel=item.channel, title="Parodia" , action="peliculas", url=host + "/category/parodies-hd"))
|
||||
itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/category/clips-scenes"))
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/movies"))
|
||||
itemlist.append( Item(channel=item.channel, title="Parodia" , action="lista", url=host + "/category/parodies-hd"))
|
||||
itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/clips-scenes"))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Año" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
|
||||
@@ -32,14 +30,15 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/?s=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
# <li class="cat-item cat-item-6"><a href="https://watchpornfree.ws/category/all-girl" >All Girl</a> (2,777)
|
||||
# </li>
|
||||
def categorias(item):
|
||||
logger.info("")
|
||||
itemlist = []
|
||||
@@ -50,15 +49,17 @@ def categorias(item):
|
||||
data = scrapertools.get_match(data,'>Years</a>(.*?)</ul>')
|
||||
if item.title == "Categorias":
|
||||
data = scrapertools.get_match(data,'>XXX Genres</div>(.*?)</ul>')
|
||||
patron = '<a href="(.*?)".*?>(.*?)</a>'
|
||||
patron = '<a href="([^"]+)".*?>([^"]+)</a>(.*?)</li>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
for scrapedurl,scrapedtitle,cantidad in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle + cantidad
|
||||
scrapedthumbnail = ""
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info("")
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
@@ -66,10 +67,11 @@ def peliculas(item):
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next »</a>')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next »</a>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -2,35 +2,31 @@
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
|
||||
host = 'http://www.webpeliculasporno.com'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info("pelisalacarta.webpeliculasporno mainlist")
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Ultimas" , action="peliculas", url= host))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas vistas" , action="peliculas", url= host + "/?display=tube&filtre=views"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="peliculas", url= host + "/?display=tube&filtre=rate"))
|
||||
itemlist.append( Item(channel=item.channel, title="Ultimas" , action="lista", url= host))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas vistas" , action="lista", url= host + "/?display=tube&filtre=views"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="lista", url= host + "/?display=tube&filtre=rate"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url= host))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info("pelisalacarta.gmobi mainlist")
|
||||
logger.info()
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/?s=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -46,16 +42,17 @@ def categorias(item):
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
patron = '<li class="border-radius-5 box-shadow">.*?'
|
||||
patron += 'src="([^"]+)".*?'
|
||||
patron = '<li class="border-radius-5 box-shadow">.*?'
|
||||
patron += 'src="([^"]+)".*?'
|
||||
patron += '<a href="([^"]+)" title="([^"]+)">'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
|
||||
@@ -64,11 +61,11 @@ def peliculas(item):
|
||||
contentTitle = title
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle ))
|
||||
next_page_url = scrapertools.find_single_match(data,'<li><a class="next page-numbers" href="([^"]+)">Next')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
|
||||
fanart=thumbnail, plot=plot, contentTitle = contentTitle))
|
||||
next_page = scrapertools.find_single_match(data,'<li><a class="next page-numbers" href="([^"]+)">Next')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -31,7 +31,7 @@ def categorias(item):
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = host + scrapedthumbnail
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -49,7 +49,7 @@ def lista(item):
|
||||
title = scrapedtitle
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail,
|
||||
plot=plot, contentTitle = contentTitle))
|
||||
fanart=thumbnail, plot=plot, contentTitle = contentTitle))
|
||||
next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">»</a></li>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
@@ -64,7 +64,7 @@ def play(item):
|
||||
scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
|
||||
scrapedurl = scrapedurl.replace("pornhub.com/embed/", "pornhub.com/view_video.php?viewkey=")
|
||||
data = httptools.downloadpage(scrapedurl).data
|
||||
scrapedurl = scrapertools.find_single_match(data,'"defaultQuality":true,"format":"mp4","quality":"\d+","videoUrl":"(.*?)"')
|
||||
scrapedurl = scrapertools.find_single_match(data,'"defaultQuality":true,"format":"mp4","quality":"\d+","videoUrl":"([^"]+)"')
|
||||
scrapedurl = scrapedurl.replace("\/", "/")
|
||||
itemlist.append(item.clone(action="play", title=scrapedurl, fulltitle = item.title, url=scrapedurl))
|
||||
return itemlist
|
||||
|
||||
@@ -8,19 +8,18 @@ from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'https://www.xozilla.com'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/latest-updates/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/most-popular/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/top-rated/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/latest-updates/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-popular/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-rated/"))
|
||||
|
||||
itemlist.append( Item(channel=item.channel, title="PornStar" , action="categorias", url=host + "/models/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/channels/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
@@ -31,7 +30,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/search/%s/" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -39,40 +38,42 @@ def search(item, texto):
|
||||
return []
|
||||
|
||||
|
||||
def catalogo(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?<img class="thumb" src="([^"]+)"'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
|
||||
scrapedplot = ""
|
||||
thumbnail = "http:" + scrapedthumbnail
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=thumbnail , plot=scrapedplot , folder=True) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?<img class="thumb" src="([^"]+)".*?</i> (\d+) videos</div>'
|
||||
patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?'
|
||||
patron += '<img class="thumb" src="([^"]+)".*?'
|
||||
patron += '(.*?)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
cantidad = scrapertools.find_single_match(cantidad,'(\d+) videos</div>')
|
||||
if cantidad:
|
||||
scrapedtitle += " (" + cantidad + ")"
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
|
||||
if next_page!="#videos":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
if next_page=="#videos":
|
||||
next_page = scrapertools.find_single_match(data,'from:(\d+)">Next</a>')
|
||||
next_page = urlparse.urljoin(item.url,next_page) + "/"
|
||||
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a href="([^"]+)" class="item.*?data-original="([^"]+)".*?alt="([^"]+)".*?<div class="duration">(.*?)</div>'
|
||||
patron = '<a href="([^"]+)" class="item.*?'
|
||||
patron += 'data-original="([^"]+)".*?'
|
||||
patron += 'alt="([^"]+)".*?'
|
||||
patron += '<div class="duration">(.*?)</div>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
|
||||
url = scrapedurl
|
||||
@@ -81,15 +82,16 @@ def peliculas(item):
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
|
||||
next_page_url = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
|
||||
if next_page_url!="#videos":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
if next_page_url=="#videos":
|
||||
next_page_url = scrapertools.find_single_match(data,'from:(\d+)">Next</a>')
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url) + "/"
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
|
||||
fanart=thumbnail, plot=plot, contentTitle = contentTitle))
|
||||
next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
|
||||
if next_page!="#videos":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
if next_page=="#videos":
|
||||
next_page = scrapertools.find_single_match(data,'from:(\d+)">Next</a>')
|
||||
next_page = urlparse.urljoin(item.url,next_page) + "/"
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -2,27 +2,24 @@
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'http://hd.xtapes.to'
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/full-porn-movies/?display=tube&filtre=date"))
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas Estudio" , action="catalogo", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/?filtre=date&cat=0"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas Vistos" , action="peliculas", url=host + "/?display=tube&filtre=views"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valorado" , action="peliculas", url=host + "/?display=tube&filtre=rate"))
|
||||
itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/?display=tube&filtre=duree"))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/full-porn-movies/?display=tube&filtre=date"))
|
||||
itemlist.append( Item(channel=item.channel, title="Productora" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/?filtre=date&cat=0"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas Vistos" , action="lista", url=host + "/?display=tube&filtre=views"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valorado" , action="lista", url=host + "/?display=tube&filtre=rate"))
|
||||
itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=host + "/?display=tube&filtre=duree"))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
@@ -33,7 +30,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/?s=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -41,33 +38,17 @@ def search(item, texto):
|
||||
return []
|
||||
|
||||
|
||||
def catalogo(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
if item.title=="Canal":
|
||||
data = scrapertools.get_match(data,'<div class="footer-banner">(.*?)<div id="footer-copyright">')
|
||||
else:
|
||||
data = scrapertools.get_match(data,'<li id="menu-item-16"(.*?)</ul>')
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a href="([^"]+)">([^"]+)</a></li>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
return itemlist
|
||||
|
||||
def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = scrapertools.get_match(data,'<a>Categories</a>(.*?)</ul>')
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
if item.title=="Canal":
|
||||
data = scrapertools.get_match(data,'<div class="footer-banner">(.*?)<div id="footer-copyright">')
|
||||
if item.title=="Productora" :
|
||||
data = scrapertools.get_match(data,'<li id="menu-item-16"(.*?)</ul>')
|
||||
if item.title=="Categorias" :
|
||||
data = scrapertools.get_match(data,'<a>Categories</a>(.*?)</ul>')
|
||||
patron = '<a href="([^"]+)">([^"]+)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
@@ -75,16 +56,19 @@ def categorias(item):
|
||||
scrapedthumbnail = ""
|
||||
scrapedtitle = scrapedtitle
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<li class="border-radius-5 box-shadow">.*?src="([^"]+)".*?<a href="([^"]+)" title="([^"]+)">.*?<div class="time-infos".*?>([^"]+)<span class="time-img">'
|
||||
patron = '<li class="border-radius-5 box-shadow">.*?'
|
||||
patron += 'src="([^"]+)".*?<a href="([^"]+)" title="([^"]+)">.*?'
|
||||
patron += '<div class="time-infos".*?>([^"]+)<span class="time-img">'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedthumbnail,scrapedurl,scrapedtitle,duracion in matches:
|
||||
url = urlparse.urljoin(item.url,scrapedurl)
|
||||
@@ -93,13 +77,14 @@ def peliculas(item):
|
||||
contentTitle = title
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle = title, contentTitle = contentTitle, infoLabels={'year':year} ))
|
||||
next_page_url = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next video')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
next_page_url = next_page_url.replace("#038;cat=0#038;", "").replace("#038;filtre=views#038;", "").replace("#038;filtre=rate#038;", "").replace("#038;filtre=duree#038;", "")
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
|
||||
fanart=thumbnail, plot=plot, fulltitle = title, contentTitle = contentTitle))
|
||||
next_page = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next video')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
next_page = next_page.replace("#038;cat=0#038;", "")
|
||||
next_page = next_page.replace("#038;filtre=views#038;", "").replace("&filtre=rate#038;", "&").replace("#038;filtre=duree#038;", "")
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -8,20 +8,16 @@ from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'http://xxxdan.com'
|
||||
|
||||
#NO SE REPRODUCE EL VIDEO QUE ENCUENTRA
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/newest"))
|
||||
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/popular30"))
|
||||
itemlist.append( Item(channel=item.channel, title="Dururacion" , action="peliculas", url=host + "/longest"))
|
||||
itemlist.append( Item(channel=item.channel, title="HD" , action="peliculas", url=host + "/channel30/hd"))
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/newest"))
|
||||
itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/popular30"))
|
||||
itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=host + "/longest"))
|
||||
itemlist.append( Item(channel=item.channel, title="HD" , action="lista", url=host + "/channel30/hd"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/channels"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
@@ -32,7 +28,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/search?query=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -40,58 +36,54 @@ def search(item, texto):
|
||||
return []
|
||||
|
||||
|
||||
def catalogo(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = scrapertools.get_match(data,'<h3>CLIPS</h3>(.*?)<h3>FILM</h3>')
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<li><a href="([^"]+)" title="">.*?<span class="videos-count">([^"]+)</span><span class="title">([^"]+)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,cantidad,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a href="([^"]+)" rel="tag".*?title="([^"]+)".*?data-original="([^"]+)".*?<span class="score">(\d+)</span>'
|
||||
patron = '<a href="([^"]+)" rel="tag".*?'
|
||||
patron += 'title="([^"]+)".*?'
|
||||
patron += 'data-original="([^"]+)".*?'
|
||||
patron += '<span class="score">(\d+)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
|
||||
scrapedurl = scrapedurl.replace("channel", "channel30")
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail , fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<li><figure>\s*<a href="([^"]+)" class="img\s*" title="([^"]+)".*?data-original="([^"]+)".*?<time datetime="\w+">([^"]+)</time>'
|
||||
|
||||
|
||||
patron = '<li><figure>\s*<a href="([^"]+)".*?'
|
||||
patron += 'data-original="([^"]+)".*?'
|
||||
patron += '<time datetime="\w+">([^"]+)</time>'
|
||||
patron += '(.*?)</ul>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches:
|
||||
for scrapedurl,scrapedthumbnail,duracion,calidad in matches:
|
||||
url = scrapedurl
|
||||
scrapedtitle = scrapertools.find_single_match(scrapedurl,'https://xxxdan.com/es/.*?/(.*?).html')
|
||||
contentTitle = scrapedtitle
|
||||
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
|
||||
if '<li class="hd">' in calidad :
|
||||
title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
|
||||
next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">→</a>')
|
||||
if next_page_url!="":
|
||||
next_page_url = next_page_url.replace("http://xxxdan.com/","")
|
||||
next_page_url = "/" + next_page_url
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
|
||||
fanart=thumbnail, plot=plot, contentTitle = contentTitle))
|
||||
next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">→</a>')
|
||||
if next_page!="":
|
||||
next_page = next_page.replace("http://xxxdan.com/","")
|
||||
next_page = "/" + next_page
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
@@ -12,14 +11,13 @@ from core import tmdb
|
||||
|
||||
host = 'https://xxxparodyhd.net'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/genre/new-release/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/movies/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Parodias" , action="peliculas", url=host + "/genre/parodies/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/genre/clips-scenes/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/genre/clips-scenes/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/movies/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/genre/new-release/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Parodias" , action="lista", url=host + "/genre/parodies/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/categories"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
@@ -31,7 +29,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/?s=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -46,29 +44,34 @@ def categorias(item):
|
||||
if item.title == "Canal" :
|
||||
data = scrapertools.get_match(data,'>Studios</a>(.*?)</ul>')
|
||||
else:
|
||||
data = scrapertools.get_match(data,'<div class=\'sub-container\' style=\'display: none;\'><ul class=\'sub-menu\'>(.*?)</ul>')
|
||||
data = scrapertools.get_match(data,'>Categories</a>(.*?)</ul>')
|
||||
patron = '<a href="([^"]+)">([^<]+)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<div data-movie-id="\d+" class="ml-item">.*?<a href="([^"]+)".*?oldtitle="([^"]+)".*?<img src="([^"]+)".*?rel="tag">(.*?)</a>'
|
||||
patron = '<div data-movie-id="\d+" class="ml-item">.*?'
|
||||
patron += '<a href="([^"]+)".*?'
|
||||
patron += 'oldtitle="([^"]+)".*?'
|
||||
patron += '<img src="([^"]+)".*?rel="tag">(.*?)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,year in matches:
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedyear in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = str(scrapedtitle) + " " + year
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<li class=\'active\'>.*?href=\'([^\']+)\'>')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot, infoLabels={'year':scrapedyear}) )
|
||||
tmdb.set_infoLabels(itemlist, True)
|
||||
next_page = scrapertools.find_single_match(data,'<li class=\'active\'>.*?href=\'([^\']+)\'>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -2,24 +2,21 @@
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
|
||||
host = 'http://xxxstreams.org'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url= host + "/category/full-porn-movie-stream/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Clips" , action="peliculas", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/category/full-porn-movie-stream/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url= host + "/category/full-porn-movie-stream/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Clips" , action="lista", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
|
||||
@@ -29,7 +26,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/?s=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -40,36 +37,48 @@ def search(item, texto):
|
||||
def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
patron = '<li id="menu-item.*?class="menu-item menu-item-type-taxonomy.*?<a href="([^<]+)">(.*?)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data1 = scrapertools.get_match(data,'<h5>Popular Categories<br />(.*?)</aside>')
|
||||
if item.title == "Canal" :
|
||||
data1 = scrapertools.get_match(data,'>Top sites</a>(.*?)</ul>')
|
||||
data1 += scrapertools.get_match(data,'Downloads</h2>(.*?)</ul>')
|
||||
patron = '<a href="([^<]+)">([^<]+)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data1)
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
patron = '<div class="entry-content">.*?<img src="([^"]+)".*?<a href="([^<]+)".*?<span class="screen-reader-text">(.*?)</span>'
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<div class="entry-content">.*?'
|
||||
patron += '<img src="([^"]+)".*?'
|
||||
patron += '<a href="([^<]+)".*?'
|
||||
patron += '<span class="screen-reader-text">(.*?)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next →</a>')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
if '/HD' in scrapedtitle : title= "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle
|
||||
elif 'FullHD' in scrapedtitle : title= "[COLOR red]" + "FullHD" + "[/COLOR] " + scrapedtitle
|
||||
elif '1080' in scrapedtitle : title= "[COLOR red]" + "1080p" + "[/COLOR] " + scrapedtitle
|
||||
else: title = scrapedtitle
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next →</a>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista" , title="Next page >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = scrapertools.get_match(data,'--more--></p>(.*?)/a></p>')
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a href="([^"]+)".*?class="external">(.*?)<'
|
||||
@@ -77,13 +86,14 @@ def findvideos(item):
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, fulltitle=item.title, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, fulltitle=item.title,
|
||||
url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
|
||||
logger.info()
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = httptools.downloadpage(item.url).data
|
||||
itemlist = servertools.find_video_items(data=data)
|
||||
for videoitem in itemlist:
|
||||
videoitem.title = item.title
|
||||
|
||||
@@ -7,17 +7,15 @@ from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'https://www.youjizz.com'
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/newest-clips/1.html"))
|
||||
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/most-popular/1.html"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/top-rated-week/1.html"))
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/newest-clips/1.html"))
|
||||
itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-popular/1.html"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-rated-week/1.html"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
@@ -28,7 +26,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/search/%s-1.html" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -42,7 +40,7 @@ def categorias(item):
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = scrapertools.get_match(data,'<h4>Trending Categories</h4>(.*?)</ul>')
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title="Big Tits" , url="https://www.youjizz.com/search/big-tits-1.html?" , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title="big tits", url= host + "/search/big-tits-1.html?") )
|
||||
patron = '<li><a href="([^"]+)">([^"]+)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
@@ -50,29 +48,41 @@ def categorias(item):
|
||||
scrapedthumbnail = ""
|
||||
scrapedtitle = scrapedtitle
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail , plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<div class="video-item">.*?class="frame image" href="([^"]+)".*?data-original="([^"]+)" />.*?<div class="video-title">.*?>(.*?)</a>.*?<span class="time">(.*?)</span>'
|
||||
patron = '<div class="video-item">.*?'
|
||||
patron += 'class="frame image" href="([^"]+)".*?'
|
||||
patron += 'data-original="([^"]+)" />.*?'
|
||||
patron += '<div class="video-title">.*?'
|
||||
patron += '>(.*?)</a>.*?'
|
||||
patron += '<span class="time">(.*?)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
|
||||
url = urlparse.urljoin(item.url,scrapedurl)
|
||||
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
|
||||
quality= ""
|
||||
if '-720-' in scrapedthumbnail : quality = "720"
|
||||
if '-1080-' in scrapedthumbnail : quality = "1080"
|
||||
if quality:
|
||||
title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + quality + "p[/COLOR] " + scrapedtitle
|
||||
contentTitle = title
|
||||
thumbnail = "http:" + scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
|
||||
next_page_url = scrapertools.find_single_match(data,'<li><a class="pagination-next" href="([^"]+)">Next »</a>')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
|
||||
plot=plot, quality= quality, contentTitle = contentTitle))
|
||||
next_page = scrapertools.find_single_match(data,'<li><a class="pagination-next" href="([^"]+)">Next »</a>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>" , text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -80,7 +90,14 @@ def play(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cache_page(item.url)
|
||||
media_url = scrapertools.find_single_match(data, '"filename"\:"(.*?)"')
|
||||
data = scrapertools.get_match(data,'var encodings(.*?)var')
|
||||
if '360' in data:
|
||||
patron = '"360".*?"filename"\:"(.*?)"'
|
||||
if '720' in data:
|
||||
patron = '"720".*?"filename"\:"(.*?)"'
|
||||
if '1080' in data:
|
||||
patron = '"1080".*?"filename"\:"(.*?)"'
|
||||
media_url = scrapertools.find_single_match(data, patron)
|
||||
media_url = "https:" + media_url.replace("\\", "")
|
||||
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=media_url,
|
||||
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
|
||||
|
||||
@@ -7,19 +7,18 @@ from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'https://www.youporn.com'
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/browse/time/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/browse/views/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/top_rated/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Pornstars" , action="catalogo", url=host + "/pornstars/most_popular/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/alphabetical/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas", action="lista", url=host + "/browse/time/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas Vistas", action="lista", url=host + "/browse/views/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valorada", action="lista", url=host + "/top_rated/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal", action="categorias", url=host + "/channels/rating/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Pornstars", action="catalogo", url=host + "/pornstars/most_popular/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/alphabetical/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
|
||||
@@ -29,7 +28,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/search/?query=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -41,19 +40,23 @@ def catalogo(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = scrapertools.get_match(data,'<a href="/pornstars/most_popular/" class="selected">All</a>(.*?)<i class=\'icon-menu-right\'></i></a>')
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a href="([^"]+)".*?data-original="([^"]+)".*?<span class="porn-star-name">([^"]+)</span>.*?<span class="video-count">([^"]+)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
data1 = scrapertools.get_match(data,'>Most Popular Pornstars<(.*?)<i class=\'icon-menu-right\'></i></a>')
|
||||
patron = '<a href="([^"]+)".*?'
|
||||
patron += 'data-original="([^"]+)".*?'
|
||||
patron += '<span class="porn-star-name">([^"]+)</span>.*?'
|
||||
patron += '<span class="video-count">([^"]+)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data1)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)" data-page-number=.*?>')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<div class="currentPage".*?<a href="([^"]+)"')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -61,25 +64,41 @@ def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = scrapertools.get_match(data,'<div class=\'row alphabetical\'.*?>(.*?)<h2 class="heading4">Popular by Country</h2>')
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a href="([^"]+)".*?data-original="([^"]+)".*?<p>([^"]+)<span>([^"]+)</span>'
|
||||
if item.title == "Canal":
|
||||
data = scrapertools.get_match(data,'>All</div>(.*?)<i class=\'icon-menu-right\'></i></a>')
|
||||
if item.title == "Categorias":
|
||||
data = scrapertools.get_match(data,'<div class=\'row alphabetical\'.*?>(.*?)>Popular by Country</h2>')
|
||||
patron = '<a href="([^"]+)".*?'
|
||||
patron += '<img src=(.*?)>.*?'
|
||||
patron += '>([^<]+) (?:Videos|videos)<'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
|
||||
for scrapedurl,scrapedthumbnail,cantidad in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = "http:" + scrapedthumbnail
|
||||
scrapedtitle = scrapedtitle + " (" + cantidad +")"
|
||||
thumbnail = scrapertools.find_single_match(scrapedthumbnail,'data-original="([^"]+)"')
|
||||
scrapedtitle = scrapertools.find_single_match(scrapedthumbnail,'alt="([^"]+)"')
|
||||
if scrapedtitle == "" :
|
||||
scrapedtitle = scrapertools.find_single_match(scrapedthumbnail,'alt=\'([^\']+)\'')
|
||||
title = scrapedtitle + " (" + cantidad +")"
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl,
|
||||
thumbnail=thumbnail, fanart=thumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<div class="currentPage".*?<a href="([^"]+)"')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a href="([^"]+)" class=\'video-box-image\'.*?data-original="([^"]+)".*?<div class="video-box-title">([^"]+)</div>.*?<div class="video-duration">(.*?)</div>'
|
||||
patron = '<a href="([^"]+)" class=\'video-box-image\'.*?'
|
||||
patron += 'data-original="([^"]+)".*?'
|
||||
patron += '<div class="video-box-title">([^"]+)</div>.*?'
|
||||
patron += '<div class="video-duration">(.*?)</div>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
|
||||
url = urlparse.urljoin(item.url,scrapedurl)
|
||||
@@ -87,12 +106,12 @@ def peliculas(item):
|
||||
contentTitle = title
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
|
||||
next_page_url = scrapertools.find_single_match(data,'<div class="prev-next"><a href="([^"]+)"')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
|
||||
fanart=thumbnail, plot=plot, contentTitle = contentTitle))
|
||||
next_page = scrapertools.find_single_match(data,'<div class="currentPage".*?<a href="([^"]+)"')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -2,21 +2,18 @@
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
|
||||
host = 'http://yuuk.net'
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/list-genres/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar" , action="search"))
|
||||
return itemlist
|
||||
@@ -27,7 +24,7 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host+ "/?s=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -39,34 +36,40 @@ def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
itemlist.append( Item(channel=item.channel, title="Censored" , action="peliculas", url=host + "/category/censored/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Uncensored" , action="peliculas", url=host + "/category/uncensored/"))
|
||||
patron = '<li><a href="([^"]+)" title="[^"]+"><span>([^"]+)</span><span>([^"]+)</span></a></li>'
|
||||
itemlist.append( Item(channel=item.channel, title="Censored" , action="lista", url=host + "/category/censored/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Uncensored" , action="lista", url=host + "/category/uncensored/"))
|
||||
patron = '<li><a href="([^"]+)" title="[^"]+"><span>([^"]+)</span><span>([^"]+)</span></a></li>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
for scrapedurl,scrapedtitle,cantidad in matches:
|
||||
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<div class="featured-wrap clearfix">.*?<a href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?>#([^"]+) Full HD JAV</a>'
|
||||
patron = '<div class="featured-wrap clearfix">.*?'
|
||||
patron += '<a href="([^"]+)" title="([^"]+)".*?'
|
||||
patron += 'src="([^"]+)".*?'
|
||||
patron += '>#([^"]+)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,calidad in matches:
|
||||
scrapedplot = ""
|
||||
calidad = calidad.replace(" Full HD JAV", "")
|
||||
scrapedtitle = "[COLOR red]" + calidad + "[/COLOR] " + scrapedtitle
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<li><a rel=\'nofollow\' href=\'([^\']+)\' class=\'inactive\'>Next')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<li class=\'current\'>.*?<a rel=\'nofollow\' href=\'([^\']+)\' class=\'inactive\'>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
BIN
plugin.video.alfa/resources/media/channels/thumb/pelis123.png
Normal file
BIN
plugin.video.alfa/resources/media/channels/thumb/pelis123.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.3 KiB |
Reference in New Issue
Block a user