@@ -1,69 +0,0 @@
{
    "id": "cuevana3",
    "name": "Cuevana 3",
    "active": true,
    "adult": false,
    "language": ["lat", "cast"],
    "thumbnail": "https://www.cuevana3.co/wp-content/themes/cuevana3/public/img/cnt/cuevana3.png",
    "banner": "",
    "version": 1,
    "categories": [
        "movie"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "LAT",
                "CAST",
                "VOSE"
            ]
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - Terror",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_documentales",
            "type": "bool",
            "label": "Incluir en Novedades - Documentales",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
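# For context: settings declared in a channel JSON like the one above are read
# through config.get_setting(name, channel_id), the accessor used elsewhere in
# this diff (e.g. config.get_setting('modo_grafico', __channel__)). A minimal
# sketch, reusing the ids from the deleted file above:
from platformcode import config

if config.get_setting('include_in_global_search', 'cuevana3'):
    pass  # the channel would take part in the global search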

plugin.video.alfa/addon.xml  (Executable file → Normal file)
@@ -1,45 +1,43 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.31" provider-name="Alfa Addon">
    <requires>
        <import addon="xbmc.python" version="2.1.0"/>
        <import addon="script.module.libtorrent" optional="true"/>
    </requires>
    <extension point="xbmc.python.pluginsource" library="default.py">
        <provides>video</provides>
    </extension>
    <extension point="xbmc.addon.metadata">
        <summary lang="es">Navega con Kodi por páginas web.</summary>
        <assets>
            <icon>logo-cumple.png</icon>
            <fanart>fanart1.jpg</fanart>
            <screenshot>resources/media/themes/ss/1.jpg</screenshot>
            <screenshot>resources/media/themes/ss/2.jpg</screenshot>
            <screenshot>resources/media/themes/ss/3.jpg</screenshot>
            <screenshot>resources/media/themes/ss/4.jpg</screenshot>
        </assets>
        <news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
¤ maxipelis24 ¤ cuevana3 ¤ pelisplusco
¤ mejortorrent ¤ newpct1

[COLOR green][B]Novedades[/B][/COLOR]
¤ Mundopelis ¤ thevideobee ¤ tusfiles
¤ vup

¤ Agradecimientos a @mac12m99 y @chivmalev por colaborar con ésta versión

        </news>
        <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
        <summary lang="en">Browse web pages using Kodi</summary>
        <description lang="en">Browse web pages using Kodi, you can easily watch their video content.</description>
        <disclaimer>[COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR]</disclaimer>
        <platform>all</platform>
        <license>GNU GPL v3</license>
        <forum>foro</forum>
        <website>web</website>
        <email>my@email.com</email>
        <source>https://github.com/alfa-addon/addon</source>
    </extension>
    <extension point="xbmc.service" library="videolibrary_service.py" start="login|startup">
    </extension>
</addon>

<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.8.2" provider-name="Alfa Addon">
    <requires>
        <import addon="xbmc.python" version="2.1.0"/>
        <import addon="script.module.libtorrent" optional="true"/>
    </requires>
    <extension point="xbmc.python.pluginsource" library="default.py">
        <provides>video</provides>
    </extension>
    <extension point="xbmc.addon.metadata">
        <summary lang="es">Navega con Kodi por páginas web.</summary>
        <assets>
            <icon>logo-cumple.png</icon>
            <fanart>fanart1.jpg</fanart>
            <screenshot>resources/media/themes/ss/1.jpg</screenshot>
            <screenshot>resources/media/themes/ss/2.jpg</screenshot>
            <screenshot>resources/media/themes/ss/3.jpg</screenshot>
            <screenshot>resources/media/themes/ss/4.jpg</screenshot>
        </assets>
        <news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
¤ allcalidad ¤ animeflv ¤ streamcloud
¤ pack +18 ¤ divxtotal ¤ elitetorrent
¤ estrenosgo ¤ mejortorrent ¤ mejortorrent1
¤ newpct1 ¤ pelismagnet

Agradecimientos a @shlibidon y @nyicris por colaborar con esta versión

        </news>
        <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
        <summary lang="en">Browse web pages using Kodi</summary>
        <description lang="en">Browse web pages using Kodi, you can easily watch their video content.</description>
        <disclaimer>[COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR]</disclaimer>
        <platform>all</platform>
        <license>GNU GPL v3</license>
        <forum>foro</forum>
        <website>web</website>
        <email>my@email.com</email>
        <source>https://github.com/alfa-addon/addon</source>
    </extension>
    <extension point="xbmc.service" library="videolibrary_service.py" start="login|startup">
    </extension>
</addon>

@@ -46,7 +46,7 @@ def categorias(item):
scrapedthumbnail = "https:" + scrapedthumbnail
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">»</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -57,7 +57,7 @@ def categorias(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="item">.*?'
patron += '<a href="([^"]+)" title="(.*?)">.*?'
@@ -72,7 +72,7 @@ def lista(item):
thumbnail = "https:" + scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle))
fanart=thumbnail, plot=plot, contentTitle = contentTitle))
next_page = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">»</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -82,7 +82,7 @@ def lista(item):

def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.fulltitle

@@ -109,7 +109,7 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"')
video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"')
partes = video_url.split('||')
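# A minimal sketch of the extraction above with a hypothetical page snippet;
# the two regexes and the '||' split mirror the lines just shown, and
# scrapertools is the channel helper already imported in this file.
sample = 'var video_url = "https://cdn.example/v/"; video_url += "abc||def";'
video_url = scrapertools.find_single_match(sample, 'var video_url = "([^"]*)"')
video_url += scrapertools.find_single_match(sample, 'video_url \+= "([^"]*)"')
partes = video_url.split('||')  # -> ['https://cdn.example/v/abc', 'def']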

@@ -70,7 +70,7 @@ def lista(item):
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = scrapedtitle))
fanart=thumbnail, contentTitle = scrapedtitle))
next_page = scrapertools.find_single_match(data, '<span class="text16">\d+</span> <a href="..([^"]+)"')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)

@@ -19,7 +19,7 @@ list_servers = ['rapidvideo', 'streamango', 'fastplay', 'flashx', 'openload', 'v

__channel__='allcalidad'

host = "https://allcalidad.net/"
host = "https://allcalidad.io/"

try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
@@ -29,6 +29,12 @@ except:

def mainlist(item):
logger.info()
import ast
from core import jsontools
data = '{"country_code":"PE","country_name":"Peru","city":null,"postal":null,"latitude":-12.0433,"longitude":-77.0283,"IPv4":"190.41.210.15","state":null}'
data = data.replace("null",'"null"')
logger.info("Intel22 %s" %data)
user_loc = ast.literal_eval(data)
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host, thumbnail = get_thumb("newest", auto = True)))

@@ -33,7 +33,7 @@ SERVERS = {"26": "powvideo", "45": "okru", "75": "openload", "12": "netutv", "65
list_servers = ['powvideo', 'okru', 'openload', 'netutv', 'thevideos', 'spruto', 'stormo', 'idowatch', 'nowvideo',
'fastplay', 'raptu', 'tusfiles']

host = "http://allpeliculas.io/"
host = "https://allpeliculas.io/"

def mainlist(item):
logger.info()

@@ -14,6 +14,7 @@ host = 'http://www.alsoporn.com'
def mainlist(item):
logger.info()
itemlist = []
# itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/en/g/All/new/1"))
itemlist.append( Item(channel=item.channel, title="Top" , action="lista", url=host + "/g/All/top/1"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
@@ -33,23 +34,6 @@ def search(item, texto):
return []


def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<h3>CLIPS</h3>(.*?)<h3>FILM</h3>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li><a href="([^"]+)" title="">.*?'
patron += '<span class="videos-count">([^"]+)</span><span class="title">([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,cantidad,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist


def categorias(item):
logger.info()
itemlist = []
@@ -62,14 +46,14 @@ def categorias(item):
scrapedplot = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
return sorted(itemlist, key=lambda i: i.title)


def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="alsoporn_prev">.*?'
patron += '<a href="([^"]+)">.*?'
@@ -82,7 +66,8 @@ def lista(item):
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = scrapedtitle))
fanart=thumbnail, plot=plot, contentTitle = scrapedtitle))

next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" target="_self"><span class="alsoporn_page">NEXT</span></a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -93,12 +78,12 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data,'<iframe frameborder=0 scrolling="no" src=\'([^\']+)\'')
data = scrapertools.cachePage(scrapedurl)
data = httptools.downloadpage(item.url).data
scrapedurl1 = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
scrapedurl1 = scrapedurl1.replace("//www.playercdn.com/ec/i2.php?", "https://www.trinitytube.xyz/ec/i2.php?")
data = scrapertools.cachePage(scrapedurl1)
data = httptools.downloadpage(item.url).data
scrapedurl2 = scrapertools.find_single_match(data,'<source src="(.*?)"')
itemlist.append(item.clone(action="play", title=item.title, fulltitle = item.title, url=scrapedurl2))
return itemlist

@@ -41,7 +41,7 @@ def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<strong class="popup-title">Canales</strong>(.*?)<strong>Models</strong>')
data = scrapertools.find_single_match(data,'<strong class="popup-title">Canales</strong>(.*?)<strong>Models</strong>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li><a class="item" href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron,re.DOTALL).findall(data)
@@ -49,7 +49,7 @@ def catalogo(item):
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -71,8 +71,8 @@ def categorias(item):
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
return sorted(itemlist, key=lambda i: i.title)


def lista(item):
@@ -91,7 +91,7 @@ def lista(item):
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = title))
fanart=thumbnail, contentTitle = title))
next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)

@@ -215,16 +215,18 @@ def findvideos(item):
itemlist = []
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)
videos = scrapertools.find_single_match(data, 'var videos = (.*?);')

videos_json = jsontools.load(videos)
for video_lang in videos_json.items():
language = video_lang[0]
matches = scrapertools.find_multiple_matches(str(video_lang[1]), 'src="([^"]+)"')
for source in matches:
new_data = httptools.downloadpage(source).data
if 'redirector' in source:
matches = scrapertools.find_multiple_matches(str(video_lang[1]), "code': '(.*?)'")

for source in matches:
url = source
if 'redirector' in source:
new_data = httptools.downloadpage(source).data
url = scrapertools.find_single_match(new_data, 'window.location.href = "([^"]+)"')
elif 'embed' in source:
elif 'animeflv.net/embed' in source:
source = source.replace('embed', 'check')
new_data = httptools.downloadpage(source).data
json_data = jsontools.load(new_data)

@@ -32,7 +32,7 @@ def mainlist(item):

itemlist = list()

itemlist.append(Item(channel=item.channel, action="lista", title="Series", contentTitle="Series", url=host+"/lista-de-anime.php",
itemlist.append(Item(channel=item.channel, action="lista", title="Series", contentTitle="Series", url=host+"/catalogo.php?g=&t=series&o=0",
thumbnail=thumb_series, range=[0,19]))
itemlist.append(Item(channel=item.channel, action="lista", title="Películas", contentTitle="Películas", url=host+"/catalogo.php?g=&t=peliculas&o=0",
thumbnail=thumb_series, range=[0,19] ))

@@ -10,6 +10,7 @@ from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from lib import jsunpack
from platformcode import config, logger


@@ -26,13 +27,13 @@ def mainlist(item):
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas",
url=urlparse.urljoin(host, "/category/pelicula"), type='pl', pag=1))
#itemlist.append(Item(channel=item.channel, action="lista", title="Series",
# url=urlparse.urljoin(host, "/category/serie"), type='sr', pag=1))
itemlist.append(Item(channel=item.channel, action="lista", title="Series",
url=urlparse.urljoin(host, "/category/serie"), type='sr', pag=1))
itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre'))
itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality'))
itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host, cat='abc'))
itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host, cat='year'))
#itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/search?q="))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/?s="))
autoplay.show_option(item.channel, itemlist)
return itemlist

@@ -42,8 +43,10 @@ def category(item):
itemlist = list()
data = httptools.downloadpage(host).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
action = "lista"
if item.cat == 'abc':
data = scrapertools.find_single_match(data, '<div class="Body Container">(.+?)<main>')
action = "lista_a"
elif item.cat == 'genre':
data = scrapertools.find_single_match(data, '<a>Géneros<\/a><ul class="sub.menu">(.+?)<a>Año<\/a>')
elif item.cat == 'year':
@@ -54,7 +57,8 @@ def category(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
if scrapedtitle != 'Próximas Películas':
itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl, type='cat', pag=0))
if not scrapedurl.startswith("http"): scrapedurl = host + scrapedurl
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, type='cat', pag=0))
return itemlist


@@ -62,8 +66,6 @@ def search_results(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
logger.info(data)
patron = '<span class=.post-labels.>([^<]+)</span>.*?class="poster-bg" src="([^"]+)"/>.*?<h4>.*?'
patron +=">(\d{4})</a>.*?<h6>([^<]+)<a href='([^']+)"
matches = scrapertools.find_multiple_matches(data, patron)
@@ -84,16 +86,55 @@ def search_results(item):

def search(item, texto):
logger.info()

texto = texto.replace(" ", "+")
item.url = item.url + texto

item.pag = 0
if texto != '':
return search_results(item)
return lista(item)


def episodios(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(item.url).data
data = data.replace('"ep0','"epp"')
patron = '(?is)MvTbImg B.*?href="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += 'span>Episodio ([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedepi in matches:
title="1x%s - %s" % (scrapedepi, item.contentSerieName)
#urls = scrapertools.find_multiple_matches(scrapedurls, 'href="([^"]+)')
itemlist.append(item.clone(action='findvideos', title=title, url=scrapedurl, thumbnail=scrapedthumbnail, type=item.type,
infoLabels=item.infoLabels))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
url=item.url, action="add_serie_to_library", extra="episodios",
contentSerieName=item.contentSerieName))
return itemlist


def lista_a(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?is)Num">.*?href="([^"]+)".*?'
patron += 'src="([^"]+)".*?>.*?'
patron += '<strong>([^<]+)<.*?'
patron += '<td>([^<]+)<.*?'
patron += 'href.*?>([^"]+)<\/a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedtype in matches:
action = "findvideos"
if "Serie" in scrapedtype: action = "episodios"
itemlist.append(item.clone(action=action, title=scrapedtitle, contentTitle=scrapedtitle, contentSerieName=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
infoLabels={'year':scrapedyear}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist


def lista(item):
logger.info()
next = True
itemlist = []

data = httptools.downloadpage(item.url).data
@@ -106,14 +147,12 @@ def lista(item):
patron += '<span.*?>([^"]+)<\/span>.+?' #scrapedyear
patron += '<a.+?>([^"]+)<\/a>' #scrapedtype
matches = scrapertools.find_multiple_matches(data, patron)

for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedtype in matches:
title="%s - %s" % (scrapedtitle,scrapedyear)

new_item = Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
type=scrapedtype, infoLabels={'year':scrapedyear})

if scrapedtype == 'sr':
if scrapedtype == 'Serie':
new_item.contentSerieName = scrapedtitle
new_item.action = 'episodios'
else:
@@ -135,39 +174,45 @@ def lista(item):
def findvideos(item):
logger.info()
itemlist = []
if not item.urls:
data = httptools.downloadpage(item.url).data
matches = scrapertools.find_multiple_matches(data, 'http://www.sutorimu[^"]+')
else:
matches = item.urls
for url in matches:
if "spotify" in url:
data = httptools.downloadpage(item.url).data.replace("&quot;",'"').replace("amp;","").replace("#038;","")
matches = scrapertools.find_multiple_matches(data, 'TPlayerTb.*?id="([^"]+)".*?src="([^"]+)"')
matches_del = scrapertools.find_multiple_matches(data, '(?is)<!--<td>.*?-->')
# Borra los comentarios - que contienen enlaces duplicados
for del_m in matches_del:
data = data.replace(del_m, "")
# Primer grupo de enlaces
for id, url1 in matches:
language = scrapertools.find_single_match(data, '(?is)data-tplayernv="%s".*?span><span>([^<]+)' %id)
data1 = httptools.downloadpage(url1).data
url = scrapertools.find_single_match(data1, 'src="([^"]+)')
if "a-x" in url:
data1 = httptools.downloadpage(url, headers={"Referer":url1}).data
url = scrapertools.find_single_match(data1, 'src: "([^"]+)"')
if "embed.php" not in url:
itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url))
continue
data = httptools.downloadpage(url).data
language = scrapertools.find_single_match(data, '(?:ɥɔɐәlq|lɐʇәɯllnɟ) (\w+)')
if not language: language = "VOS"
bloque = scrapertools.find_single_match(data, "description articleBody(.*)/div")
urls = scrapertools.find_multiple_matches(bloque, "iframe src='([^']+)")
if urls:
# cuando es streaming
for url1 in urls:
if "luis" in url1:
data = httptools.downloadpage(url1).data
url1 = scrapertools.find_single_match(data, 'file: "([^"]+)')
itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url1))
else:
# cuando es descarga
bloque = bloque.replace('"',"'")
urls = scrapertools.find_multiple_matches(bloque, "href='([^']+)")
for url2 in urls:
itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url2))
if "data-video" in bloque:
urls = scrapertools.find_multiple_matches(bloque, "data-video='([^']+)")
for url2 in urls:
itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = "https://tinyurl.com/%s" %url2 ))
for item1 in itemlist:
if "tinyurl" in item1.url:
item1.url = httptools.downloadpage(item1.url, follow_redirects=False, only_headers=True).headers.get("location", "")
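# Hedged sketch of the tinyurl resolution just above: with
# follow_redirects=False and only_headers=True, httptools returns the 30x
# response itself, and its Location header carries the final video URL.
# The short id below is hypothetical.
resp = httptools.downloadpage("https://tinyurl.com/abc123",
                              follow_redirects=False, only_headers=True)
final_url = resp.headers.get("location", "")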
data1 = httptools.downloadpage(url).data
packed = scrapertools.find_single_match(data1, "(?is)eval\(function\(p,a,c,k,e.*?</script>")
unpack = jsunpack.unpack(packed)
urls = scrapertools.find_multiple_matches(unpack, '"file":"([^"]+).*?label":"([^"]+)')
for url2, quality in urls:
itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2))
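# For context, a small sketch of the jsunpack step above with a hypothetical
# unpacked payload: once jsunpack.unpack() undoes the
# eval(function(p,a,c,k,e,...)) wrapper, plain "file"/"label" pairs remain.
unpacked = 'sources:[{"file":"https://cdn.example/v.mp4","label":"720p"}]'
pairs = scrapertools.find_multiple_matches(unpacked, '"file":"([^"]+).*?label":"([^"]+)')
# pairs -> [('https://cdn.example/v.mp4', '720p')]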
# Segundo grupo de enlaces
matches = scrapertools.find_multiple_matches(data, '<span><a rel="nofollow" target="_blank" href="([^"]+)"')
for url in matches:
data1 = httptools.downloadpage(url).data
matches1 = scrapertools.find_multiple_matches(data1, '"ser".*?</tr>')
for ser in matches1:
ser = ser.replace("×","x")
aud = scrapertools.find_single_match(ser, 'aud"><i class="([^"]+)')
sub = scrapertools.find_single_match(ser, 'sub"><i class="([^"]+)')
quality = scrapertools.find_single_match(ser, 'res">.*?x([^<]+)')
language = "Versión RAW"
if aud == "jp" and sub == "si":
language = "Sub. Español"
matches2 = scrapertools.find_multiple_matches(ser, 'href="([^"]+)')
for url2 in matches2:
itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)

@@ -1,22 +0,0 @@
{
    "id": "beeg",
    "name": "Beeg",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "beeg.png",
    "banner": "beeg.png",
    "categories": [
        "adult"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,146 +0,0 @@
# -*- coding: utf-8 -*-

import re
import urllib

from core import jsontools as json, httptools
from core import scrapertools
from core.item import Item
from platformcode import logger

url_api = ""
beeg_salt = ""
Host = "https://beeg.com"


def get_api_url():
global url_api
global beeg_salt
data = scrapertools.downloadpage(Host)
version = re.compile('<script src="/static/cpl/([\d]+).js"').findall(data)[0]
js_url = Host + "/static/cpl/" + version + ".js"
url_api = Host + "/api/v6/" + version
data = scrapertools.downloadpage(js_url)
beeg_salt = re.compile('beeg_salt="([^"]+)"').findall(data)[0]


def decode(key):
a = beeg_salt
e = unicode(urllib.unquote(key), "utf8")
t = len(a)
o = ""
for n in range(len(e)):
r = ord(e[n:n + 1])
i = n % t
s = ord(a[i:i + 1]) % 21
o += chr(r - s)

n = []
for x in range(len(o), 0, -3):
if x >= 3:
n.append(o[(x - 3):x])
else:
n.append(o[0:x])

return "".join(n)


get_api_url()


def mainlist(item):
logger.info()
get_api_url()
itemlist = []
itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos", url=url_api + "/index/main/0/pc",
viewmode="movie"))
# itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias Populares",
# url=url_api + "/index/main/0/pc", extra="popular"))
itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias completo",
url=url_api + "/index/main/0/pc", extra="nonpopular"))
itemlist.append(
Item(channel=item.channel, action="search", title="Buscar", url=url_api + "/index/search/0/pc?query=%s"))
return itemlist


def videos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
JSONData = json.load(data)

for Video in JSONData["videos"]:
thumbnail = "http://img.beeg.com/236x177/" + Video["id"] + ".jpg"
url = url_api + "/video/" + Video["id"]
title = Video["title"]
itemlist.append(
Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot="", show="",
folder=True, contentType="movie"))

# Paginador
Actual = int(scrapertools.get_match(item.url, url_api + '/index/[^/]+/([0-9]+)/pc'))
if JSONData["pages"] - 1 > Actual:
scrapedurl = item.url.replace("/" + str(Actual) + "/", "/" + str(Actual + 1) + "/")
itemlist.append(
Item(channel=item.channel, action="videos", title="Página Siguiente", url=scrapedurl, thumbnail="",
folder=True, viewmode="movie"))

return itemlist
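# Hedged sketch of the paginator above: the current page index is read from
# the API url and bumped by replacing its "/N/" segment. The version segment
# in this url is made up.
url = "/api/v6/1738/index/main/3/pc"
Actual = 3
siguiente = url.replace("/" + str(Actual) + "/", "/" + str(Actual + 1) + "/")
# siguiente -> "/api/v6/1738/index/main/4/pc"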


def listcategorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
JSONData = json.load(data)

# for Tag in JSONData["tags"][item.extra]:
for Tag in JSONData["tags"]:
url = url_api + "/index/tag/0/pc?tag=" + Tag["tag"]
title = '%s - %s' % (str(Tag["tag"]), str(Tag["videos"]))
# title = title[:1].upper() + title[1:]
itemlist.append(
Item(channel=item.channel, action="videos", title=title, url=url, folder=True, viewmode="movie"))

return itemlist


def search(item, texto):
logger.info()

texto = texto.replace(" ", "+")
item.url = item.url % (texto)

try:
return videos(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []


def play(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)

JSONData = json.load(data)
for key in JSONData:
videourl = re.compile("([0-9]+p)", re.DOTALL).findall(key)
if videourl:
videourl = videourl[0]
if not JSONData[videourl] == None:
url = JSONData[videourl]
url = url.replace("{DATA_MARKERS}", "data=pc.ES")
viedokey = re.compile("key=(.*?)%2Cend=", re.DOTALL).findall(url)[0]

url = url.replace(viedokey, decode(viedokey))
if not url.startswith("https:"):
url = "https:" + url
title = videourl
itemlist.append(["%s %s [directo]" % (title, url[-4:]), url])

itemlist.sort(key=lambda item: item[0])
return itemlist
@@ -50,7 +50,7 @@ def categorias(item):
scrapedthumbnail = "http:" + scrapedthumbnail
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/latest/"
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist


@@ -69,7 +69,7 @@ def lista(item):
thumbnail = "https:" + scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = scrapedtitle))
fanart=thumbnail, plot=plot, contentTitle = scrapedtitle))
next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next" title="Next">Next</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)

@@ -49,8 +49,8 @@ def categorias(item):
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
return sorted(itemlist, key=lambda i: i.title)


def lista(item):
@@ -68,7 +68,7 @@ def lista(item):
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = scrapedtitle, fanart=scrapedthumbnail))
contentTitle = scrapedtitle, fanart=thumbnail))
if item.extra:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
if next_page:

@@ -14,6 +14,7 @@ host = 'https://www.cine-online.eu'
IDIOMAS = {'Español': 'ESP', 'Cast': 'ESP', 'Latino': 'LAT', 'Lat': 'LAT', 'Subtitulado': 'VOSE', 'Sub': 'VOSE'}
list_language = IDIOMAS.values()
list_servers = ['Streamango', 'Vidoza', 'Openload', 'Streamcherry', 'Netutv']
# list_quality = ['Brscreener', 'HD', 'TS']
list_quality = []
__channel__='cineonline'
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
@@ -75,7 +76,7 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
if "Año" in item.title:
data = scrapertools.get_match(data,'<h3>Año de estreno(.*?)</ul>')
data = scrapertools.find_single_match(data,'<h3>Año de estreno(.*?)</ul>')
patron = '<li><a href="([^"]+)">(\d+)</(\w)>'
else:
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a> <span>(\d+)</span>'
@@ -89,6 +90,9 @@ def categorias(item):
return itemlist





def lista(item):
logger.info()
itemlist = []
@@ -200,11 +204,11 @@ def findvideos(item):
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if not "/episodios/" in item.url:
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos':
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
extra="findvideos", contentTitle=item.contentTitle))

if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos' and not "/episodios/" in item.url :
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
extra="findvideos", contentTitle=item.contentTitle))
return itemlist



@@ -47,7 +47,7 @@ def catalogo(item):
scrapedplot = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/movies"
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -68,7 +68,7 @@ def categorias(item):
scrapedtitle = scrapedtitle
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist


@@ -86,7 +86,7 @@ def lista(item):
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = title, infoLabels={'year':year} ))
fanart=thumbnail, contentTitle = title, infoLabels={'year':year} ))
next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)

@@ -59,7 +59,7 @@ def lista(item):
plot = scrapertools.find_single_match(match,'<p class="summary">(.*?)</p>')
thumbnail = scrapertools.find_single_match(match,'<img src="([^"]+)"')
itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, viewmode="movie") )
fanart=thumbnail, thumbnail=thumbnail, plot=plot, viewmode="movie") )
next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="siguiente">')
if next_page!="":
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )

@@ -37,7 +37,7 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<div class="category">(.*?)</ul>')
data = scrapertools.find_single_match(data,'<div class="category">(.*?)</ul>')
patron = '<li><a href="(.*?)".*?>(.*?)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
@@ -75,7 +75,7 @@ def lista(item):

def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data

itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:

@@ -28,7 +28,7 @@ def mainlist(item):
itemlist = list()
itemlist.append(Item(channel=item.channel, action="mainpage", title="Categorías", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas Animadas", url=host+"peliculas/",
itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas Animadas", url=host+"peliculas/", extra="Peliculas Animadas",
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + "?s=",
thumbnail=thumb_series))
@@ -48,10 +48,10 @@ def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)class="thumbnail animation-.*?href="([^"]+).*?'
patron += 'img src="([^"]+).*?'
patron += 'alt="([^"]+).*?'
patron += 'class="meta"(.*?)class="contenido"'
patron = '(?s)class="thumbnail animation-.*?href=([^>]+).*?'
patron += 'src=(.*?(?:jpg|jpeg)).*?'
patron += 'alt=(?:"|)(.*?)(?:"|>).*?'
patron += 'class=year>(.*?)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
scrapedyear = scrapertools.find_single_match(scrapedyear, 'class="year">(\d{4})')
@@ -79,8 +79,8 @@ def mainpage(item):
itemlist = []
data1 = httptools.downloadpage(item.url).data
data1 = re.sub(r"\n|\r|\t|\s{2}| ", "", data1)
patron_sec='<ul id="main_header".+?>(.+?)<\/ul><\/div>'
patron='<a href="([^"]+)">([^"]+)<\/a>'#scrapedurl, #scrapedtitle
patron_sec='<divclass=head-main-nav>(.+?)peliculas\/>'
patron='<ahref=([^"]+)>([^"]+)<\/a>'#scrapedurl, #scrapedtitle
data = scrapertools.find_single_match(data1, patron_sec)
matches = scrapertools.find_multiple_matches(data, patron)
if item.title=="Géneros" or item.title=="Categorías":
@@ -105,26 +105,28 @@ def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
if item.title=="Peliculas Animadas":
data_lista = scrapertools.find_single_match(data,
'<div id="archive-content" class="animation-2 items">(.*)<a href=\'')
if item.extra == "Peliculas Animadas":
data_lista = scrapertools.find_single_match(data, '(?is)archive-content(.*?)class=pagination')
else:
data_lista = scrapertools.find_single_match(data,
'<div class="items">(.+?)<\/div><\/div><div class=.+?>')
patron = '<img src="([^"]+)" alt="([^"]+)">.+?<a href="([^"]+)">.+?<div class="texto">(.+?)<\/div>'
data_lista = scrapertools.find_single_match(data, 'class=items><article(.+?)<\/div><\/article><\/div>')
patron = '(?is)src=(.*?(?:jpg|jpeg)).*?'
patron += 'alt=(?:"|)(.*?)(?:"|>).*?'
patron += 'href=([^>]+)>.*?'
patron += 'title.*?<span>([^<]+)<'
matches = scrapertools.find_multiple_matches(data_lista, patron)
for scrapedthumbnail,scrapedtitle, scrapedurl, scrapedplot in matches:
for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedyear in matches:
if item.title=="Peliculas Animadas":
itemlist.append(
item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, contentType="movie",
plot=scrapedplot, action="findvideos", show=scrapedtitle))
item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, contentType="movie",
action="findvideos", contentTitle=scrapedtitle, infoLabels={'year':scrapedyear}))
else:
itemlist.append(
item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
context=autoplay.context,plot=scrapedplot, action="episodios", show=scrapedtitle))
if item.title!="Peliculas Animadas":
tmdb.set_infoLabels(itemlist)
context=autoplay.context,action="episodios", contentSerieName=scrapedtitle))
tmdb.set_infoLabels(itemlist)
next_page = scrapertools.find_single_match(data, 'rel=next href=([^>]+)>')
if next_page:
itemlist.append(item.clone(action="lista", title="Página siguiente>>", url=next_page, extra=item.extra))
return itemlist


@@ -133,13 +135,15 @@ def episodios(item):
itemlist = []
infoLabels = {}
data = httptools.downloadpage(item.url).data
patron = '(?s)<ul class="episodios">(.+?)<span>Compartido'
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
logger.info(data)
patron = '<divid=episodes (.+?)<\/div><\/div><\/div>'
data_lista = scrapertools.find_single_match(data,patron)
contentSerieName = item.title
patron_caps = 'href="([^"]+)".*?'
patron_caps += 'src="([^"]+)".*?'
patron_caps += 'numerando">([^<]+).*?'
patron_caps += 'episodiotitle">.*?>([^<]+)'
patron_caps = 'href=(.+?)><imgalt=".+?" '
patron_caps += 'src=([^"]+)><\/a>.*?'
patron_caps += 'numerando>([^<]+).*?'
patron_caps += 'episodiotitle>.*?>([^<]+)<\/a>'
matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
for scrapedurl, scrapedthumbnail, scrapedtempepi, scrapedtitle in matches:
tempepi=scrapedtempepi.split(" - ")
@@ -148,7 +152,7 @@ def episodios(item):
title="{0}x{1} - ({2})".format(tempepi[0], tempepi[1].zfill(2), scrapedtitle)
item.infoLabels["season"] = tempepi[0]
item.infoLabels["episode"] = tempepi[1]
itemlist.append(item.clone(thumbnail=scrapedthumbnail,
itemlist.append(item.clone(#thumbnail=scrapedthumbnail,
action="findvideos", title=title, url=scrapedurl))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + contentSerieName + " a la videoteca[/COLOR]", url=item.url,
@@ -161,7 +165,7 @@ def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'player-option-\d+.*?'
patron += 'data-sv="([^"]+).*?'
patron += 'data-sv=(\w+).*?'
patron += 'data-user="([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)
headers = {"X-Requested-With":"XMLHttpRequest"}
@@ -170,8 +174,6 @@ def findvideos(item):
data1 = re.sub(r"\n|\r|\t|\s{2}| ", "", data1)
url = base64.b64decode(scrapertools.find_single_match(data1, '<iframe data-source="([^"]+)"'))
url1 = devuelve_enlace(url)
if "drive.google" in url1:
url1 = url1.replace("view","preview")
if url1:
itemlist.append(item.clone(title="Ver en %s",url=url1, action="play"))
tmdb.set_infoLabels(itemlist)
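# A small sketch of the iframe decoding above, with a hypothetical attribute
# value: the player hides the real source as base64 in data-source.
import base64
sample = '<iframe data-source="aHR0cHM6Ly9leGFtcGxlLmNvbS92"></iframe>'
url = base64.b64decode(scrapertools.find_single_match(sample, '<iframe data-source="([^"]+)"'))
# url -> 'https://example.com/v'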
|
||||
@@ -194,11 +196,5 @@ def devuelve_enlace(url1):
|
||||
url = 'https:' + url1
|
||||
new_data = httptools.downloadpage(url).data
|
||||
new_data = new_data.replace('"',"'")
|
||||
url1 = scrapertools.find_single_match(new_data, "iframe src='([^']+)")
|
||||
new_data = httptools.downloadpage(url1).data
|
||||
url = scrapertools.find_single_match(new_data, "sources:\s*\[\{file:\s*'([^']+)")
|
||||
if "zkstream" in url or "cloudup" in url:
|
||||
url1 = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
|
||||
else:
|
||||
url1 = url
|
||||
url1 = scrapertools.find_single_match(new_data, "sources.*?file: '([^']+)")
|
||||
return url1
|
||||
|
||||
@@ -69,7 +69,7 @@ def agregadas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
fichas = re.sub(r"\n|\s{2}","",scrapertools.get_match(data,'<div class="review-box-container">(.*?)wp-pagenavi'))
|
||||
fichas = re.sub(r"\n|\s{2}","",scrapertools.find_single_match(data,'<div class="review-box-container">(.*?)wp-pagenavi'))
|
||||
patron = '<div class="post-thumbnail"><a href="([^"]+)".*?' # url
|
||||
patron+= 'title="([^"]+)".*?' # title
|
||||
patron+= 'src="([^"]+).*?' # thumbnail
|
||||
|
||||
@@ -158,7 +158,7 @@ def findvideos(item):
|
||||
|
||||
# Descarga la página
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = scrapertools.get_match(data, "<div class='post-body entry-content'(.*?)<div class='post-footer'>")
|
||||
data = scrapertools.find_single_match(data, "<div class='post-body entry-content'(.*?)<div class='post-footer'>")
|
||||
|
||||
# Busca los enlaces a los videos
|
||||
listavideos = servertools.findvideos(data)
|
||||
|
||||
@@ -85,7 +85,7 @@ def submenu(item):
|
||||
|
||||
if item.extra == "series":
|
||||
|
||||
item.url_plus = "serie/"
|
||||
item.url_plus = "series-12/"
|
||||
itemlist.append(item.clone(title="Series completas", action="listado", url=item.url + item.url_plus, url_plus=item.url_plus, thumbnail=thumb_series, extra="series"))
|
||||
itemlist.append(item.clone(title="Alfabético A-Z", action="alfabeto", url=item.url + item.url_plus + "?s=letra-%s", url_plus=item.url_plus, thumbnail=thumb_series, extra="series"))
|
||||
|
||||
@@ -141,7 +141,7 @@ def categorias(item):
|
||||
if not extra3:
|
||||
itemlist.append(item.clone(title="Todas las " + item.extra.upper(), action="listado"))
|
||||
itemlist.append(item.clone(title="Alfabético A-Z", action="alfabeto", url=item.url + "?s=letra-%s"))
|
||||
itemlist.append(item.clone(title="Géneros", url=item.url))
|
||||
#itemlist.append(item.clone(title="Géneros", url=item.url))
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
if item.url_plus not in scrapedurl:
|
||||
|
||||
@@ -6,6 +6,7 @@ import urlparse
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
from core import httptools
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
@@ -18,7 +19,7 @@ def mainlist(item):
|
||||
# ------------------------------------------------------
|
||||
# Descarga la página
|
||||
# ------------------------------------------------------
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = httptools.downloadpage(item.url).data
|
||||
# logger.info(data)
|
||||
|
||||
# ------------------------------------------------------
|
||||
@@ -68,7 +69,7 @@ def play(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
# Descarga la página
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = httptools.downloadpage(item.url).data
|
||||
logger.info(data)
|
||||
|
||||
# Extrae las películas
|
||||
|
||||
@@ -24,7 +24,7 @@ list_language = IDIOMAS.values()
|
||||
list_quality = []
|
||||
list_servers = ['torrent']
|
||||
|
||||
host = 'http://www.elitetorrent.biz'
|
||||
host = 'http://www.elitetorrent.io'
|
||||
channel = "elitetorrent"
|
||||
|
||||
categoria = channel.capitalize()
|
||||
@@ -85,9 +85,9 @@ def submenu(item):
|
||||
return itemlist #Algo no funciona, pintamos lo que tenemos
|
||||
|
||||
patron = '<div class="cab_menu">.*?<\/div>' #Menú principal
|
||||
data1 = scrapertools.get_match(data, patron)
|
||||
data1 = scrapertools.find_single_match(data, patron)
|
||||
patron = '<div id="menu_langen">.*?<\/div>' #Menú de idiomas
|
||||
data1 += scrapertools.get_match(data, patron)
|
||||
data1 += scrapertools.find_single_match(data, patron)
|
||||
|
||||
patron = '<a href="(.*?)".*?title="(.*?)"' #Encontrar todos los apartados
|
||||
matches = re.compile(patron, re.DOTALL).findall(data1)
|
||||
@@ -155,16 +155,16 @@ def listado(item):
|
||||
patron = '<div id="principal">.*?<\/nav><\/div><\/div>'
|
||||
data = scrapertools.find_single_match(data, patron)
|
||||
|
||||
patron = '<li>.*?<a href="(.*?)".*?' #url
|
||||
patron += 'title="(.*?)".*?' #título
|
||||
patron += 'src="(.*?)".*?' #thumb
|
||||
patron += "title='(.*?)'.*?" #categoría, idioma
|
||||
patron += '"><i>(.*?)<\/i><\/span.*?' #calidad
|
||||
patron += '="dig1">(.*?)<.*?' #tamaño
|
||||
patron += '="dig2">(.*?)<\/span><\/div>' #tipo tamaño
|
||||
patron = '<li>\s*<div\s*class="[^"]+">\s*<a href="([^"]+)"\s*' #url
|
||||
patron += 'title="([^"]+)"\s*(?:alt="[^"]+")?\s*>\s*' #título
|
||||
patron += '<img (?:class="[^"]+")?\s*src="([^"]+)"\s*border="[^"]+"\s*' #thumb
|
||||
patron += 'title="([^"]+)".*?' #categoría, idioma
|
||||
patron += '<span class="[^"]+" style="[^"]+"\s*><i>(.*?)<\/i><\/span.*?' #calidad
|
||||
patron += '="dig1">(.*?)<.*?' #tamaño
|
||||
patron += '="dig2">(.*?)<\/span><\/div>' #tipo tamaño
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
if not matches and not '<title>503 Backend fetch failed</title>' in data: #error
|
||||
if not matches and not '<title>503 Backend fetch failed</title>' in data and not 'No se han encontrado resultados' in data: #error
|
||||
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
|
||||
if item.intervencion: #Sí ha sido clausurada judicialmente
|
||||
item, itemlist = generictools.post_tmdb_listado(item, itemlist) #Llamamos al método para el pintado del error
|
||||
|
||||
@@ -69,7 +69,7 @@ def lista(item):
|
||||
url="https:" + scrapedurl
|
||||
thumbnail="https:" + scrapedthumbnail
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url, thumbnail=thumbnail,
|
||||
plot=scrapedplot) )
|
||||
fanart=thumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<li class="float-xs-right"><a href=\'([^\']+)\' title=\'Pagina \d+\'>')
|
||||
if next_page == "":
|
||||
next_page = scrapertools.find_single_match(data,'<li><a href=\'([^\']+)\' title=\'Pagina \d+\'>»</a>')
|
||||
|
||||
@@ -37,7 +37,7 @@ def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = scrapertools.get_match(data,'<h2>TAGS</h2>(.*?)<div class="sideitem"')
|
||||
data = scrapertools.find_single_match(data,'<h2>TAGS</h2>(.*?)<div class="sideitem"')
|
||||
patron = '<a href="(.*?)".*?>(.*?)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle in matches:

@@ -57,8 +57,8 @@ def lista(item):
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
plot = ""
url = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url, thumbnail=scrapedthumbnail,
plot=plot, contentTitle = scrapedtitle) )
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url,
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=plot, contentTitle = scrapedtitle) )
next_page = scrapertools.find_single_match(data, '<div class="naviright"><a href="([^"]+)">Siguiente »</a>')
if next_page:
next_page = urlparse.urljoin(item.url, next_page)

@@ -25,6 +25,7 @@ list_quality = []
list_servers = ['torrent']

host = 'http://estrenosby.net/' # 'http://estrenosli.org/'
host_alt = 'http://estrenoske.net/'
channel = "estrenosgo"

color1, color2, color3 = ['0xFF58D3F7', '0xFF2E64FE', '0xFF0404B4']
@@ -320,7 +321,8 @@ def listado(item):
patron_serie = '<div id="where_i_am">.*?<a href="[^"]+">.*?<\/a>.*?<a href="([^"]+)">'
url = scrapertools.find_single_match(data_serie, patron_serie) #buscamos la url de la serie completa
if url:
url = host + url
if host not in url and host_alt not in url:
url = host + url
extra = 'series' #es una serie completa
title_lista += [cat_sec] #la añadimos a la lista de series completas procesadas
title = cat_sec #salvamos el título de la serie completa
@@ -361,7 +363,10 @@ def listado(item):
quality_alt = cat_sec.lower().strip()
item_local.extra = extra #guardamos el extra procesado
item_local.url = url #guardamos la url final
item_local.thumbnail = host[:-1] + scrapedthumbnail #guardamos el thumb
if host not in scrapedthumbnail and host_alt not in scrapedthumbnail:
item_local.thumbnail = host[:-1] + scrapedthumbnail #guardamos el thumb
else:
item_local.thumbnail = scrapedthumbnail #guardamos el thumb sin Host
item_local.context = "['buscar_trailer']"

item_local.contentType = "movie" #por defecto, son películas
@@ -743,7 +748,7 @@ def findvideos(item):
#Ahora tratamos los enlaces .torrent
itemlist_alt = [] #Usamos una lista intermedia para poder ordenar los episodios
if matches_torrent:
for scrapedurl, scrapedquality, scrapedlang in matches_torrent: #leemos los torrents con la diferentes calidades
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()

@@ -772,7 +777,7 @@ def findvideos(item):
patron = '<div class="linksDescarga"><span class="titulo">Descargar Torrent: <\/span><br><a href="([^"]+)" class="TTlink">»\s?(.*?)\s?«<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
else:
matches = item.emergency_urls[2][0] #Guardamos los matches de Directos, si los hay
del item.emergency_urls[2][0] #Una vez tratado lo limpiamos
data = 'xyz123' #iniciamos data para que no dé problemas

@@ -781,15 +786,20 @@ def findvideos(item):

if not data or not matches:
logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / URL: " + item_local.url + " / DATA: " + data)
continue #si no hay más datos, algo no funciona, pasamos a Ver Online

#logger.debug(patron)
#logger.debug(matches)
#logger.debug(data)

for scrapedtorrent, scrapedtitle in matches:
for scrapedtorrent_alt, scrapedtitle in matches:
if host not in scrapedtorrent_alt and host_alt not in scrapedtorrent_alt:
scrapedtorrent = host + scrapedtorrent_alt
else:
scrapedtorrent = scrapedtorrent_alt

if item.videolibray_emergency_urls:
item.emergency_urls[0].append(host + scrapedtorrent)
item.emergency_urls[0].append(scrapedtorrent)
else:
item_local = item_local.clone()
quality = item_local.quality
@@ -829,19 +839,19 @@ def findvideos(item):
quality = '[%s] %s' % (qualityscraped, item_local.quality)

#Ahora pintamos el link del Torrent
item_local.url = host + scrapedtorrent
item_local.url = scrapedtorrent
if item.emergency_urls and not item.videolibray_emergency_urls:
item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la url del .Torrent ALTERNATIVA
if item.armagedon:
item_local.url = item.emergency_urls[0][0] #... ponemos la emergencia como primaria
del item.emergency_urls[0][0] #Una vez tratado lo limpiamos

size = ''
if not item.armagedon:
size = generictools.get_torrent_size(item_local.url) #Buscamos el tamaño en el .torrent
if size:
quality += ' [%s]' % size
if item.armagedon: #Si es catastrófico, lo marcamos
quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % quality
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (quality, str(item_local.language))
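Editor's note: the old code stored host + scrapedtorrent in the emergency list even though scrapedtorrent had just been absolutized above, producing a doubled prefix. A quick illustration of that bug with placeholder values (the path is made up):

    host = 'http://estrenosby.net/'
    scrapedtorrent = host + 'torrent/123.torrent'   # already absolute at this point
    broken = host + scrapedtorrent                  # old code: host prepended twice
    assert broken.count('estrenosby.net') == 2      # malformed emergency URL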

@@ -18,6 +18,7 @@ def mainlist(item):
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/popular/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/channels/"))
itemlist.append( Item(channel=item.channel, title="PornStar" , action="categorias", url=host + "/pornstars/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -50,7 +51,13 @@ def categorias(item):
scrapedtitle = scrapedtitle.replace("movies", "") + " (" + cantidad + ")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page_url = scrapertools.find_single_match(data,'<li itemprop="url" class="current">.*?<a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
if "/categories/" in item.url:
itemlist = sorted(itemlist, key=lambda i: i.title)
return itemlist
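Editor's note: as written, sorted() runs after the "Página Siguiente >>" item has been appended, so the pagination entry gets reordered in among the categories. If that is unintended, a hedged alternative is to sort before appending:

    if "/categories/" in item.url:
        itemlist = sorted(itemlist, key=lambda i: i.title)   # alphabetize categories only
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>",
                                   text_color="blue", url=next_page_url))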

@@ -69,7 +76,7 @@ def lista(item):
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle) )
fanart=thumbnail, plot=plot, contentTitle = contentTitle) )
next_page_url = scrapertools.find_single_match(data,'<li itemprop="url" class="current">.*?<a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)

@@ -48,7 +48,7 @@ def categorias(item):
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist

@@ -59,11 +59,14 @@ def lista(item):
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<a href="([^"]+)" itemprop="url">.*?'
patron += '<img src="([^"]+)" alt="([^"]+)">.*?'
patron += '<span itemprop="duration" class="length">(.*?)</span>'
patron += '<span itemprop="duration" class="length">(.*?)</span>(.*?)<span class="thumb-info">'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion,calidad in matches:
url = scrapedurl
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
if ">HD<" in calidad:
title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " +scrapedtitle
else:
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
contentTitle = scrapedtitle
thumbnail = scrapedthumbnail
plot = ""

@@ -8,14 +8,14 @@ from core.item import Item
from platformcode import config, logger
from core import httptools

# BLOQUEO ESET INTERNET SECURITY
def mainlist(item):
logger.info()
itemlist = []
if item.url=="":
item.url = "http://www.filmovix.net/videoscategory/porno/"
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<h1 class="cat_head">XXX</h1>(.*?)<h3> Novo dodato </h3>')
data = scrapertools.find_single_match(data,'<h1 class="cat_head">XXX</h1>(.*?)<h3> Novo dodato </h3>')
patron = '<li class="clearfix">.*?'
patron += 'src="([^"]+)".*?'
patron += '<p class="title"><a href="([^"]+)" rel="bookmark" title="([^"]+)">'

@@ -30,7 +30,7 @@ def categorias(item):
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist

@@ -47,7 +47,7 @@ def lista(item):
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
plot = ""
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=plot , viewmode="movie") )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=plot , viewmode="movie") )
next_page = scrapertools.find_single_match(data,'<a href="([^"]+)">Next')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)

@@ -52,7 +52,7 @@ def categorias(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<a class="thumb tco1" href="([^"]+)">.*?'
patron += 'src="([^"]+)".*?'
patron += 'alt="([^"]+)".*?'
@@ -69,7 +69,7 @@ def lista(item):
thumbnail = scrapedthumbnail + "|Referer=%s" %host
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle))
fanart=thumbnail, plot=plot, contentTitle = contentTitle))
next_page = scrapertools.find_single_match(data,'<a class="bgco2 tco3" rel="next" href="([^"]+)">></a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -80,7 +80,8 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
url = scrapertools.find_single_match(scrapertools.cachePage(item.url),'<iframe src="([^"]+)"')
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data,'<iframe src="([^"]+)"')
data = httptools.downloadpage(url).data
patron = 'html5player.setVideoHLS\\(\'([^\']+)\''
matches = scrapertools.find_multiple_matches(data, patron)
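Editor's note: the reworked play() resolves the stream in two requests, channel page -> embedded iframe -> HLS manifest. Condensed, and assuming the markup stays as matched above:

    data = httptools.downloadpage(item.url).data
    iframe_url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
    data = httptools.downloadpage(iframe_url).data
    hls_url = scrapertools.find_single_match(data, "html5player.setVideoHLS\\('([^']+)'")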

@@ -16,6 +16,7 @@ def mainlist(item):
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-raped/"))
itemlist.append( Item(channel=item.channel, title="Modelos" , action="categorias", url=host + "/models/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -39,25 +40,32 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<li class="thumb thumb-category">.*?'
patron = '<li class="thumb thumb-\w+">.*?'
patron += '<a href="([^"]+)">.*?'
patron += '<img class="lazy" data-original="([^"]+)">.*?'
patron += '<div class="name">([^"]+)</div>.*?'
patron += '<div class="count">(\d+)</div>'
patron += '<img class="lazy" data-original="([^"]+)".*?'
patron += '<div class="title">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
title = scrapertools.find_single_match(scrapedtitle,'<div class="text">([^<]+)<')
if "/categories/" in item.url:
cantidad = scrapertools.find_single_match(scrapedtitle,'<div class="count">(\d+)</div>')
scrapedtitle = scrapertools.find_single_match(scrapedtitle,'<div class="name">([^<]+)</div>')
title = scrapedtitle + " (" + cantidad + ")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl,
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li class="pagination-next"><a href="([^"]+)">')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
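Editor's note: the new pattern captures the whole title block once and digs the name and count out of it afterwards, because only /categories/ pages carry a video count. A sketch with placeholder markup (the real pages may differ):

    block = '<div class="text">Amateur</div><div class="count">42</div>'
    title = scrapertools.find_single_match(block, '<div class="text">([^<]+)<')
    if "/categories/" in item.url:
        cantidad = scrapertools.find_single_match(block, '<div class="count">(\\d+)</div>')
        title = "%s (%s)" % (title, cantidad)   # e.g. "Amateur (42)"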

def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<div class="thumb">.*?'
patron += '<a href="([^"]+)".*?'
@@ -72,7 +80,7 @@ def lista(item):
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle))
fanart=thumbnail, plot=plot, contentTitle = contentTitle))
next_page = scrapertools.find_single_match(data,'<li class="pagination-next"><a href="([^"]+)">')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -83,7 +91,7 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<meta property="og:video" content="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:

@@ -39,7 +39,7 @@ def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'>Top Sites</a>(.*?)</aside>')
data = scrapertools.find_single_match(data,'>Top Sites</a>(.*?)</aside>')
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<li id="menu-item-\d+".*?<a href="([^"]+)">([^"]+)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
@@ -54,7 +54,7 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'Top Tags(.*?)</ul>')
data = scrapertools.find_single_match(data,'Top Tags(.*?)</ul>')
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<a href="([^"]+)">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
@@ -77,11 +77,12 @@ def lista(item):
patron += '<img src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
title = scrapedtitle
calidad = scrapertools.find_single_match(scrapedtitle, '(\(.*?\))')
title = "[COLOR yellow]" + calidad + "[/COLOR] " + scrapedtitle.replace( "%s" % calidad, "")
thumbnail = scrapedthumbnail.replace("jpg#", "jpg")
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
plot=plot, fulltitle=title) )
fanart=thumbnail, plot=plot, fulltitle=title) )
next_page = scrapertools.find_single_match(data, '<div class="nav-previous"><a href="([^"]+)"')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)

@@ -68,7 +68,7 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<a>CATEGORÍAS</a>(.*?)</ul>')
data = scrapertools.find_single_match(data,'<a>CATEGORÍAS</a>(.*?)</ul>')
patron = '<a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
@@ -147,7 +147,7 @@ def findvideos(item):

def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.fulltitle

@@ -498,6 +498,7 @@ def findvideos(item):
return item #Devolvemos el Item de la llamada
else:
return itemlist #salimos

data = unicode(data, "utf-8", errors="replace").encode("utf-8")
data = scrapertools.find_single_match(data, 'div id="Tokyo" [^>]+>(.*?)</div>') #Seleccionamos la zona de links

@@ -56,7 +56,7 @@ def categorias(item):
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<a href="([^"]+)" class="thumb">.*?'
patron += '<img src="([^"]+)" alt="([^"]+)".*?'
patron += '<span class="dur">(.*?)</span>'
@@ -78,7 +78,7 @@ def peliculas(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"')
video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"')
partes = video_url.split('||')

@@ -16,6 +16,7 @@ from channelselector import get_thumb

host = "https://hdfull.me"

if config.get_setting('hdfulluser', 'hdfull'):
account = True
else:
@@ -128,10 +129,11 @@ def menuseries(item):
def search(item, texto):
logger.info()
data = agrupa_datos(httptools.downloadpage(host).data)
sid = scrapertools.get_match(data, '.__csrf_magic. value="(sid:[^"]+)"')
sid = scrapertools.find_single_match(data, '.__csrf_magic. value="(sid:[^"]+)"')
item.extra = urllib.urlencode({'__csrf_magic': sid}) + '&menu=search&query=' + texto
item.title = "Buscar..."
item.url = host + "/buscar"
item.texto = texto
try:
return fichas(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
@@ -160,8 +162,8 @@ def items_usuario(item):
## Fichas usuario
url = item.url.split("?")[0]
post = item.url.split("?")[1]
old_start = scrapertools.get_match(post, 'start=([^&]+)&')
limit = scrapertools.get_match(post, 'limit=(\d+)')
old_start = scrapertools.find_single_match(post, 'start=([^&]+)&')
limit = scrapertools.find_single_match(post, 'limit=(\d+)')
start = "%s" % (int(old_start) + int(limit))
post = post.replace("start=" + old_start, "start=" + start)
next_page = url + "?" + post
@@ -241,12 +243,12 @@ def fichas(item):

if item.title == "Buscar...":
data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra).data)
s_p = scrapertools.get_match(data, '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
s_p = scrapertools.find_single_match(data, '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
'<h3 class="section-title">')
if len(s_p) == 1:
data = s_p[0]
if 'Lo sentimos</h3>' in s_p[0]:
return [Item(channel=item.channel, title="[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]" + texto.replace('%20',
return [Item(channel=item.channel, title="[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]" + item.texto.replace('%20',
' ') + "[/COLOR] sin resultados")]
else:
data = s_p[0] + s_p[1]
@@ -330,7 +332,7 @@ def episodios(item):
data = agrupa_datos(httptools.downloadpage(item.url).data)
if id == "0":
## Se saca el id de la serie de la página cuando viene de listado_series
id = scrapertools.get_match(data, "<script>var sid = '([^']+)';</script>")
id = scrapertools.find_single_match(data, "<script>var sid = '([^']+)';</script>")
url_targets = url_targets.replace('###0', '###' + id)
str = get_status(status, "shows", id)
if str != "" and account and item.category != "Series" and "XBMC" not in item.title:
@@ -355,8 +357,8 @@ def episodios(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl in matches:
data = agrupa_datos(httptools.downloadpage(scrapedurl).data)
sid = scrapertools.get_match(data, "<script>var sid = '(\d+)'")
ssid = scrapertools.get_match(scrapedurl, "temporada-(\d+)")
sid = scrapertools.find_single_match(data, "<script>var sid = '(\d+)'")
ssid = scrapertools.find_single_match(scrapedurl, "temporada-(\d+)")
post = "action=season&start=0&limit=0&show=%s&season=%s" % (sid, ssid)
url = host + "/a/episodes"
data = httptools.downloadpage(url, post=post).data
@@ -411,7 +413,7 @@ def novedades_episodios(item):
## Episodios
url = item.url.split("?")[0]
post = item.url.split("?")[1]
old_start = scrapertools.get_match(post, 'start=([^&]+)&')
old_start = scrapertools.find_single_match(post, 'start=([^&]+)&')
start = "%s" % (int(old_start) + 24)
post = post.replace("start=" + old_start, "start=" + start)
next_page = url + "?" + post
@@ -701,7 +703,7 @@ def jhexdecode(t):
else:
return ""
r = re.sub(r'(?:\\|)x(\w{2})', to_hx, r).replace('var ', '')
f = eval(scrapertools.get_match(r, '\s*var_0\s*=\s*([^;]+);'))
f = eval(scrapertools.find_single_match(r, '\s*var_0\s*=\s*([^;]+);'))
for i, v in enumerate(f):
r = r.replace('[[var_0[%s]]' % i, "." + f[i])
r = r.replace(':var_0[%s]' % i, ":\"" + f[i] + "\"")

@@ -37,7 +37,7 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<ul class="cf">(.*?)</ul>')
data = scrapertools.find_single_match(data,'<ul class="cf">(.*?)</ul>')
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<li>.*?<a href="([^"]+)".*?'
patron += '<img class="thumb" src="([^"]+)" alt="([^"]+)".*?'
@@ -56,8 +56,8 @@ def categorias(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = scrapertools.get_match(data,'<ul class="cf">(.*?)<h2>Advertisement</h2>')
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data,'<ul class="cf">(.*?)<h2>Advertisement</h2>')
patron = '<li>.*?<a href="([^"]+)".*?'
patron += 'src="([^"]+)" alt="([^"]+)".*?'
patron += '<span class="time">(.*?)</span>'
@@ -79,7 +79,7 @@ def lista(item):
def play(item):
logger.info(item)
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
video_url = scrapertools.find_single_match(data, 'var video_url="([^"]*)"')
video_url += scrapertools.find_single_match(data, 'video_url\+="([^"]*)"')
partes = video_url.split('||')

@@ -45,7 +45,7 @@ def categorias(item):
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next">Next page »</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -56,7 +56,7 @@ def categorias(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<div class="video-thumb"><a href="([^"]+)" class="title".*?>([^"]+)</a>.*?'
patron += '<span class="time">([^<]+)</span>.*?'
@@ -69,7 +69,7 @@ def lista(item):
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle))
fanart=thumbnail, plot=plot, contentTitle = contentTitle))
next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next">Next page »</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -80,7 +80,7 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data,'<source data-fluid-hd src="([^"]+)/?br=\d+"')
if scrapedurl=="":
scrapedurl = scrapertools.find_single_match(data,'<source src="([^"]+)/?br=\d+"')

@@ -86,7 +86,7 @@ def categorias(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<article class="item" data-video-id="([^"]+)">.*?src="([^"]+)" alt="([^"]+)".*?<div class="thumbnail__info__right">(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
@@ -107,7 +107,7 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
video_url = scrapertools.find_single_match(data, 'var video_url="([^"]*)"')
video_url += scrapertools.find_single_match(data, 'video_url\+=\'([^\']+)\'')
partes = video_url.split('||')
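Editor's note: several play() functions here rebuild a video URL that the page's JavaScript concatenates from two string literals, then split on '||' because the site packs the playable URL together with extra parameters. Roughly, with placeholder values:

    video_url = 'https://cdn.example.com/video.mp4' + '||1080||mp4'   # hypothetical payload
    partes = video_url.split('||')
    media_url = partes[0]   # the playable URL; the remaining parts are site metadata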

@@ -8,6 +8,7 @@ from threading import Thread

import xbmc
import xbmcgui
from core import httptools
from core import scrapertools
from core import tmdb
from core.item import Item
@@ -15,6 +16,7 @@ from core.scrapertools import decodeHtmlentities as dhe
from platformcode import config, logger
from platformcode import platformtools

global mainWindow
mainWindow = list()
ActoresWindow = None
TrailerWindow = None
@@ -43,7 +45,6 @@ if xinfoplus_set == config.get_localized_string(70130):
set_animation = True

def start(item, recomendaciones=[], from_window=False):
global mainWindow
if from_window:
global relatedWindow, ActorInfoWindow, ActoresWindow, BusquedaWindow, TrailerWindow, imagesWindow
create = [relatedWindow, ActorInfoWindow, ActoresWindow, BusquedaWindow, TrailerWindow, imagesWindow]
@@ -192,13 +193,13 @@ class main(xbmcgui.WindowDialog):
titulo = re.sub("'", "", titulo)
url_tvthemes = "http://televisiontunes.com/search.php?q=%s" % titulo.replace(' ', '+')

data = scrapertools.downloadpage(url_tvthemes)
data = httptools.downloadpage(url_tvthemes).data
page_theme = scrapertools.find_single_match(data, '<!-- sond design -->.*?<li><a href="([^"]+)"')

if page_theme:
page_theme = "http://televisiontunes.com" + page_theme
data = scrapertools.downloadpage(page_theme)
song = scrapertools.get_match(data, '<form name="song_name_form">.*?type="hidden" value="(.*?)"')
data = httptools.downloadpage(page_theme).data
song = scrapertools.find_single_match(data, '<form name="song_name_form">.*?type="hidden" value="(.*?)"')
song = song.replace(" ", "%20")
pl = xbmc.PlayList(xbmc.PLAYLIST_MUSIC)
pl.clear()
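Editor's note: the xinfoplus changes migrate fetches from scrapertools.downloadpage, which returned the page body directly, to httptools.downloadpage, which returns a response object, hence the trailing .data everywhere. The pattern:

    # before: the body came back directly
    #   data = scrapertools.downloadpage(url)
    # after: the response object carries the body in .data
    response = httptools.downloadpage(url)
    data = response.data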
@@ -1515,7 +1516,7 @@ class ActorInfo(xbmcgui.WindowDialog):

actor_tmdb = tmdb.Tmdb(discover=search)
if not actor_tmdb.result.get("biography") and actor_tmdb.result.get("imdb_id"):
data = scrapertools.downloadpage("http://www.imdb.com/name/%s/bio" % actor_tmdb.result["imdb_id"])
data = httptools.downloadpage("http://www.imdb.com/name/%s/bio" % actor_tmdb.result["imdb_id"]).data
info = scrapertools.find_single_match(data, '<div class="soda odd">.*?<p>(.*?)</p>')
if info:
bio = dhe(scrapertools.htmlclean(info.strip()))
@@ -2267,7 +2268,7 @@ def get_filmaf(item, infoLabels):
year = str(infoLabels.get("year", ""))
url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format(
title, year)
data = scrapertools.downloadpage(url)
data = httptools.downloadpage(url).data

tipo = "película"
if item.contentType != "movie":
@@ -2275,7 +2276,7 @@ def get_filmaf(item, infoLabels):
url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"')
if url_filmaf:
url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf
data = scrapertools.downloadpage(url_filmaf)
data = httptools.downloadpage(url_filmaf).data

rating = scrapertools.find_single_match(data, 'itemprop="ratingValue" content="([^"]+)"')
if not rating:
@@ -2318,7 +2319,7 @@ def fanartv(item, infoLabels, images={}):
% infoLabels['tmdb_id']
else:
url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_search
data = jsontools.load(scrapertools.downloadpage(url, headers=headers))
data = jsontools.load(httptools.downloadpage(url, headers=headers).data)
if data and not "error message" in data:
for key, value in data.items():
if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]:

@@ -10,6 +10,7 @@ from core import httptools

host = 'http://javl.in'

# BLOQUEO ANTIVIRUS

def mainlist(item):
logger.info()

@@ -27,7 +27,7 @@ def mainlist(item):
fanart = ''

itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=fanart))
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail))

# Paginacion
title = ''

@@ -50,7 +50,7 @@ def categorias(item):
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
return sorted(itemlist, key=lambda i: i.title)

def lista(item):

@@ -51,7 +51,7 @@ def categorias(item):
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )

return itemlist
return sorted(itemlist, key=lambda i: i.title)

def lista(item):
@@ -67,7 +67,7 @@ def lista(item):
scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle
scrapedurl = "http://xxx.justporno.tv/embed/" + scrapedurl
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )

if item.extra:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(.*?)>')

4
plugin.video.alfa/channels/maxipelis24.py
Normal file → Executable file
@@ -12,7 +12,7 @@ from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb

host = "https://maxipelis24.tv"
host = "https://maxipelis24.live"

IDIOMAS = {'Latino': 'Latino', 'Sub':'VOSE', 'Subtitulado': 'VOSE', 'Español': 'CAST', 'Castellano':'CAST'}
list_language = IDIOMAS.values()
@@ -135,7 +135,7 @@ def findvideos(item):
action='play', language=IDIOMAS[idioma], infoLabels=item.infoLabels)
itemlist.append(new_item)

if 'maxipelis24.tv/hideload/?' in link:
if '/hideload/?' in link:
id_letter = scrapertools.find_single_match(link, '?(\w)d')
id_type = '%sd' % id_letter
ir_type = '%sr' % id_letter

@@ -5,9 +5,10 @@ import sys
import urllib
import urlparse
import time
import traceback

from channelselector import get_thumb
from core import httptools
from core import httptools, proxytools
from core import scrapertools
from core import servertools
from core.item import Item
@@ -26,7 +27,7 @@ list_servers = ['torrent']

channel = "mejortorrent"

host = 'http://www.mejortorrent.tv/'
host = 'http://www.mejortorrent.tv'
host_sufix = '.tv'
#host = config.get_setting('domain_name', channel)

@@ -61,7 +62,7 @@ def mainlist(item):
thumbnail=thumb_pelis_hd))
itemlist.append(Item(channel=item.channel, title="Películas Listado Alfabetico", action="alfabeto",
url= host + "/peliculas-buscador.html" +
"?campo=letra&valor&valor2=Acci%%F3n&valor3=%s&valor4=3&submit=Buscar", extra="peliculas",
"?campo=letra&valor=&valor2=Acci%%F3n&valor3=%s&valor4=3&submit=Buscar", extra="peliculas",
thumbnail=thumb_pelis))
itemlist.append(Item(channel=item.channel, title="Series", action="listado", extra="series", tipo=False,
url= host + "/torrents-de-series.html", thumbnail=thumb_series))
@@ -102,7 +103,7 @@ def alfabeto(item):
url= host + "/secciones.php?sec=descargas&ap=series_hd&func=mostrar&letra=."))
for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']:
itemlist.append(Item(channel=item.channel, action="listado", title=letra, extra="series", tipo=True,
url= host + "/secciones.php?sec=descargas&ap=series_hd&func=mostrar&letra=" + letra.lower()))
url= host + "/secciones.php?sec=descargas&ap=series_hd&func=mostrar&letra=" + letra))

elif item.extra == "series" or item.extra == "documentales":
itemlist.append(Item(channel=item.channel, action="listado", title="Todas", extra=item.extra, tipo=True, url= host + "/" + item.extra + "-letra-..html"))
@@ -112,7 +113,7 @@ def alfabeto(item):
elif item.extra == "peliculas":
itemlist.append(Item(channel=item.channel, action="listado", title="Todas", extra=item.extra, tipo=True, url=item.url % "."))
for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']:
itemlist.append(Item(channel=item.channel, action="listado", title=letra, extra=item.extra, tipo=True, url=item.url % letra.lower()))
itemlist.append(Item(channel=item.channel, action="listado", title=letra, extra=item.extra, tipo=True, url=item.url % letra))

return itemlist

@@ -128,13 +129,15 @@ def listado(item):
del item.totalItems

try:
data = ''
# La url de Películas por orden Alfabético tiene un formato distinto
if item.extra == "peliculas" and item.tipo:
url = item.url.split("?")
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(url[0], post=url[1]).data)
else:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
except:
logger.error(traceback.format_exc())
logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: LISTADO:. La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
@@ -145,35 +148,35 @@ def listado(item):

# En este canal las url's y los títulos tienen diferente formato dependiendo del contenido
if item.extra == "peliculas" and item.tipo: #Desde Lista Alfabética
patron = "<a href='(/peli-descargar-torrent[^']+)'()"
patron = "<a href='((?:[^']+)?/peli-descargar-torrent[^']+)'()"
patron_enlace = "/peli-descargar-torrent-\d+-(.*?)\.html"
patron_title = "<a href='/peli-descargar-torrent[^']+'[^>]+>([^>]+)</a>(\s*<b>([^>]+)</b>)?"
patron_title = "<a href='(?:[^']+)?/peli-descargar-torrent[^']+'[^>]+>([^>]+)</a>(\s*<b>([^>]+)</b>)?"
item.action = "findvideos"
item.contentType = "movie"
pag = False #No hay paginación
elif item.extra == "peliculas" and not item.tipo: #Desde Menú principal
patron = '<a href="(/peli-descargar-torrent[^"]+)">?'
patron = '<a href="((?:[^"]+)?/peli-descargar-torrent[^"]+)">?'
patron += '<img src="([^"]+)"[^<]+</a>'
patron_enlace = "/peli-descargar-torrent-\d+-(.*?)\.html"
patron_title = '<a href="/peli-descargar-torrent[^"]+">([^<]+)</a>(\s*<b>([^>]+)</b>)?'
patron_title = '<a href="(?:[^"]+)?/peli-descargar-torrent[^"]+">([^<]+)</a>(\s*<b>([^>]+)</b>)?'
item.action = "findvideos"
item.contentType = "movie"
pag = True #Sí hay paginación
cnt_tot = 25 # Poner el num. máximo de items por página. Parece que hay 50
elif item.extra == "series" and item.tipo:
patron = "<a href='(/serie-descargar-torrent[^']+)'>()"
patron = "<a href='((?:[^']+)?/serie-descargar-torrent[^']+)'>()"
patron_enlace = "\/serie-descargar-torrent*.-\d+-?\d+-(.*?)\.html"
patron_title = "<a href='\/serie-descargar-torrent[^']+'>([^<]+)<\/a>(\s*<b>([^>]+)<\/b>)?"
patron_title = "<a href='(?:[^']+)?\/serie-descargar-torrent[^']+'>([^<]+)<\/a>(\s*<b>([^>]+)<\/b>)?"
patron_title_ep = "\/serie-descargar-torrent*.-\d+-?\d+-(.*?)-\d+x\d+.*?\.html"
patron_title_se = "\/serie-descargar-torrent*.-\d+-?\d+-(.*?)-\d+-Temp.*?\.html"
item.action = "episodios"
item.contentType = "season"
pag = False
elif item.extra == "series" and not item.tipo:
patron = '<a href="(\/serie-[^a_z]{0,10}descargar-torrent[^"]+)">?'
patron = '<a href="((?:[^"]+)?\/serie-[^a_z]{0,10}descargar-torrent[^"]+)">?'
patron += '<img src="([^"]+)"[^<]+</a>'
patron_enlace = "\/serie-[^a_z]{0,10}descargar-torrent*.-\d+-?\d+-(.*?)\.html"
patron_title = '<a href="/serie-[^a_z]{0,10}descargar-torrent[^"]+">([^<]+)</a>(\s*<b>([^>]+)</b>)?'
patron_title = '<a href="(?:[^"]+)?/serie-[^a_z]{0,10}descargar-torrent[^"]+">([^<]+)</a>(\s*<b>([^>]+)</b>)?'
patron_title_ep = "\/serie-[^a_z]{0,10}descargar-torrent*.-\d+-?\d+-(.*?)-\d+x\d+.*?\.html"
patron_title_se = "\/serie-[^a_z]{0,10}descargar-torrent*.-\d+-?\d+-(.*?)-\d+-Temp.*?\.html"
item.action = "episodios"
@@ -181,19 +184,19 @@ def listado(item):
pag = True
cnt_tot = 10 # Se reduce el numero de items por página porque es un proceso pesado
elif item.extra == "documentales" and item.tipo:
patron = "<a href='(/doc-descargar-torrent[^']+)'>()"
patron = "<a href='((?:[^']+)?/doc-descargar-torrent[^']+)'>()"
patron_enlace = "\/doc-descargar-torrent*.-\d+-?\d+-(.*?)\.html"
patron_title = "<a href='\/doc-descargar-torrent[^']+'>([^<]+)<\/a>(\s*<b>([^>]+)<\/b>)?"
patron_title = "<a href='(?:[^']+)?\/doc-descargar-torrent[^']+'>([^<]+)<\/a>(\s*<b>([^>]+)<\/b>)?"
patron_title_ep = "\/doc-descargar-torrent*.-\d+-?\d+-(.*?)-\d+x\d+.*?\.html"
patron_title_se = "\/doc-descargar-torrent*.-\d+-?\d+-(.*?)-\d+-Temp.*?\.html"
item.action = "episodios"
item.contentType = "tvshow"
pag = False
else:
patron = '<a href="(/doc-descargar-torrent[^"]+)">?'
patron = '<a href="((?:[^"]+)?/doc-descargar-torrent[^"]+)">?'
patron += '<img src="([^"]+)"[^<]+</a>'
patron_enlace = "/doc-descargar-torrent-\d+-\d+-(.*?)\.html"
patron_title = '<a href="/doc-descargar-torrent[^"]+">([^<]+)</a>(\s*<b>([^>]+)</b>)?'
patron_title = '<a href="(?:[^"]+)?/doc-descargar-torrent[^"]+">([^<]+)</a>(\s*<b>([^>]+)</b>)?'
patron_title_ep = "\/doc-descargar-torrent*.-\d+-?\d+-(.*?)-\d+x\d+.*?\.html"
patron_title_se = "\/doc-descargar-torrent*.-\d+-?\d+-(.*?)-\d+-Temp.*?\.html"
item.action = "episodios"
@@ -237,10 +240,13 @@ def listado(item):
url_last_page = re.sub(r"\d+$", "9999", url_next_page)
data_last = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(url_last_page).data)
patron_last_page = "<span class='nopaginar'>(\d+)<\/span>"
if item.extra == "documentales":
item.last_page = int(scrapertools.find_single_match(data_last, patron_last_page))
else:
item.last_page = int(scrapertools.find_single_match(data_last, patron_last_page)) * (len(matches) / cnt_tot)
try:
if item.extra == "documentales":
item.last_page = int(scrapertools.find_single_match(data_last, patron_last_page))
else:
item.last_page = int(scrapertools.find_single_match(data_last, patron_last_page)) * (len(matches) / cnt_tot)
except:
item.last_page = 1

if matches_cnt > cnt_tot and item.extra == "documentales" and pag:
item.next_page = ''
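Editor's note: the new try/except matters because find_single_match returns '' when the paginator markup is missing and int('') raises ValueError; the fallback keeps the listing alive instead of crashing. An equivalent minimal form (the patch catches a bare except):

    try:
        last = int(scrapertools.find_single_match(data_last, patron_last_page))
        item.last_page = last if item.extra == "documentales" else last * (len(matches) / cnt_tot)
    except ValueError:
        item.last_page = 1   # no paginator found: assume a single page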
@@ -295,10 +301,10 @@ def listado(item):
item_local.title = ''
item_local.context = "['buscar_trailer']"

item_local.title = scrapertools.get_match(scrapedurl, patron_enlace)
item_local.title = scrapertools.find_single_match(scrapedurl, patron_enlace)
item_local.title = item_local.title.replace("-", " ")
item_local.url = verificar_url(urlparse.urljoin(item_local.url, scrapedurl))
item_local.thumbnail = verificar_url(host + urllib.quote(scrapedthumbnail))
item_local.url = verificar_url(urlparse.urljoin(item_local.url, scrapedurl)).replace(' ', '%20')
item_local.thumbnail = verificar_url(urlparse.urljoin(host, scrapedthumbnail)).replace(' ', '%20')
item_local.contentThumbnail = item_local.thumbnail
item_local.infoLabels['year'] = '-' # Al no saber el año, le ponemos "-" y TmDB lo calcula automáticamente
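Editor's note: urlparse.urljoin replaces the fragile host + urllib.quote(...) concatenation, and spaces are escaped afterwards so Kodi receives a valid URL. For instance (the filename is made up):

    import urlparse  # Python 2, as used by the addon
    thumb = urlparse.urljoin('http://www.mejortorrent.tv', '/uploads/imagenes/la pelicula.jpg')
    thumb = thumb.replace(' ', '%20')
    # -> 'http://www.mejortorrent.tv/uploads/imagenes/la%20pelicula.jpg'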

@@ -429,7 +435,7 @@ def listado(item):

if info != "" and not item_local.quality:
item_local.quality = info
if "(hdrip" in title.lower() or "(br" in title.lower() or "(vhsrip" in title.lower() or "(dvdrip" in title.lower() or "(fullb" in title.lower() or "(blu" in title.lower() or "(4k" in title.lower() or "(hevc" in title.lower() or "(imax" in title.lower() or "extendida" in title.lower() or "[720p]" in title.lower() or "[1080p]" in title.lower():
if "(hdrip" in title.lower() or "(br" in title.lower() or "(vhsrip" in title.lower() or "(dvdrip" in title.lower() or "(fullb" in title.lower() or "(blu" in title.lower() or "(4k" in title.lower() or "4k" in title.lower() or "(hevc" in title.lower() or "(imax" in title.lower() or "extendida" in title.lower() or "[720p]" in title.lower() or "[1080p]" in title.lower():
if not item_local.quality:
item_local.quality = scrapertools.find_single_match(title, r'\(.*?\)?\(.*?\)')
if not item_local.quality:
@@ -515,22 +521,22 @@ def listado_busqueda(item):
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos

# busca series y Novedades
patron = "<a href='(\/serie-descargar-torrent[^']+)'[^>]+>(.*?)<\/a>"
patron = "<a href='((?:[^']+)?\/serie-descargar-torrent[^']+)'[^>]+>(.*?)<\/a>"
patron += ".*?<span style='color:\w+;'>([^']+)<\/span>"
patron_enlace = "\/serie-descargar-torrents-\d+-\d+-(.*?)\.html"
matches = scrapertools.find_multiple_matches(data, patron)

# busca pelis y Novedades
patron = "<a href='(\/peli-descargar-torrent[^']+)'[^>]+>(.*?)<\/a>"
patron = "<a href='((?:[^']+)?\/peli-descargar-torrent[^']+)'[^>]+>(.*?)<\/a>"
patron += ".*?<span style='color:\w+;'>([^']+)<\/a>"
matches += re.compile(patron, re.DOTALL).findall(data) #Busquedas
patron = "<a href='(\/peli-descargar-torrent[^']+)'[^>]+>(.*?)<\/a>"
patron = "<a href='((?:[^']+)?\/peli-descargar-torrent[^']+)'[^>]+>(.*?)<\/a>"
patron += ".*?<span style='color:\w+;'>([^']+)<\/span>"
patron_enlace = "\/peli-descargar-torrent-\d+(.*?)\.html"
matches += re.compile(patron, re.DOTALL).findall(data) #Novedades

# busca docu
patron = "<a href='(\/doc-descargar-torrent[^']+)' .*?"
patron = "<a href='((?:[^']+)?\/doc-descargar-torrent[^']+)' .*?"
patron += "<font Color='\w+'>(.*?)<\/a>.*?"
patron += "<td align='right' width='20%'>(.*?)<\/td>"
patron_enlace = "\/doc-descargar-torrent-\d+-\d+-(.*?)\.html"
@@ -661,7 +667,7 @@ def listado_busqueda(item):
item_local.quality = scrapertools.remove_htmltags(scrapedinfo).decode('iso-8859-1').encode('utf8')
item_local.quality = item_local.quality.replace("(", "").replace(")", "").replace("[", "").replace("]", "").replace("Documental", "").replace("documental", "")

item_local.url = verificar_url(urlparse.urljoin(item.url, scrapedurl))
item_local.url = verificar_url(urlparse.urljoin(item.url, scrapedurl)).replace(' ', '%20')

#Preparamos la información básica para TMDB
if "/serie-" in scrapedurl or "/doc-" in scrapedurl:
@@ -686,7 +692,7 @@ def listado_busqueda(item):
if not item_local.contentSeason:
item_local.contentSeason = 1

if "(hdrip" in title.lower() or "(br" in title.lower() or "(vhsrip" in title.lower() or "(dvdrip" in title.lower() or "(fullb" in title.lower() or "(blu" in title.lower() or "(4k" in title.lower() or "(hevc" in title.lower() or "(imax" in title.lower() or "extendida" in title.lower() or "[720p]" in title.lower() or "[1080p]" in title.lower():
if "(hdrip" in title.lower() or "(br" in title.lower() or "(vhsrip" in title.lower() or "(dvdrip" in title.lower() or "(fullb" in title.lower() or "(blu" in title.lower() or "(4k" in title.lower() or "4k" in title.lower() or "(hevc" in title.lower() or "(imax" in title.lower() or "extendida" in title.lower() or "[720p]" in title.lower() or "[1080p]" in title.lower():
if not item_local.quality:
item_local.quality = scrapertools.find_single_match(title, r'\(.*?\)?\(.*?\)')
if not item_local.quality:
@@ -777,10 +783,10 @@ def findvideos(item):
if item.post: #Puede traer datos para una llamada "post". De momento usado para documentales, pero podrían ser series
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, post=item.post).data)
data = data.replace('"', "'")
patron = ">Pincha.*?<a href='(.*?\/uploads\/torrents\/\w+\/.*?\.torrent)'"
patron = ">Pincha.*?<a href='((?:[^']+)?\/uploads\/torrents\/\w+\/.*?\.torrent)'"
else:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
patron = "<a href='(secciones.php\?sec\=descargas&ap=contar&tabla=[^']+)'"
patron = "<a href='((?:[^']+)?secciones.php\?sec\=descargas&ap=contar&tabla=[^']+)'"
except:
pass

@@ -795,7 +801,7 @@ def findvideos(item):
if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca...
return item #Devolvemos el Item de la llamada
else:
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos

if not item.armagedon: #Si es un proceso normal, seguimos
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -852,13 +858,12 @@ def findvideos(item):
else:
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos

#logger.debug(torrent_data)
if not item.armagedon:
item_local.url = scrapertools.get_match(torrent_data, ">Pincha.*?<a href='(.*?\/uploads\/torrents\/\w+\/.*?\.torrent)'")
item_local.url = scrapertools.find_single_match(torrent_data, ">Pincha.*?<a href='((?:[^']+)?\/uploads\/torrents\/\w+\/.*?\.torrent)'")
item_local.url = verificar_url(urlparse.urljoin(url, item_local.url))

elif not item.armagedon:
item_local.url = url # Ya teníamos el link desde el primer nivel (documentales)
item_local.url = item_local.url.replace(" ", "%20")

if item.armagedon and item.emergency_urls and not item.videolibray_emergency_urls:
@@ -956,7 +961,7 @@ def episodios(item):

# Selecciona en tramo que nos interesa
data = scrapertools.find_single_match(data_alt,
"(<form name='episodios' action='secciones.php\?sec=descargas\&ap=contar_varios' method='post'>.*?)</form>")
"(<form name='episodios' action='(?:[^']+)?secciones.php\?sec=descargas\&ap=contar_varios' method='post'>.*?)</form>")

# Prepara el patrón de búsqueda de: URL, título, fechas y dos valores mas sin uso
if '/serie' in item.url:
@@ -974,7 +979,7 @@ def episodios(item):
item, itemlist = generictools.post_tmdb_episodios(item, itemlist) #Llamamos al método para el pintado del error
return itemlist #Salimos

logger.error("ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
logger.error("ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data_alt)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web. Reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos

@@ -1054,8 +1059,8 @@ def episodios(item):

def verificar_url(url):
if '.com' in url or '.net' in url or '.org' in url:
url = url.replace('.com', '.tv').replace('.net', '.tv').replace('.org', '.tv')
if '.com' in url or '.net' in url or '.org' in url or '.tv' in url:
url = url.replace('.com', host_sufix).replace('.net', host_sufix).replace('.org', host_sufix).replace('.tv', host_sufix)
url = url.replace('torrents/tmp/torrent.php?table=peliculas/&name=', 'torrents/peliculas/')
url = url.replace('torrents/tmp/torrent.php?table=series/&name=', 'torrents/series/')
url = url.replace('torrents/tmp/torrent.php?table=documentales/&name=', 'torrents/documentales/')
||||
|
||||
@@ -25,7 +25,8 @@ list_quality = []
|
||||
list_servers = ['torrent']
|
||||
|
||||
channel = "mejortorrent1"
|
||||
host = config.get_setting('domain_name', channel)
|
||||
#host = config.get_setting('domain_name', channel)
|
||||
host = "https://mejortorrent1.net/"
|
||||
|
||||
categoria = channel.capitalize()
|
||||
__modo_grafico__ = config.get_setting('modo_grafico', channel)
|
||||
@@ -889,7 +890,6 @@ def findvideos(item):
|
||||
torrent_data = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=False)
|
||||
except: #error
|
||||
pass
|
||||
|
||||
|
||||
else:
|
||||
#Viene de SERIES y DOCUMENTALES. Generamos una copia de Item para trabajar sobre ella
|
||||
@@ -901,7 +901,7 @@ def findvideos(item):
|
||||
except:
|
||||
pass
|
||||
|
||||
if not torrent_data and not ('location' in torrent_data.headers or 'zip' in torrent_data.headers['content-type']):
|
||||
if not torrent_data and not ('location' in torrent_data.headers or 'zip' in torrent_data.headers):
|
||||
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
|
||||
if item.intervencion: #Sí ha sido clausurada judicialmente
|
||||
item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Llamamos al método para el pintado del error
|
||||
@@ -1003,7 +1003,7 @@ def findvideos(item):
|
||||
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
|
||||
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
|
||||
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
|
||||
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
|
||||
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
|
||||
|
||||
# Requerido para AutoPlay
|
||||
autoplay.start(itemlist, item) #Lanzamos Autoplay
|
||||
|
||||
@@ -64,7 +64,7 @@ def peliculas(item):
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
|
||||
plot=plot, contentTitle=contentTitle))
|
||||
fanart=thumbnail, plot=plot, contentTitle=contentTitle))
|
||||
next_page_url = scrapertools.find_single_match(data,'<a href=\'([^\']+)\' class="next">Next >></a>')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
|
||||
@@ -8,15 +8,15 @@ from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
|
||||
host = 'https://www.muchoporno.xxx'
|
||||
host = 'https://www.pornburst.xxx'
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/page3.html"))
|
||||
itemlist.append( Item(channel=item.channel, title="Pornstars" , action="categorias", url=host + "/pornstars/"))
|
||||
#itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/sitios/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categorias/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/sites/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
|
||||
@@ -39,11 +39,12 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
- if "/sitios/" in item.url:
- patron = '<div class="muestra-escena muestra-canales">.*?href="(.*?)">.*?'
- patron += 'src="(.*?)".*?'
- patron += '<a title="(.*?)".*?'
- patron += '</span> (.*?) videos</span>'
+ if "/sites/" in item.url:
+ patron = '<div class="muestra-escena muestra-canales">.*?'
+ patron += 'href="([^"]+)">.*?'
+ patron += 'src="([^"]+)".*?'
+ patron += '<a title="([^"]+)".*?'
+ patron += '</span> (\d+) videos</span>'
if "/pornstars/" in item.url:
patron = '<a class="muestra-escena muestra-pornostar" href="([^"]+)">.*?'
patron += 'src="([^"]+)".*?'
@@ -62,8 +63,8 @@ def categorias(item):
scrapedtitle = scrapedtitle + cantidad
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
- thumbnail=scrapedthumbnail , plot=scrapedplot) )
- next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)">Siguiente</a></li>')
+ fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) )
+ next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)"')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
@@ -74,7 +75,7 @@ def categorias(item):
def lista(item):
logger.info()
itemlist = []
- data = scrapertools.cachePage(item.url)
+ data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="muestra-escena"\s*href="([^"]+)".*?'
patron += 'data-stats-video-name="([^"]+)".*?'
@@ -89,8 +90,8 @@ def lista(item):
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
- plot=plot, contentTitle = contentTitle))
- next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)">Siguiente</a></li>')
+ fanart=thumbnail, plot=plot, contentTitle = contentTitle))
+ next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)"')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
@@ -100,7 +101,7 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
- data = scrapertools.cachePage(item.url)
+ data = httptools.downloadpage(item.url).data
patron = '<source src="([^"]+)" type="video/mp4"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:

@@ -82,6 +82,13 @@ __modo_grafico__ = config.get_setting('modo_grafico', channel_py)
modo_ultima_temp = config.get_setting('seleccionar_ult_temporadda_activa', channel_py) #Actualización sólo últ. Temporada?
timeout = config.get_setting('clonenewpct1_timeout_downloadpage', channel_py) #Timeout downloadpage
if timeout == 0: timeout = None
+ try:
+ from core import proxytools
+ if proxytools.channel_proxy_list(host): #Si usa un proxy, ...
+ timeout = timeout * 2 #Duplicamos en timeout
+ except:
+ pass

fecha_rango = config.get_setting('clonenewpct1_rango_fechas_novedades', channel_py) #Rango fechas para Novedades
if fecha_rango == 0: fecha_rango = 'Hoy'
elif fecha_rango == 1: fecha_rango = 'Ayer'
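Note (not part of the patch): a minimal sketch of the timeout rule this hunk introduces — a setting of 0 means "no limit", and hosts routed through a proxy get twice the budget. effective_timeout is a hypothetical helper; config and proxytools are the addon's own modules.

    def effective_timeout(base, uses_proxy):
        # 0 is the addon's convention for "no limit" (None disables the timeout)
        if base == 0:
            return None
        # proxied requests are slower, so the patch doubles the allowance
        return base * 2 if uses_proxy else base

    assert effective_timeout(0, False) is None
    assert effective_timeout(5, True) == 10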
@@ -173,7 +180,7 @@ def submenu(item):
except:
pass

- patron = '<li><a\s*class="[^"]+"\s*href="http.*:[^"]+"><i\s*class=.*><\/i>.*Inicio<\/a><\/li>(.+)<\/ul>\s*<\/nav>'
+ patron = '<li><a\s*class="[^"]+"\s*href="[^"]+"><i\s*class="[^"]+".*?><\/i>.*?Inicio.*?<\/a><\/li>(.+)<\/ul>\s*<\/nav>'
#Verificamos si se ha cargado una página, y si además tiene la estructura correcta
if not data or not scrapertools.find_single_match(data, patron):
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
@@ -183,9 +190,12 @@ def submenu(item):
itemlist.append(item.clone(action='', title="[COLOR yellow]" + clone_inter.capitalize() + ': [/COLOR]' + intervenido_judicial + '. Reportar el problema en el foro', thumbnail=thumb_intervenido))
return itemlist #Salimos

- logger.error("ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL: " + item.url + data)
+ try:
+ logger.error("ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + data)
+ except:
+ logger.error("ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: (probablemente bloqueada por antivirus)")
#Si no hay datos consistentes, llamamos al método de fail_over para que encuentre un canal que esté activo y pueda gestionar el submenú
- item, data = generictools.fail_over_newpct1(item, patron)
+ item, data = generictools.fail_over_newpct1(item, patron, timeout=timeout)

if not data: #Si no ha logrado encontrar nada, salimos
itemlist.append(item.clone(action='', title="[COLOR yellow]" + item.category + '[/COLOR]: Ningún canal NewPct1 activo'))
@@ -203,9 +213,12 @@ def submenu(item):
if "pelisyseries.com" in item.channel_host and item.extra == "varios": #compatibilidad con mispelisy.series.com
data = '<li><a href="' + item.channel_host + 'varios/" title="Documentales">Documentales</a></li>'
else:
- data_menu = scrapertools.get_match(data, patron) #Seleccionamos el trozo que nos interesa
+ data_menu = scrapertools.find_single_match(data, patron) #Seleccionamos el trozo que nos interesa
if not data_menu:
- logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
+ try:
+ logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
+ except:
+ logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: (probablemente bloqueada por antivirus)")
itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: SUBMENU: Ha cambiado la estructura de la Web. Reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos

@@ -302,7 +315,7 @@ def submenu_novedades(item):

logger.error("ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL: " + item.url + data)
#Si no hay datos consistentes, llamamos al método de fail_over para que encuentre un canal que esté activo y pueda gestionar el submenú
- item, data = generictools.fail_over_newpct1(item, patron)
+ item, data = generictools.fail_over_newpct1(item, patron, timeout=timeout)

if not data: #Si no ha logrado encontrar nada, salimos
itemlist.append(item.clone(action='', title="[COLOR yellow]" + item.category + '[/COLOR]: Ningún canal NewPct1 activo'))
@@ -315,7 +328,7 @@ def submenu_novedades(item):
if item.url_alt: del item.url_alt
del item.channel_alt

- data = scrapertools.get_match(data, patron) #Seleccionamos el trozo que nos interesa
+ data = scrapertools.find_single_match(data, patron) #Seleccionamos el trozo que nos interesa
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
data = data.replace("'", '"').replace('/series"', '/series/"') #Compatibilidad con mispelisy.series.com

@@ -392,7 +405,7 @@ def alfabeto(item):

logger.error("ERROR 01: ALFABETO: La Web no responde o ha cambiado de URL: " + item.url + data)
#Si no hay datos consistentes, llamamos al método de fail_over para que encuentre un canal que esté activo y pueda gestionar el submenú
- item, data = generictools.fail_over_newpct1(item, patron)
+ item, data = generictools.fail_over_newpct1(item, patron, timeout=timeout)

if not data: #Si no ha logrado encontrar nada, salimos
itemlist.append(item.clone(action='', title="[COLOR yellow]" + item.category + '[/COLOR]: Ningún canal NewPct1 activo'))
@@ -405,7 +418,7 @@ def alfabeto(item):
if item.url_alt: del item.url_alt
del item.channel_alt

- data = scrapertools.get_match(data, patron)
+ data = scrapertools.find_single_match(data, patron)

patron = '<a href="([^"]+)"[^>]+>([^>]+)</a>'

@@ -454,7 +467,7 @@ def listado(item):

logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + data)
#Si no hay datos consistentes, llamamos al método de fail_over para que encuentre un canal que esté activo y pueda gestionar el submenú
- item, data = generictools.fail_over_newpct1(item, patron)
+ item, data = generictools.fail_over_newpct1(item, patron, timeout=timeout)

if not data: #Si no ha logrado encontrar nada, salimos
itemlist.append(item.clone(action='', title="[COLOR yellow]" + item.channel.capitalize() + '[/COLOR]: Ningún canal NewPct1 activo'))
@@ -479,7 +492,7 @@ def listado(item):
#Selecciona el tramo de la página con el listado de contenidos
patron = '<ul class="' + clase + '">(.*?)</ul>'
if data:
- fichas = scrapertools.get_match(data, patron)
+ fichas = scrapertools.find_single_match(data, patron)
if not fichas and not '<h3><strong>( 0 ) Resultados encontrados </strong>' in data: #error
logger.error("ERROR 02: LISTADO: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: LISTADO: Ha cambiado la estructura de la Web. Reportar el error con el log'))
@@ -659,7 +672,7 @@ def listado(item):

title = title.replace("Ver online Serie", "").replace("Ver online ", "").replace("Descarga Serie HD ", "").replace("Descargar Serie HD ", "").replace("Descarga Serie ", "").replace("Descargar Serie ", "").replace("Ver en linea ", "").replace("Ver en linea", "").replace("en Full HD", "").replace("en hd ", "").replace("en HD ", "").replace("MicroHD", "").replace("HD ", "").replace("(Proper)", "").replace("HDTV", "").replace("RatDVD", "").replace("DVDRiP", "").replace("DVDRIP", "").replace("DVDRip", "").replace("DVDR", "").replace("DVD9", "").replace("DVD", "").replace("DVBRIP", "").replace("DVB", "").replace("LINE", "").replace("calidad", " ").replace("- ES ", "").replace("ES ", "").replace("COMPLETA", "").replace("Serie Animada", " ").replace("(", "-").replace(")", "-").replace(".", " ").strip()

- title = title.replace("Descargar torrent ", "").replace("Descarga Gratis", "").replace("Descarga gratis", "").replace("Descargar Gratis", "").replace("Descargar gratis", "").replace("en gratis", "").replace("gratis gratis", "").replace("Gratisgratis", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Descargar ", "").replace("Decargar ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4K UHDrip", "").replace("BDremux", "").replace("FULL UHD4K", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("en BluRay", "").replace("BluRay en", "").replace("Bluray en", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").replace("++Sub", "").replace("+-+Sub", "").strip()
+ title = title.replace("Descargar torrent ", "").replace("Descarga Gratis", "").replace("Descarga gratis", "").replace("Descargar Gratis", "").replace("Descargar gratis", "").replace("en gratis", "").replace("gratis gratis", "").replace("Gratisgratis", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Descargar ", "").replace("Decargar ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4K UHDrip", "").replace("BDremux", "").replace("FULL UHD4K", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("en BluRay", "").replace("BluRay en", "").replace("Bluray en", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").replace("++Sub", "").replace("+-+Sub", "").replace("Directors Cut", "").strip()

title = re.sub(r'\(\d{4}\)$', '', title)
if re.sub(r'\d{4}$', '', title).strip():
@@ -815,7 +828,7 @@ def listado_busqueda(item):

logger.error("ERROR 01: LISTADO_BUSQUEDA: La Web no responde o ha cambiado de URL: " + item.url + item.post + " / DATA: " + data)
#Si no hay datos consistentes, llamamos al método de fail_over para que encuentre un canal que esté activo y pueda gestionar el submenú
- item, data = generictools.fail_over_newpct1(item, pattern)
+ item, data = generictools.fail_over_newpct1(item, pattern, timeout=timeout_search)

if not data: #Si no ha logrado encontrar nada, salimos
itemlist.append(item.clone(action='', title="[COLOR yellow]" + item.channel.capitalize() + '[/COLOR]: Ningún canal NewPct1 activo'))
@@ -855,7 +868,7 @@ def listado_busqueda(item):
else:
pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
data_alt = data
- data = scrapertools.get_match(data, pattern)
+ data = scrapertools.find_single_match(data, pattern)
if item.extra == "novedades":
pattern = '<a href="(?P<scrapedurl>[^"]+)"\s?' #url
pattern += 'title="(?P<scrapedtitle>[^"]+)"[^>]*>' #título
@@ -997,7 +1010,7 @@ def listado_busqueda(item):
if item_local.category == 'Mispelisyseries': #Esta web no gestiona bien el cambio de episodio a Serie
pattern = 'class="btn-torrent">.*?window.location.href = "([^"]+)";' #Patron para .torrent
#Como no hay datos consistentes, llamamos al método de fail_over para que encuentre un canal que esté activo y pueda gestionar el cambio de episodio por serie
- item_local, data_serie = generictools.fail_over_newpct1(item_local, pattern)
+ item_local, data_serie = generictools.fail_over_newpct1(item_local, pattern, timeout=timeout_search)
else:
try:
data_serie = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item_local.url, timeout=timeout).data)
@@ -1008,7 +1021,7 @@ def listado_busqueda(item):
if not data_serie or (not scrapertools.find_single_match(data_serie, pattern) and not '<h3><strong>( 0 ) Resultados encontrados </strong>' in data and not '<ul class="noticias-series"></ul></form></div><!-- end .page-box -->' in data):
logger.error("ERROR 01: LISTADO_BUSQUEDA: La Web no responde o ha cambiado de URL: " + item_local.url + " / DATA: " + data_serie)
#Si no hay datos consistentes, llamamos al método de fail_over para que encuentre un canal que esté activo y pueda gestionar el cambio de episodio por serie
- item_local, data_serie = generictools.fail_over_newpct1(item_local, pattern)
+ item_local, data_serie = generictools.fail_over_newpct1(item_local, pattern, timeout=timeout)

if not data_serie: #Si no ha logrado encontrar nada, salimos
title_subs += ["ERR"]
@@ -1147,7 +1160,7 @@ def listado_busqueda(item):

title = title.replace("Ver online Serie", "").replace("Ver online ", "").replace("Descarga Serie HD ", "").replace("Descargar Serie HD ", "").replace("Descarga Serie ", "").replace("Descargar Serie ", "").replace("Ver en linea ", "").replace("Ver en linea", "").replace("en Full HD", "").replace("en hd ", "").replace("en HD ", "").replace("MicroHD", "").replace("HD ", "").replace("(Proper)", "").replace("HDTV", "").replace("RatDVD", "").replace("DVDRiP", "").replace("DVDRIP", "").replace("DVDRip", "").replace("DVDR", "").replace("DVD9", "").replace("DVD", "").replace("DVBRIP", "").replace("DVB", "").replace("LINE", "").replace("calidad", " ").replace("- ES ", "").replace("ES ", "").replace("COMPLETA", "").replace("Serie Animada", " ").replace("(", "-").replace(")", "-").replace(".", " ").strip()

- title = title.replace("Descargar torrent ", "").replace("Descarga Gratis", "").replace("Descarga gratis", "").replace("Descargar Gratis", "").replace("Descargar gratis", "").replace("en gratis", "").replace("gratis gratis", "").replace("Gratisgratis", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Descargar ", "").replace("Decargar ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4K UHDrip", "").replace("BDremux", "").replace("FULL UHD4K", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("en BluRay", "").replace("BluRay en", "").replace("Bluray en", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").replace("++Sub", "").replace("+-+Sub", "").strip()
+ title = title.replace("Descargar torrent ", "").replace("Descarga Gratis", "").replace("Descarga gratis", "").replace("Descargar Gratis", "").replace("Descargar gratis", "").replace("en gratis", "").replace("gratis gratis", "").replace("Gratisgratis", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Descargar ", "").replace("Decargar ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4K UHDrip", "").replace("BDremux", "").replace("FULL UHD4K", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("en BluRay", "").replace("BluRay en", "").replace("Bluray en", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").replace("++Sub", "").replace("+-+Sub", "").replace("Directors Cut", "").strip()

title = re.sub(r'\(\d{4}\)$', '', title)
if re.sub(r'\d{4}$', '', title).strip():
@@ -1329,7 +1342,7 @@ def findvideos(item):
item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()

verify_fo = True #Verificamos si el clone a usar está activo
- item, data = generictools.fail_over_newpct1(item, verify_fo)
+ item, data = generictools.fail_over_newpct1(item, verify_fo, timeout=timeout)

# Cualquiera de las tres opciones son válidas
# item.url = item.url.replace(".com/",".com/ver-online/")
@@ -1450,7 +1463,7 @@ def findvideos(item):
except: #La web no responde. Probemos las urls de emergencia
pass

- patron = 'class="btn-torrent">.*?window.location.href = "(.*?)";' #Patron para .torrent
+ patron = 'class="btn-torrent">.*?window.location.href = (?:parseURL\()?"(.*?)"\)?;' #Patron para .torrent
patron_mult = 'torrent:check:status|' + patron + '|<a href="([^"]+)"\s?title="[^"]+"\s?class="btn-torrent"'
if not scrapertools.find_single_match(data, patron):
patron_alt = '<a href="([^"]+)"\s?title="[^"]+"\s?class="btn-torrent"' #Patron para .torrent (planetatorrent)
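Note (not part of the patch): the torrent patron is widened because some NewPct1 clones now emit window.location.href = parseURL("..."); instead of a bare quoted URL. A minimal sketch showing that the new expression accepts both forms (sample strings are made up):

    import re

    patron = 'class="btn-torrent">.*?window.location.href = (?:parseURL\()?"(.*?)"\)?;'
    old_style = 'class="btn-torrent"> ... window.location.href = "http://x/y.torrent";'
    new_style = 'class="btn-torrent"> ... window.location.href = parseURL("http://x/y.torrent");'
    for sample in (old_style, new_style):
        print(re.search(patron, sample).group(1))  # http://x/y.torrent both times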
@@ -1461,7 +1474,7 @@ def findvideos(item):
url_torr = scrapertools.find_single_match(item.channel_host, '(\w+:)//') + url_torr

#Verificamos si se ha cargado una página, y si además tiene la estructura correcta
- if not data or not scrapertools.find_single_match(data, patron) or not videolibrarytools.verify_url_torrent(url_torr): # Si no hay datos o url, error
+ if not data or not scrapertools.find_single_match(data, patron) or not videolibrarytools.verify_url_torrent(url_torr, timeout=timeout): # Si no hay datos o url, error
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
if item.intervencion: #Sí ha sido clausurada judicialmente
item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Llamamos al método para el pintado del error
@@ -1479,7 +1492,7 @@ def findvideos(item):
data = 'xyz123' #Para que no haga más preguntas
else:
#Si no hay datos consistentes, llamamos al método de fail_over para que encuentre un canal que esté activo y pueda gestionar el vídeo
- item, data = generictools.fail_over_newpct1(item, patron_mult)
+ item, data = generictools.fail_over_newpct1(item, patron_mult, timeout=timeout)

if not data: #Si no ha logrado encontrar nada, verificamos si hay servidores
cnt_servidores = 0
@@ -1500,7 +1513,7 @@ def findvideos(item):
cnt_servidores += 1

if cnt_servidores == 0:
- item, data_servidores = generictools.fail_over_newpct1(item, patron) #intentamos recuperar servidores
+ item, data_servidores = generictools.fail_over_newpct1(item, patron, timeout=timeout) #intentamos recuperar servidores

#Miramos si ha servidores
if not data_servidores: #Si no ha logrado encontrar nada nos vamos
@@ -1518,7 +1531,7 @@ def findvideos(item):
data = data.replace("$!", "#!").replace("'", "\"").replace("Ã±", "ñ").replace("//pictures", "/pictures")

# patrón para la url torrent
- patron = 'class="btn-torrent">.*?window.location.href = "(.*?)";' #Patron para .torrent
+ patron = 'class="btn-torrent">.*?window.location.href = (?:parseURL\()?"(.*?)"\)?;' #Patron para .torrent
if not scrapertools.find_single_match(data, patron):
patron = '<a href="([^"]+)"\s?title="[^"]+"\s?class="btn-torrent"' #Patron para .torrent (planetatorrent)
url_torr = scrapertools.find_single_match(data, patron)
@@ -1533,7 +1546,7 @@ def findvideos(item):
if not size:
size = scrapertools.find_single_match(item.quality, '\s?\[(\d+.?\d*?\s?\w\s?[b|B])\]')
if not size and not item.armagedon and not item.videolibray_emergency_urls:
- size = generictools.get_torrent_size(url_torr) #Buscamos el tamaño en el .torrent
+ size = generictools.get_torrent_size(url_torr, timeout=timeout) #Buscamos el tamaño en el .torrent
if size:
item.title = re.sub(r'\s\[\d+,?\d*?\s\w[b|B]\]', '', item.title) #Quitamos size de título, si lo traía
item.title = '%s [%s]' % (item.title, size) #Agregamos size al final del título
@@ -1877,7 +1890,7 @@ def episodios(item):
item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()

verify_fo = True #Verificamos si el clone a usar está activo
- item, data = generictools.fail_over_newpct1(item, verify_fo)
+ item, data = generictools.fail_over_newpct1(item, verify_fo, timeout=timeout)

#Limpiamos num. Temporada y Episodio que ha podido quedar por Novedades
season_display = 0
@@ -1931,7 +1944,7 @@ def episodios(item):
patron = '<ul class="%s">(.*?)</ul>' % "buscar-list" # item.pattern

data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, timeout=timeout).data)
- if data: data_alt = scrapertools.get_match(data, patron)
+ if data: data_alt = scrapertools.find_single_match(data, patron)
except: #Algún error de proceso
pass

@@ -1951,7 +1964,7 @@ def episodios(item):
logger.error(pattern + data)

#Si no hay datos consistentes, llamamos al método de fail_over para que encuentre un canal que esté activo y pueda gestionar el vídeo
- item, data = generictools.fail_over_newpct1(item, patron, pattern)
+ item, data = generictools.fail_over_newpct1(item, patron, pattern, timeout=timeout)

if not data: #No se ha encontrado ningún canal activo para este vídeo
itemlist.append(item.clone(action='', title="[COLOR yellow]" + item.channel.capitalize() + '[/COLOR]: Ningún canal NewPct1 activo'))
@@ -1997,7 +2010,7 @@ def episodios(item):
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
data = data.replace("chapters", "buscar-list") #Compatibilidad con mispelisy.series.com
pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list" # item.pattern
- data_sector = scrapertools.get_match(data, pattern)
+ data_sector = scrapertools.find_single_match(data, pattern)
if not data_sector:
raise
data = data_sector

@@ -42,9 +42,9 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
if item.title == "Categorias":
- data = scrapertools.get_match(data, '<a href="#">Genres</a>(.*?)</ul>')
+ data = scrapertools.find_single_match(data, '<a href="#">Genres</a>(.*?)</ul>')
else:
- data = scrapertools.get_match(data, '<a href="#">Studios</a>(.*?)</ul>')
+ data = scrapertools.find_single_match(data, '<a href="#">Studios</a>(.*?)</ul>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -72,7 +72,7 @@ def lista(item):
thumbnail = scrapedthumbnail
plot = ""
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
- plot=plot, contentTitle=title))
+ fanart=thumbnail, plot=plot, contentTitle=title))
# <li class='active'><a class=''>1</a></li><li><a rel='nofollow' class='page larger' href='https://pandamovies.pw/movies/page/2'>
next_page = scrapertools.find_single_match(data, '<li class=\'active\'>.*?href=\'([^\']+)\'>')
if next_page == "":

@@ -6,37 +6,26 @@ import urlparse
from core import scrapertools
from core.item import Item
from platformcode import logger
from core import httptools


def mainlist(item):
logger.info()

itemlist = []
if item.url == "":
item.url = "http://www.peliculaseroticas.net/"

- # Descarga la página
- data = scrapertools.cachePage(item.url)

- # Extrae las entradas de la pagina seleccionada
- patron = '<div class="post"[^<]+'
- patron += '<a href="([^"]+)">([^<]+)</a[^<]+'
- patron += '<hr[^<]+'
- patron += '<a[^<]+<img src="([^"]+)"'

+ data = httptools.downloadpage(item.url).data
+ patron = '<div class="post">.*?'
+ patron += '<a href="([^"]+)">([^<]+)</a>.*?'
+ patron += '<img src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []

for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = urlparse.urljoin(item.url, scrapedurl)
title = scrapedtitle.strip()
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
plot = ""

- # Añade al listado
- itemlist.append(
- Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
- plot=plot, viewmode="movie", folder=True))

+ itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
+ fanart=thumbnail, thumbnail=thumbnail, plot=plot, viewmode="movie"))
# Extrae la marca de siguiente página
if item.url == "http://www.peliculaseroticas.net/":
next_page_url = "http://www.peliculaseroticas.net/cine-erotico/2.html"
@@ -44,8 +33,6 @@ def mainlist(item):
current_page = scrapertools.find_single_match(item.url, "(\d+)")
next_page = int(current_page) + 1
next_page_url = "http://www.peliculaseroticas.net/cine-erotico/" + str(next_page) + ".html"

- itemlist.append(
- Item(channel=item.channel, action="mainlist", title=">> Página siguiente", url=next_page_url, folder=True))

+ itemlist.append( Item(channel=item.channel, action="mainlist", title=">> Página siguiente", url=next_page_url))
return itemlist

@@ -222,7 +222,7 @@ def findvideos(item):
calidad = re.sub(r'ts', 'ts-hq', calidad)
url = host + "/goto/"
url_post = urllib.urlencode({'id': id})
- server_name = scrapertools.get_match(server, '(\w+)\.').replace("waaw","netutv")
+ server_name = scrapertools.find_single_match(server, '(\w+)\.').replace("waaw","netutv")
server_parameters = servertools.get_server_parameters(server_name)
icon_server = server_parameters.get("thumbnail", "")
extra = "online"
@@ -250,7 +250,7 @@ def findvideos(item):
idioma = "[COLOR brown]" + idioma + "[/COLOR]"
url = host + "/goto/"
data_post = urllib.urlencode({'id': id})
- server_name = scrapertools.get_match(server, '(.*?)\.').strip()
+ server_name = scrapertools.find_single_match(server, '(.*?)\.').strip()
icon_server = os.path.join(config.get_runtime_path(), "resources", "images", "servers",
"server_" + server_name + ".png")
icon_server = icon_server.replace('streamin', 'streaminto')

@@ -135,7 +135,7 @@ def categorias(item):
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos

data = scrapertools.find_single_match(data, patron)
- patron = '<li><a href="genero/([^"]+)">(.*?)<'
+ patron = '<li><a href="\/?genero\/([^"]+)">(.*?)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)

if not matches:

@@ -35,7 +35,8 @@ def lista(item):
if duration:
scrapedtitle += " (%s)" % duration

- itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail))
+ itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
+ fanart=scrapedthumbnail))

# Extrae la marca de siguiente página
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)"')

@@ -70,7 +70,7 @@ def peliculas(item):
scrapedthumbnail = "http:" + scrapedthumbnail
url = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=scrapedthumbnail,
- plot=plot, contentTitle = title))
+ fanart=scrapedthumbnail, plot=plot, contentTitle = title))
next_page = scrapertools.find_single_match(data, '<a class="btn_wrapper__btn" href="([^"]+)">Next</a></li>')
if next_page:
next_page = urlparse.urljoin(item.url, next_page)

@@ -10,14 +10,16 @@ from core import httptools

host = 'https://www.porn300.com'

+ #BLOQUEO ANTIVIRUS STREAMCLOUD

def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/es/videos/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/es/mas-vistos/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/es/mas-votados/"))
- itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/es/canales/"))
- itemlist.append( Item(channel=item.channel, title="Pornstars" , action="categorias", url=host + "/es/pornostars/"))
+ itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/es/canales/?page=1"))
+ itemlist.append( Item(channel=item.channel, title="Pornstars" , action="categorias", url=host + "/es/pornostars/?page=1"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/es/categorias/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -56,8 +58,11 @@ def categorias(item):
scrapedtitle = scrapedtitle + " (" + cantidad +")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/?sort=latest"
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
- thumbnail=scrapedthumbnail, plot=scrapedplot) )
+ fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
+ if next_page=="":
+ if "/?page=1" in item.url:
+ next_page=urlparse.urljoin(item.url,"/?page=2")
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
@@ -67,7 +72,7 @@ def categorias(item):
def lista(item):
logger.info()
itemlist = []
- data = scrapertools.cachePage(item.url)
+ data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a itemprop="url" href="([^"]+)" data-video-id="\d+" title="([^"]+)">.*?'
patron += '<img itemprop="thumbnailUrl" src="([^"]+)".*?'
@@ -81,7 +86,7 @@ def lista(item):
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
- plot=plot, contentTitle = contentTitle) )
+ fanart=thumbnail, plot=plot, contentTitle = contentTitle) )
next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -91,11 +96,10 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
- data = scrapertools.cachePage(item.url)
+ data = httptools.downloadpage(item.url).data
patron = '<source src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
- for scrapedurl in matches:
- url = scrapedurl
- itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
+ for url in matches:
+ itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
return itemlist

@@ -42,9 +42,9 @@ def categorias(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
if "/category/movies/" in item.url:
- data = scrapertools.get_match(data,'>Movies</a>(.*?)</ul>')
+ data = scrapertools.find_single_match(data,'>Movies</a>(.*?)</ul>')
else:
- data = scrapertools.get_match(data,'>Clips</a>(.*?)</ul>')
+ data = scrapertools.find_single_match(data,'>Clips</a>(.*?)</ul>')
patron = '<a href=([^"]+)>([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
@@ -59,16 +59,16 @@ def categorias(item):
def lista(item):
logger.info()
itemlist = []
- data = scrapertools.cachePage(item.url)
+ data = httptools.downloadpage(item.url).data
patron = '<article id=post-\d+.*?'
patron += '<img class="center cover" src=([^"]+) alt="([^"]+)".*?'
- patron += '<blockquote>.*?<a href=(.*?) target=_blank>'
+ patron += '<blockquote><p> <a href=(.*?) target=_blank'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedthumbnail,scrapedtitle,scrapedurl in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
- thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
+ fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<a class=nextpostslink rel=next href=(.*?)>')
if next_page!="":
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
@@ -77,6 +77,7 @@ def lista(item):

def play(item):
logger.info()
+ data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.title = item.title

@@ -52,7 +52,7 @@ def lista(item):
scrapedplot = ""
scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle
itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
- thumbnail=scrapedthumbnail, plot=scrapedplot))
+ fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot))
next_page = scrapertools.find_single_match(data, '<nav id="page_nav"><a href="(.*?)"')
if next_page != "":
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))

@@ -2,6 +2,8 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
+ import os, sys
+ import base64

from core import scrapertools
from core import servertools
from core.item import Item
@@ -10,6 +12,7 @@ from core import httptools

host = 'http://www.pornhive.tv/en'


def mainlist(item):
logger.info()
itemlist = []
@@ -38,9 +41,9 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
if item.title == "Categorias" :
- data = scrapertools.get_match(data,'Categories(.*?)Channels')
+ data = scrapertools.find_single_match(data,'Categories(.*?)Channels')
else:
- data = scrapertools.get_match(data,'Channels(.*?)</ul>')
+ data = scrapertools.find_single_match(data,'Channels(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="[^"]+">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
@@ -66,22 +69,25 @@ def lista(item):
title = scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
- itemlist.append( Item(channel=item.channel, action="play" , title=title, url=scrapedurl, thumbnail=thumbnail,
- plot=plot, contentTitle=title))
+ itemlist.append( Item(channel=item.channel, action="findvideos" , title=title, url=scrapedurl, thumbnail=thumbnail,
+ fanart=thumbnail, plot=plot, contentTitle=title))
next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" data-ci-pagination-page="\d+" rel="next">Next ›')
if next_page != "" :
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist


- def play(item):
+ def findvideos(item):
logger.info()
- itemlist = servertools.find_video_items(data=item.url)
- data = scrapertools.cachePage(item.url)
- itemlist = servertools.find_video_items(data=data)
- for videoitem in itemlist:
- videoitem.title = item.title
- videoitem.fulltitle = item.fulltitle
- videoitem.thumbnail = item.thumbnail
- videochannel=item.channel
+ itemlist = []
+ data = httptools.downloadpage(item.url).data
+ data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
+ patron = ';extra_urls\[\d+\]=\'([^\']+)\''
+ matches = re.compile(patron,re.DOTALL).findall(data)
+ for scrapedurl in matches:
+ scrapedurl = base64.b64decode(scrapedurl)
+ itemlist.append(item.clone(action="play", title="%s", url=scrapedurl))
+ itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return itemlist

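Note (not part of the patch): the rewritten findvideos() no longer hands the raw page URL to servertools; it downloads the page and decodes the base64-encoded player links the site hides in a JS extra_urls array. A minimal sketch of that extraction (the sample data is made up):

    import base64, re

    data = "var extra_urls=[];extra_urls[0]='aHR0cHM6Ly9leGFtcGxlLmNvbS92aWRlbw==';"
    patron = r";extra_urls\[\d+\]='([^']+)'"
    for enc in re.findall(patron, data):
        print(base64.b64decode(enc).decode("utf-8"))  # https://example.com/video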
@@ -50,8 +50,8 @@ def categorias(item):
else:
url = urlparse.urljoin(item.url, scrapedurl + "?o=cm")
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
- itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, fanart=item.fanart,
- thumbnail=scrapedthumbnail))
+ itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url,
+ fanart=scrapedthumbnail, thumbnail=scrapedthumbnail))
itemlist.sort(key=lambda x: x.title)
return itemlist

@@ -73,7 +73,7 @@ def peliculas(item):
title += ' [HD]'
url = urlparse.urljoin(item.url, url)
itemlist.append(
- Item(channel=item.channel, action="play", title=title, url=url, fanart=item.fanart, thumbnail=thumbnail))
+ Item(channel=item.channel, action="play", title=title, url=url, fanart=thumbnail, thumbnail=thumbnail))
if itemlist:
# Paginador
patron = '<li class="page_next"><a href="([^"]+)"'
@@ -88,7 +88,7 @@ def peliculas(item):
def play(item):
logger.info()
itemlist = []
- data = scrapertools.cachePage(item.url)
+ data = httptools.downloadpage(item.url).data
patron = '"defaultQuality":true,"format":"mp4","quality":"\d+","videoUrl":"(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl in matches:
@@ -96,3 +96,4 @@ def play(item):
itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
return itemlist


@@ -45,7 +45,7 @@ def categorias(item):
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
- thumbnail=scrapedthumbnail, plot=scrapedplot) )
+ fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist

@@ -63,7 +63,7 @@ def lista(item):
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
- plot=plot, contentTitle = title))
+ fanart=thumbnail, plot=plot, contentTitle = title))
next_page = scrapertools.find_single_match(data, '<li class="direction"><a href="([^"]+)" data-ajax="pagination">')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)

@@ -10,6 +10,7 @@ from core import httptools

host = 'http://qwertty.net'


def mainlist(item):
logger.info()
itemlist = []
@@ -53,7 +54,7 @@ def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
- data = scrapertools.get_match(data,'<div class="videos-list">(.*?)<div class="videos-list">')
+ data = scrapertools.find_single_match(data,'<div class="videos-list">(.*?)<div class="videos-list">')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<article id="post-\d+".*?'
patron += '<a href="([^"]+)" title="([^"]+)">.*?'
@@ -64,7 +65,7 @@ def lista(item):
scrapedplot = ""
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
- thumbnail=scrapedthumbnail, plot=scrapedplot) )
+ fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)">Next</a>')
if next_page=="":
next_page = scrapertools.find_single_match(data,'<li><a class="current">.*?<li><a href=\'([^\']+)\' class="inactive">')
@@ -77,10 +78,11 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
- data = scrapertools.cachePage(item.url)
+ data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data,'<meta itemprop="embedURL" content="([^"]+)"')
url = url.replace("pornhub.com/embed/", "pornhub.com/view_video.php?viewkey=")
- data = scrapertools.cachePage(url)
+ data = httptools.downloadpage(url).data
+ # https://www.spankwire.com/EmbedPlayer.aspx?ArticleId=14049072
if "xvideos" in url :
scrapedurl = scrapertools.find_single_match(data,'setVideoHLS\(\'([^\']+)\'')
if "pornhub" in url :

@@ -46,11 +46,12 @@ def catalogo(item):
scrapedplot = ""
scrapedtitle = scrapedtitle + " [COLOR yellow]" + cantidad + "[/COLOR] "
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
- itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
+ itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
+ fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page_url = scrapertools.find_single_match(data,'<a id="wp_navNext" class="js_pop_page" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
- itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
+ itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
return itemlist

def categorias(item):
@@ -58,22 +59,30 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
- patron = '<div class="category_item_wrapper">.*?<a href="([^"]+)".*?data-thumb_url="([^"]+)".*?alt="([^"]+)".*?<span class="category_count">\s+([^"]+) Videos'
+ patron = '<div class="category_item_wrapper">.*?'
+ patron += '<a href="([^"]+)".*?'
+ patron += 'data-src="([^"]+)".*?'
+ patron += 'alt="([^"]+)".*?'
+ patron += '<span class="category_count">([^"]+) Videos'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
+ cantidad = cantidad.strip()
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
- itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
+ itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
+ fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist


def peliculas(item):
logger.info()
itemlist = []
- data = scrapertools.cachePage(item.url)
+ data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
- patron = '<img id="img_.*?data-path="([^"]+)".*?<span class="duration">(.*?)</a>.*?<a title="([^"]+)" href="([^"]+)">'
+ patron = '<img id="img_.*?data-path="([^"]+)".*?'
+ patron += '<span class="duration">(.*?)</a>.*?'
+ patron += '<a title="([^"]+)" href="([^"]+)">'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedthumbnail,duration,scrapedtitle,scrapedurl in matches:
url = urlparse.urljoin(item.url,scrapedurl)
@@ -82,23 +91,25 @@ def peliculas(item):
duration = scrapertools.find_single_match(duration, 'HD</span>(.*?)</span>')
title = "[COLOR yellow]" + duration + "[/COLOR] " + "[COLOR red]" + scrapedhd + "[/COLOR] " + scrapedtitle
else:
duration = duration.replace("<span class=\"vr-video\">VR</span>", "")
title = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle
title = title.replace("&nbsp;</span>", "").replace("&nbsp;", "")
scrapedthumbnail = scrapedthumbnail.replace("{index}.", "1.")
plot = ""
year = ""
- itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=scrapedthumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
+ if not "/premium/" in url:
+ itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url,
+ fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=plot, contentTitle = title) )
next_page_url = scrapertools.find_single_match(data,'<a id="wp_navNext" class="js_pop_page" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
- itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
+ itemlist.append(item.clone(action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
return itemlist


def play(item):
logger.info()
itemlist = []
- data = scrapertools.cachePage(item.url)
+ data = httptools.downloadpage(item.url).data
patron = '"defaultQuality":true,"format":"",.*?"videoUrl"\:"([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl in matches:

@@ -86,7 +86,7 @@ def mas_vistas(item):
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
data = re.sub(r"<!--.*?-->", "", data)
patron = "<div class='widget HTML' id='HTML3'.+?<div class='widget-content'>(.*?)</div>"
- data = scrapertools.get_match(data, patron)
+ data = scrapertools.find_single_match(data, patron)
item.data = data
item.first = 0
return series_seccion(item)
@@ -99,7 +99,7 @@ def listado_completo(item):
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
data = re.sub(r"<!--.*?-->", "", data)
patron = '<div class="widget HTML" id="HTML10".+?<div class="widget-content">(.*?)</div>'
- data = scrapertools.get_match(data, patron)
+ data = scrapertools.find_single_match(data, patron)
item.first = 0
item.data = data
return series_seccion(item)

@@ -13,25 +13,25 @@ host = "https://www.serviporno.com"
def mainlist(item):
logger.info()
itemlist = []
- itemlist.append(
- Item(channel=item.channel, action="videos", title="Útimos videos", url= host))
- itemlist.append(
- Item(channel=item.channel, action="videos", title="Más vistos", url="http://www.serviporno.com/mas-vistos/"))
- itemlist.append(
- Item(channel=item.channel, action="videos", title="Más votados", url="http://www.serviporno.com/mas-votados/"))
- itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias",
- url="http://www.serviporno.com/categorias/"))
- itemlist.append(
- Item(channel=item.channel, action="chicas", title="Chicas", url="http://www.serviporno.com/pornstars/"))
- itemlist.append(
- Item(channel=item.channel, action="search", title="Buscar", url="http://www.serviporno.com/search/?q="))
+ itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos",
+ url=host + "/ajax/homepage/?page=1", last= host))
+ itemlist.append(Item(channel=item.channel, action="videos", title="Más vistos",
+ url=host + "/ajax/most_viewed/?page=1", last= host + "/mas-vistos/"))
+ itemlist.append(Item(channel=item.channel, action="videos", title="Más votados",
+ url=host + "/ajax/best_rated/?page=1", last= host + "/mas-votados/"))
+ itemlist.append(Item(channel=item.channel, action="categorias", title="Canal",
+ url=host + "/ajax/list_producers/?page=1", last= host + "/sitios/"))
+ itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias", url= host + "/categorias/"))
+ itemlist.append(Item(channel=item.channel, action="chicas", title="Chicas",
+ url=host + "/ajax/list_pornstars/?page=1", last= host + "/pornstars/"))
+ itemlist.append(Item(channel=item.channel, action="search", title="Buscar", last=""))
return itemlist


def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
- item.url = item.url + texto
+ item.url = host + '/ajax/new_search/?q=%s&page=1' % texto
try:
return videos(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
@@ -42,65 +42,94 @@ def search(item, texto):
|
||||
return []
|
||||


def get_last_page(url):
    logger.info()
    data = httptools.downloadpage(url).data
    last_page = int(scrapertools.find_single_match(data, 'data-ajax-last-page="(\d+)"'))
    return last_page


def videos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data

    patron = '(?s)<div class="wrap-box-escena">.*?'
    patron += '<div class="box-escena">.*?'
    patron += '<a\s*href="([^"]+)".*?'
    patron += 'data-stats-video-name="([^"]+)".*?'
    patron += '<img\s*src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, title, thumbnail in matches:
    patron += '<img\s*src="([^"]+)".*?'
    patron += '<div class="duracion">([^"]+) min</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url, title, thumbnail, duration in matches:
        title = "[COLOR yellow]" + duration + "[/COLOR] " + title
        url = urlparse.urljoin(item.url, url)
        itemlist.append(Item(channel=item.channel, action='play', title=title, url=url, thumbnail=thumbnail))

    # Pager
    patron = '<a href="([^<]+)">Siguiente »</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) > 0:
        url = "http://www.serviporno.com" + matches[0]
        itemlist.append(
            Item(channel=item.channel, action="videos", title="Página Siguiente", url=url, thumbnail="", folder=True))

        itemlist.append(Item(channel=item.channel, action='play', title=title, url=url, thumbnail=thumbnail, fanart=thumbnail))
    # "Página Siguiente >>" pager
    current_page = int(scrapertools.find_single_match(item.url, "/?page=(\d+)"))
    if not item.last_page:
        last_page = get_last_page(item.last)
    else:
        last_page = int(item.last_page)
    if current_page < last_page:
        next_page = "?page=" + str(current_page + 1)
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(Item(channel=item.channel, action="videos", title="Página Siguiente >>", text_color="blue",
                             url=next_page, thumbnail="", last_page=last_page))
    return itemlist
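The current_page/last_page/next_page block above is repeated verbatim in videos(), chicas() and categorias() below. A minimal sketch of how the three copies could share one helper, assuming the Item semantics used in this channel (the name add_pager is hypothetical; the diff itself keeps the three copies):

    def add_pager(itemlist, item, action):
        # hypothetical factoring of the repeated pager block
        current_page = int(scrapertools.find_single_match(item.url, "/?page=(\d+)"))
        last_page = int(item.last_page) if item.last_page else get_last_page(item.last)
        if current_page < last_page:
            next_page = urlparse.urljoin(item.url, "?page=" + str(current_page + 1))
            itemlist.append(Item(channel=item.channel, action=action, title="Página Siguiente >>",
                                 text_color="blue", url=next_page, thumbnail="", last_page=last_page))
        return itemlist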


def chicas(item):
    logger.info()
    itemlist = []
    data = scrapertools.downloadpage(item.url)

    data = httptools.downloadpage(item.url).data
    patron = '<div class="box-chica">.*?'
    patron += '<a href="([^"]+)" title="">.*?'
    patron += '<img class="img" src=\'([^"]+)\' width="175" height="150" border=\'0\' alt="[^"]+"/>.*?'
    patron += '</a>[^<]{1}<h4><a href="[^"]+" title="">([^"]+)</a></h4>.*?'
    patron += '<img class="img" src=\'([^"]+)\' width="175" height="150" border=\'0\' alt="[^"]+" />.*?'
    patron += '<h4><a href="[^"]+" title="">([^"]+)</a></h4>.*?'
    patron += '<a class="total-videos" href="[^"]+" title="">([^<]+)</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)
    for url, thumbnail, title, videos in matches:
        url = urlparse.urljoin("http://www.serviporno.com", url)
        last = urlparse.urljoin(item.url, url)
        url = last.replace("/pornstar", "/ajax/show_pornstar") + "?page=1"
        title = title + " (" + videos + ")"
        itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, thumbnail=thumbnail, plot=""))
        itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, last=last, thumbnail=thumbnail, fanart=thumbnail))
    # "Página Siguiente >>" pager
    current_page = int(scrapertools.find_single_match(item.url, "/?page=(\d+)"))
    if not item.last_page:
        last_page = get_last_page(item.last)
    else:
        last_page = int(item.last_page)
    if current_page < last_page:
        next_page = "?page=" + str(current_page + 1)
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(Item(channel=item.channel, action="chicas", title="Página Siguiente >>", text_color="blue",
                             url=next_page, thumbnail="", last_page=last_page))
    return itemlist


def categorias(item):
    logger.info()
    itemlist = []
    data = scrapertools.downloadpage(item.url)

    patron = '<div class="wrap-box-escena">.*?'
    patron += '<div class="cat box-escena">.*?'
    patron += '<a href="([^"]+)"><img src="([^"]+)" alt="Webcam" height="150" width="175" border=0 /></a>.*?'
    patron += '<h4><a href="[^"]+">([^<]+)</a></h4>'

    data = httptools.downloadpage(item.url).data
    patron = '<div class="wrap-box-escena.*?'
    patron += 'href="([^"]+)"><img src="([^"]+)".*?'
    patron += '<h4.*?<a href="[^"]+">([^<]+)</a></h4>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url, thumbnail, title in matches:
        url = urlparse.urljoin(item.url, url)
        itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, thumbnail=thumbnail, plot=""))
        last = urlparse.urljoin(item.url, url)
        url = last.replace("/videos-porno", "/ajax/show_category").replace("/sitio", "/ajax/show_producer") + "?page=1"
        itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, last=last, thumbnail=thumbnail, plot=""))
    # "Página Siguiente >>" pager
    current_page = int(scrapertools.find_single_match(item.url, "/?page=(\d+)"))
    if not item.last_page:
        last_page = get_last_page(item.last)
    else:
        last_page = int(item.last_page)
    if current_page < last_page:
        next_page = "?page=" + str(current_page + 1)
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(Item(channel=item.channel, action="categorias", title="Página Siguiente >>", text_color="blue",
                             url=next_page, thumbnail="", last_page=last_page))
    return itemlist


@@ -113,3 +142,4 @@ def play(item):
        Item(channel=item.channel, action="play", server="directo", title=item.title, url=url, thumbnail=item.thumbnail,
             plot=item.plot, folder=False))
    return itemlist

@@ -14,8 +14,8 @@ host = 'http://sexgalaxy.net'
def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Ultimos", action="lista", url=host + "/new-releases/"))
    itemlist.append(Item(channel=item.channel, title="Peliculas", action="lista", url=host + "/full-movies/"))
    itemlist.append(Item(channel=item.channel, title="Videos", action="lista", url=host + "/new-releases/"))
    itemlist.append(Item(channel=item.channel, title="Canales", action="canales", url=host))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))

@@ -39,7 +39,7 @@ def canales(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(host).data
    data = scrapertools.get_match(data, 'Top Networks</a>(.*?)</ul>')
    data = scrapertools.find_single_match(data, 'Top Networks</a>(.*?)</ul>')
    patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:

@@ -56,7 +56,7 @@ def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data, 'More Categories</a>(.*?)</ul>')
    data = scrapertools.find_single_match(data, 'More Categories</a>(.*?)</ul>')
    patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:

@@ -81,10 +81,10 @@ def lista(item):
    if calidad:
        scrapedtitle = "[COLOR red]" + calidad + "[/COLOR] " + scrapedtitle
    itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
                         thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, plot=scrapedplot))
                         fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, plot=scrapedplot))
    next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)"')
    if next_page != "":
        itemlist.append(item.clone(action="lista", title="Next page >>", text_color="blue", url=next_page))
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist

@@ -11,9 +11,9 @@ from platformcode import logger
host = 'http://sexkino.to'


def mainlist(item):
    logger.info("pelisalacarta.sexkino mainlist")
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="New", action="peliculas", url=host + "/movies/"))
    itemlist.append(Item(channel=item.channel, title="New", action="lista", url=host + "/movies/"))
    itemlist.append(Item(channel=item.channel, title="Año", action="anual", url=host))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))

@@ -26,7 +26,7 @@ def search(item, texto):
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():

@@ -35,9 +35,9 @@ def search(item, texto):


def categorias(item):
    logger.info("pelisalacarta.sexkino categorias")
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    patron = '<li class="cat-item cat-item-.*?<a href="(.*?)" >(.*?)</a> <i>(.*?)</i>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

@@ -45,52 +45,77 @@ def categorias(item):
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot=scrapedplot))
    return itemlist
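Most hunks in this set make the same mechanical swap: the deprecated scrapertools download helpers give way to httptools, and get_match gives way to find_single_match. A minimal sketch of the equivalence, assuming only what the surrounding calls already show (downloadpage returns a response object whose .data attribute is the body, and, as the `if next_page != ""` fallbacks suggest, find_single_match returns an empty string where the old helper would fail):

    # old style, removed throughout this diff
    data = scrapertools.cachePage(url)
    block = scrapertools.get_match(data, pattern)
    # new style
    data = httptools.downloadpage(url).data                 # GET; post=... for form submissions
    block = scrapertools.find_single_match(data, pattern)   # "" when nothing matches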


def anual(item):
    logger.info("pelisalacarta.sexkino anual")
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    patron = '<li><a href="([^<]+)">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot=scrapedplot))
    return itemlist


def peliculas(item):
    logger.info("pelisalacarta.sexkino peliculas")
def lista(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    # this must be kept consistent with the search results page
    patron = '<article.*?<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+)".*?>(\d+)</span>'
    data = httptools.downloadpage(item.url).data
    patron = '<div class="poster">.*?'
    patron += '<img src="([^"]+)" alt="([^"]+)">.*?'
    patron += '<span class="quality">([^"]+)</span>.*?'
    patron += '<a href="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedthumbnail, scrapedtitle, date in matches:
    for scrapedthumbnail, scrapedtitle, calidad, scrapedurl in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + date + ")"
        itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
    next_page_url = scrapertools.find_single_match(data, 'resppages.*?<a href="([^"]+)" ><span class="icon-chevron-right">')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Next page >>", text_color="blue", url=next_page_url, folder=True))
        scrapedtitle = scrapedtitle + " (" + calidad + ")"
        itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot))
    next_page = scrapertools.find_single_match(data, 'resppages.*?<a href="([^"]+)" ><span class="icon-chevron-right">')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(item.clone(action="lista", title="Next page >>", text_color="blue", url=next_page))
    return itemlist


def findvideos(item):
    logger.info("pelisalacarta.a0 findvideos")
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data

    # <th>Watch online</th><th>Quality</th><th>Language</th><th>Added</th></tr></thead>
    # <tbody>
    # <tr id='link-3848'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=vidzella.me'> <a href='http://sexkino.to/links/69321-5/' target='_blank'>Watch online</a></td>
    # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr>
    # <tr id='link-3847'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=flashx.tv'> <a href='http://sexkino.to/links/69321-4/' target='_blank'>Watch online</a></td>
    # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr>
    # <tr id='link-3844'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=openload.co'> <a href='http://sexkino.to/links/69321-3/' target='_blank'>Watch online</a></td>
    # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr>
    # <tr id='link-3843'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=vidoza.net'> <a href='http://sexkino.to/links/69321-2/' target='_blank'>Watch online</a></td>
    # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr>
    # <tr id='link-3842'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=rapidvideo.ws'> <a href='http://sexkino.to/links/69321/' target='_blank'>Watch online</a></td>
    # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr>
    # </tbody></table></div></div></div></div>

    patron = '<tr id=(.*?)</tr>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for match in matches:
        url = scrapertools.find_single_match(match, 'href="([^"]+)" target')
        title = scrapertools.find_single_match(match, '<td><img src=.*?> (.*?)</td>')
        itemlist.append(item.clone(action="play", title=title, url=url))

    # <a id="link" href="https://vidzella.me/play#GS7D" class="btn" style="background-color:#1e73be">Continue</a>
    patron = '<iframe class="metaframe rptss" src="([^"]+)".*?<li><a class="options" href="#option-\d+">\s+(.*?)\s+<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:

@@ -101,8 +126,8 @@ def findvideos(item):


def play(item):
    logger.info("pelisalacarta.sexkino play")
    data = scrapertools.cachePage(item.url)
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title

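play() here shows the pattern several channels in this diff share: servertools.find_video_items() scans the raw page for URLs of known video hosters and returns ready-made playable Items, which only need their title restored from the listing. A minimal sketch of the convention, using nothing beyond what these hunks show:

    def play(item):
        data = httptools.downloadpage(item.url).data
        itemlist = servertools.find_video_items(data=data)
        for videoitem in itemlist:
            videoitem.title = item.title  # the hoster scan does not know the listing title
        return itemlist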
@@ -19,7 +19,7 @@ def mainlist(item):
    itemlist.append(Item(channel=item.channel, title="SexMUSIC", action="lista", url=host + "/topics/sexo-music-videos/"))
    itemlist.append(Item(channel=item.channel, title="Xshows", action="lista", url=host + "/xshows/"))
    itemlist.append(Item(channel=item.channel, title="Canal", action="categorias", url=host))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
    # itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist

@@ -42,30 +42,17 @@ def categorias(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if item.title == "Canal":
        data = scrapertools.get_match(data, '>Best Porn Studios</a>(.*?)</ul>')
        data = scrapertools.find_single_match(data, '>Adult Porn Parodies</a></li>(.*?)</ul>')
    else:
        data = scrapertools.get_match(data, '<div class="nav-wrap">(.*?)<ul class="sub-menu">')
        data = scrapertools.find_single_match(data, '<div class="nav-wrap">(.*?)<ul class="sub-menu">')
    itemlist.append(Item(channel=item.channel, action="lista", title="Big tit", url="https://sexofilm.com/?s=big+tits"))

    patron = '<a href="(.*?)".*?>(.*?)</a>'
    patron = '<a href="([^<]+)">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl))
    return itemlist


def catalogo(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data, '<div class="nav-wrap">(.*?)<ul class="sub-menu">')
    patron = '<a href="(.*?)">(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl))
    return itemlist


def anual(item):
    logger.info()
    itemlist = []

@@ -83,9 +70,9 @@ def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="post-thumbnail.*?<a href="([^"]+)" title="(.*?)".*?src="([^"]+)"'
    patron = '<div class="post-thumbnail.*?<a href="([^"]+)".*?src="([^"]+)".*?title="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        plot = ""
        title = scrapedtitle.replace(" Porn DVD", "").replace("Permalink to ", "").replace(" Porn Movie", "")
        itemlist.append(item.clone(action="play", title=title, url=scrapedurl, thumbnail=scrapedthumbnail,

plugin.video.alfa/channels/siska.json (new file, 15 lines)
@@ -0,0 +1,15 @@
{
    "id": "siska",
    "name": "siska",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://www.siska.tv/images/siska.png?50",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}
plugin.video.alfa/channels/siska.py (new file, 89 lines)
@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
import urlparse, urllib2, urllib, re
import os, sys

from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools

host = 'http://www.siska.tv/'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Nuevos", action="lista", url=host + "newVideo.php?language=en"))
    itemlist.append(Item(channel=item.channel, title="Mas vistos", action="lista", url=host + "MostViewed.php?views=month&language=en"))
    itemlist.append(Item(channel=item.channel, title="Canal", action="categorias", url=host + "Channel.php?language=en"))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "index.php?category=1&language=en"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "search.php?q=%s&language=en&search=Search" % texto
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
    data = scrapertools.find_single_match(data, '<div id="content">(.*?)<div class="maincat">')
    patron = '<a href="(.*?)".*?'
    patron += '<img src="(.*?)".*?alt="(.*?)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle.replace("Watch Channel ", "")
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=url,
                             thumbnail=thumbnail, plot=scrapedplot))
    return itemlist
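The re.sub call above, repeated across these channels, flattens newlines, tabs and &nbsp; entities before matching, so the DOTALL patterns never depend on the site's whitespace. The idiom in isolation, with an assumed snippet of markup:

    import re

    raw = "<li>\n\t<h3><a href='/v/1'>Clip one</a></h3>\n</li>"  # assumed sample markup
    flat = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", raw)
    # flat == "<li><h3><a href='/v/1'>Clip one</a></h3></li>"
    print(re.findall(r"<a href='([^']+)'>", flat))  # ['/v/1']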


def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
    if "catID=" in item.url:
        patron = '<li><h3><a href="([^"]+)">.*?'
        patron += '<img src="([^"]+)" class="imgt" alt="([^"]+)".*?'
        patron += '<div class="time">(.*?)</div>'
    else:
        patron = '<li><h3><a href=\'([^\']+)\'>.*?'
        patron += '<img src=\'([^\']+)\' class=\'imgt\' alt=\'(.*?)\'.*?'
        patron += '<div class=\'time\'>(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtime in matches:
        scrapedtime = scrapedtime.replace("Duration: ", "").replace(" : ", ":")
        url = urlparse.urljoin(item.url, scrapedurl)
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        plot = ""
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
                             contentTitle=scrapedtitle))
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"><span>Next')
    if next_page == "":
        next_page = scrapertools.find_single_match(data, '<a href=\'([^\']+)\' title=\'Next Page\'>')
    if next_page:
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
                             url=next_page))
    return itemlist

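lista() keeps two pattern variants only because some listing pages quote attributes with " and others with '. A character class covering both quote styles would collapse the branch; a hypothetical simplification, not part of the diff (it tolerates mismatched open/close quotes, which these pages do not produce):

    quote = '["\']'
    patron = '<li><h3><a href=%s([^"\']+)%s>.*?' % (quote, quote)
    patron += '<img src=%s([^"\']+)%s class=%simgt%s alt=%s(.*?)%s.*?' % ((quote,) * 6)
    patron += '<div class=%stime%s>(.*?)</div>' % (quote, quote)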
@@ -1,7 +1,7 @@
{
    "id": "spankwire",
    "name": "spankwire",
    "active": true,
    "active": false,
    "adult": true,
    "language": ["*"],
    "thumbnail": "https://cdn1-static-spankwire.spankcdn.net/apple-touch-icon-precomposed.png",

@@ -7,18 +7,16 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools

host = 'https://www.spankwire.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Nuevas", action="peliculas", url=host + "/recentvideos/straight"))
    itemlist.append(Item(channel=item.channel, title="Mas Vistas", action="peliculas", url=host + "/home1/Straight/Month/Views"))
    itemlist.append(Item(channel=item.channel, title="Mejor valorada", action="peliculas", url=host + "/home1/Straight/Month/Rating"))
    itemlist.append(Item(channel=item.channel, title="Longitud", action="peliculas", url=host + "/home1/Straight/Month/Duration"))
    itemlist.append(Item(channel=item.channel, title="Nuevas", action="lista", url=host + "/recentvideos/straight"))
    itemlist.append(Item(channel=item.channel, title="Mas Vistas", action="lista", url=host + "/home1/Straight/Month/Views"))
    itemlist.append(Item(channel=item.channel, title="Mejor valorada", action="lista", url=host + "/home1/Straight/Month/Rating"))
    itemlist.append(Item(channel=item.channel, title="Longitud", action="lista", url=host + "/home1/Straight/Month/Duration"))
    #itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/Straight"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist

@@ -29,7 +27,7 @@ def search(item, texto):
    texto = texto.replace(" ", "+")
    item.url = host + "/search/?q=%s" % texto
    try:
        return peliculas(item)
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():

@@ -42,7 +40,9 @@ def categorias(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="category-thumb"><a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)" />.*?<span>([^"]+)</span>'
    patron = '<div class="category-thumb"><a href="([^"]+)".*?'
    patron += '<img src="([^"]+)" alt="([^"]+)" />.*?'
    patron += '<span>([^"]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedthumbnail, scrapedtitle, cantidad in matches:

@@ -50,16 +50,20 @@ def categorias(item):
        scrapedthumbnail = "http:" + scrapedthumbnail
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        scrapedurl = urlparse.urljoin(item.url, scrapedurl) + "/Submitted/59"
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot))
    return itemlist


def peliculas(item):
def lista(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="video_thumb_wrapper">.*?<a href="([^"]+)".*?data-original="([^"]+)".*?title="([^"]+)".*?<div class="video_thumb_wrapper__thumb_info video_thumb_wrapper__duration">(.*?)</div>'
    patron = '<div class="video_thumb_wrapper">.*?'
    patron += '<a href="([^"]+)".*?data-original="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '<div class="video_thumb_wrapper__thumb_info video_thumb_wrapper__duration">(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, duracion in matches:
        url = urlparse.urljoin(item.url, scrapedurl)

@@ -67,24 +71,24 @@ def peliculas(item):
        contentTitle = title
        thumbnail = scrapedthumbnail
        plot = ""
        year = ""
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year': year}))
    next_page_url = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                             fanart=thumbnail, plot=plot, contentTitle=contentTitle))
    next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
    # For the search results page
    if next_page_url == "":
        next_page_url = scrapertools.find_single_match(data, '<div class="paginator_wrapper__buttons"><a class="" href="([^"]+)"')
        next_page_url = urlparse.urljoin(item.url, next_page_url)
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url, folder=True))
    if next_page == "":
        next_page = scrapertools.find_single_match(data, '<div class="paginator_wrapper__buttons"><a class="" href="([^"]+)"')
        next_page = urlparse.urljoin(item.url, next_page)
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = scrapertools.get_match(data, 'Copy Embed Code(.*?)For Desktop')
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, 'Copy Embed Code(.*?)For Desktop')
    patron = '<div class="shareDownload_container__item__dropdown">.*?<a href="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:

@@ -38,7 +38,7 @@ def catalogo(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data, 'PaySites(.*?)<li id="menu-item-28040"')
    data = scrapertools.find_single_match(data, 'PaySites(.*?)<li id="menu-item-28040"')
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li id="menu-item-\d+".*?<a href="([^"]+)">([^"]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

@@ -55,7 +55,7 @@ def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data, '<a href="#">Categories</a>(.*?)</ul>')
    data = scrapertools.find_single_match(data, '<a href="#">Categories</a>(.*?)</ul>')
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li id="menu-item-\d+".*?<a href="([^"]+)">([^"]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

@@ -37,8 +37,8 @@ def search(item, texto):
def videos(item):
    logger.info()
    itemlist = []
    data = scrapertools.downloadpageGzip(item.url)
    patron = '<div class="item-block item-normal col" >.*?'
    data = httptools.downloadpage(item.url).data
    patron = '<div class="item-block item-normal col".*?'
    patron += '<a href="([^"]+)" title="([^"]+)">.*?'
    patron += 'data-src="([^"]+)".*?'
    patron += '</span> ([^"]+)<'

@@ -40,16 +40,17 @@ def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data, '<div class="category-item">(.*?)<div id="goupBlock"')
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href="([^"]+)">\s*(.*?)\s*<'
    patron = '<div class="thumb-container with-title moviec">.*?'
    patron += '<a href="([^"]+)".*?'
    patron += 'src="([^"]+)".*?'
    patron += '<a title="([^"]+)".*?'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedurl = scrapedurl + "/most-recent/"
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot=scrapedplot))
                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot))
    return itemlist

@@ -67,7 +68,7 @@ def catalogo(item):
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot))
                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot))
    next_page = scrapertools.find_single_match(data, '<li><a class="pag-next" href="(.*?)">Next ></a>')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)

@@ -78,19 +79,24 @@ def catalogo(item):
def lista(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    data = scrapertools.get_match(data, 'class="thumbs-container">(.*?)<div class="clearfix">')
    patron = '<p class="btime">([^"]+)</p>.*?href="([^"]+)".*?src="([^"]+)".*?title="([^"]+)">'
    data = scrapertools.find_single_match(data, 'class="thumbs-container">(.*?)<div class="clearfix">')
    patron = '<p class="btime">([^"]+)</p>.*?'
    patron += '>(.*?)<img width=.*?'
    patron += '="([^"]+)" class="thumb.*?'
    patron += 'title="([^"]+)".*?'
    patron += 'href="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for duracion, scrapedurl, scrapedthumbnail, scrapedtitle in matches:
    for duracion, calidad, scrapedthumbnail, scrapedtitle, scrapedurl in matches:
        url = scrapedurl
        contentTitle = scrapedtitle
        title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
        if ">HD<" in calidad:
            title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                             fanart=scrapedthumbnail, plot=plot, contentTitle=contentTitle))
                             fanart=scrapedthumbnail, plot=plot, contentTitle=scrapedtitle))
    next_page = scrapertools.find_single_match(data, '<li><a class="pag-next" href="(.*?)">Next ></a>')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)

@@ -101,7 +107,7 @@ def lista(item):
def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    patron = '<video src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:

@@ -23,7 +23,7 @@ def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data, '<h3>Categories</h3>(.*?)</ul>')
    data = scrapertools.find_single_match(data, '<h3>Categories</h3>(.*?)</ul>')
    patron = '<li class="cat-item cat-item-\d+"><a href="(.*?)" >(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:

@@ -56,7 +56,7 @@ def lista(item):

def play(item):
    logger.info()
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title

@@ -69,7 +69,11 @@ def mainlist(item):
    itemlist.append(Item(channel=__channel__, title="Caseros", url=host + '/hd',
                         action="videos", viewmode="movie_with_plot", viewcontent='homemade',
                         thumbnail=get_thumb("channels_adult.png")))

    itemlist.append(Item(channel=__channel__, title="PornStar", action="catalogo",
                         url=host + '/pornstars/', viewmode="movie_with_plot", viewcontent='movies',
                         thumbnail=get_thumb("channels_adult.png")))

    itemlist.append(Item(channel=__channel__, title="Categorías", action="categorias",
                         url=host + '/categories/', viewmode="movie_with_plot", viewcontent='movies',
                         thumbnail=get_thumb("channels_adult.png")))

@@ -100,28 +104,45 @@ def search(item, texto):
def videos(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<a class="[^"]+" href="([^"]+)">'         # url
    patron += '<img id="[^"]+".*?src="([^"]+)".*?'      # img
    patron += '<span class="title">([^<]+)</span>.*?'   # title
    patron += '<span class="duration">([^<]+)</span>'   # time
    patron += '<span class="duration"(.*?)</a>'         # time
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, scrapedtitle, time in matches:
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtime in matches:
        time = scrapertools.find_single_match(scrapedtime, '>([^<]+)</span>')
        title = "[%s] %s" % (time, scrapedtitle)

        if ">HD<" in scrapedtime:
            title = "[COLOR yellow]" + time + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle
        itemlist.append(Item(channel=item.channel, action='play', title=title, thumbnail=scrapedthumbnail,
                             url=host + scrapedurl, contentTitle=scrapedtitle, fanart=scrapedthumbnail))

    paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace('amp;', '')

    if paginacion:
        itemlist.append(Item(channel=item.channel, action="videos",
                             thumbnail=thumbnail % 'rarrow',
                             title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
    return itemlist


def catalogo(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li class="pornstars">.*?<a href="([^"]+)".*?'
    patron += '<img src="([^"]+)" alt="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        itemlist.append(Item(channel=item.channel, action="videos", url=url, title=scrapedtitle, fanart=scrapedthumbnail,
                             thumbnail=scrapedthumbnail, viewmode="movie_with_plot"))
    paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace('amp;', '')
    if paginacion:
        itemlist.append(Item(channel=item.channel, action="catalogo",
                             thumbnail=thumbnail % 'rarrow',
                             title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
    return itemlist


@@ -133,9 +154,7 @@ def categorias(item):
    # logger.info(data)
    patron = 'class="checkHomepage"><a href="([^"]+)".*?'   # url
    patron += '<span class="count">([^<]+)</span>'          # title, vids

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, vids in matches:
        scrapedtitle = scrapedurl.replace('/categories/', '').replace('-', ' ').title()
        title = "%s (%s)" % (scrapedtitle, vids.title())

@@ -144,7 +163,6 @@ def categorias(item):
        itemlist.append(Item(channel=item.channel, action="videos", fanart=thumbnail,
                             title=title, url=url, thumbnail=thumbnail,
                             viewmode="movie_with_plot", folder=True))

    return itemlist


@@ -153,5 +171,5 @@ def play(item):
    data = httptools.downloadpage(item.url).data
    url = scrapertools.find_single_match(data, '"quality":"[^"]+","videoUrl":"([^"]+)"').replace('\\', '')
    itemlist.append(item.clone(url=url, title=item.contentTitle))

    return itemlist

@@ -62,7 +62,7 @@ def categorias(item):
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    if item.title == "PornStars":
        data = scrapertools.get_match(data, '</i> Hall Of Fame Pornstars</h1>(.*?)</section>')
        data = scrapertools.find_single_match(data, '</i> Hall Of Fame Pornstars</h1>(.*?)</section>')
    patron = '<a class="thumb" href="([^"]+)">.*?<img src="([^"]+)".*?<div class="vidcountSp">(.*?)</div>.*?<a class="categoryTitle".*?>([^"]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, cantidad, scrapedtitle in matches:

@@ -7,6 +7,7 @@ import re
import urllib
import urlparse

from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools

@@ -94,8 +95,6 @@ def buscartrailer(item, trailers=[]):
    if not item.show and not item.infoLabels['tvshowtitle']:
        itemlist.append(item.clone(title=title % config.get_localized_string(70508),
                                   action="abandomoviez_search", text_color="green"))
        itemlist.append(item.clone(title=title % config.get_localized_string(70509),
                                   action="jayhap_search", text_color="green"))

    if item.contextual:
        global window_select, result

@@ -120,8 +119,6 @@ def manual_search(item):
        return youtube_search(item.clone(contentTitle=texto, page=""))
    elif item.extra == "filmaffinity":
        return filmaffinity_search(item.clone(contentTitle=texto, page="", year=""))
    elif item.extra == "jayhap":
        return jayhap_search(item.clone(contentTitle=texto))


def tmdb_trailers(item, tipo="movie"):

@@ -153,11 +150,11 @@ def youtube_search(item):
    titulo += " trailer"
    # Check whether this is a fresh search or comes from the "Siguiente" option
    if item.page != "":
        data = scrapertools.downloadpage(item.page)
        data = httptools.downloadpage(item.page).data
    else:
        titulo = urllib.quote(titulo)
        titulo = titulo.replace("%20", "+")
        data = scrapertools.downloadpage("https://www.youtube.com/results?sp=EgIQAQ%253D%253D&q=" + titulo)
        data = httptools.downloadpage("https://www.youtube.com/results?sp=EgIQAQ%253D%253D&q=" + titulo).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = """"thumbnails":\[\{"url":"(https://i.ytimg.com/vi[^"]+).*?"""
    patron += """simpleText":"([^"]+).*?"""

@@ -200,18 +197,18 @@ def abandomoviez_search(item):

    # Check whether this is a fresh search or comes from the "Siguiente" option
    if item.page != "":
        data = scrapertools.downloadpage(item.page)
        data = httptools.downloadpage(item.page).data
    else:
        titulo = item.contentTitle.decode('utf-8').encode('iso-8859-1')
        post = urllib.urlencode({'query': titulo, 'searchby': '1', 'posicion': '1', 'orden': '1',
                                 'anioin': item.year, 'anioout': item.year, 'orderby': '1'})
        url = "http://www.abandomoviez.net/db/busca_titulo_advance.php"
        item.prefix = "db/"
        data = scrapertools.downloadpage(url, post=post)
        data = httptools.downloadpage(url, post=post).data
        if "No hemos encontrado ninguna" in data:
            url = "http://www.abandomoviez.net/indie/busca_titulo_advance.php"
            item.prefix = "indie/"
            data = scrapertools.downloadpage(url, post=post).decode("iso-8859-1").encode('utf-8')
            data = httptools.downloadpage(url, post=post).data.decode("iso-8859-1").encode('utf-8')

    itemlist = []
    patron = '(?:<td width="85"|<div class="col-md-2 col-sm-2 col-xs-3">).*?<img src="([^"]+)"' \

@@ -253,7 +250,7 @@ def abandomoviez_search(item):
def search_links_abando(item):
    logger.info()

    data = scrapertools.downloadpage(item.url)
    data = httptools.downloadpage(item.url).data
    itemlist = []
    if "Lo sentimos, no tenemos trailer" in data:
        itemlist.append(item.clone(title=config.get_localized_string(70503), action="", text_color=""))

@@ -288,7 +285,7 @@ def search_links_abando(item):
        progreso.update(10 + (90 * i / len(matches)), message)
        scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle

        data_trailer = scrapertools.downloadpage(scrapedurl)
        data_trailer = httptools.downloadpage(scrapedurl).data
        trailer_url = scrapertools.find_single_match(data_trailer, 'iframe.*?src="([^"]+)"')
        trailer_url = trailer_url.replace("embed/", "watch?v=")
        code = scrapertools.find_single_match(trailer_url, 'v=([A-z0-9\-_]+)')

@@ -318,12 +315,12 @@ def filmaffinity_search(item):

    # Check whether this is a fresh search or comes from the "Siguiente" option
    if item.page != "":
        data = scrapertools.downloadpage(item.page)
        data = httptools.downloadpage(item.page).data
    else:
        params = urllib.urlencode([('stext', item.contentTitle), ('stype%5B%5D', 'title'), ('country', ''),
                                   ('genre', ''), ('fromyear', item.year), ('toyear', item.year)])
        url = "http://www.filmaffinity.com/es/advsearch.php?%s" % params
        data = scrapertools.downloadpage(url)
        data = httptools.downloadpage(url).data

    itemlist = []
    patron = '<div class="mc-poster">.*?<img.*?src="([^"]+)".*?' \

@@ -371,7 +368,7 @@ def search_links_filmaff(item):
    logger.info()

    itemlist = []
    data = scrapertools.downloadpage(item.url)
    data = httptools.downloadpage(item.url).data
    if not '<a class="lnkvvid"' in data:
        itemlist.append(item.clone(title=config.get_localized_string(70503), action="", text_color=""))
    else:

@@ -408,55 +405,6 @@ def search_links_filmaff(item):
    return itemlist


def jayhap_search(item):
    logger.info()
    itemlist = []

    if item.extra != "jayhap":
        item.contentTitle += " trailer"
    texto = item.contentTitle
    post = urllib.urlencode({'q': texto, 'yt': 'true', 'vm': 'true', 'dm': 'true',
                             'v': 'all', 'l': 'all', 'd': 'all'})

    # Check whether this is a fresh search or comes from the "Siguiente" option
    if item.page != "":
        post += urllib.urlencode(item.page)
        data = scrapertools.downloadpage("https://www.jayhap.com/load_more.php", post=post)
    else:
        data = scrapertools.downloadpage("https://www.jayhap.com/get_results.php", post=post)
    data = jsontools.load(data)
    for video in data['videos']:
        url = video['url']
        server = video['source'].lower()
        duration = " (" + video['duration'] + ")"
        title = video['title'].decode("utf-8") + duration + " [" + server.capitalize() + "]"
        thumbnail = video['thumbnail']
        if item.contextual:
            title = "[COLOR white]%s[/COLOR]" % title
        itemlist.append(item.clone(action="play", server=server, title=title, url=url, thumbnail=thumbnail,
                                   text_color="white"))

    if not itemlist:
        itemlist.append(item.clone(title=config.get_localized_string(70501) % item.contentTitle,
                                   action="", thumbnail="", text_color=""))
    else:
        tokens = data['tokens']
        tokens['yt_token'] = tokens.pop('youtube')
        tokens['vm_token'] = tokens.pop('vimeo')
        tokens['dm_token'] = tokens.pop('dailymotion')
        itemlist.append(item.clone(title=config.get_localized_string(70502), page=tokens, action="jayhap_search", extra="jayhap",
                                   thumbnail="", text_color=""))

    if keyboard:
        if item.contextual:
            title = "[COLOR green]%s[/COLOR]"
        else:
            title = "%s"
        itemlist.append(item.clone(title=title % config.get_localized_string(70514), action="manual_search",
                                   text_color="green", thumbnail="", extra="jayhap"))

    return itemlist
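The removed jayhap_search shows a token-based pager: the first response carries per-provider continuation tokens which are renamed to the field names load_more.php expects, stored on the item as page, and urlencoded into the follow-up POST. A minimal sketch of just that hand-off, with assumed token values:

    import urllib

    tokens = {'youtube': 'tok1', 'vimeo': 'tok2', 'dailymotion': 'tok3'}  # assumed values
    tokens['yt_token'] = tokens.pop('youtube')
    tokens['vm_token'] = tokens.pop('vimeo')
    tokens['dm_token'] = tokens.pop('dailymotion')
    post = urllib.urlencode({'q': 'some title trailer'})
    post += "&" + urllib.urlencode(tokens)  # separator added here; the removed code concatenated directly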


try:
    import xbmcgui

@@ -49,7 +49,7 @@ def categorias(item):
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot))
                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot))
    next_page = scrapertools.find_single_match(data, '<li><a class="pag-next" href="([^"]+)"><ins>Next</ins></a>')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)

@@ -60,7 +60,7 @@ def categorias(item):
def lista(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = 'href="([^"]+)"\s*class="th-video.*?'
    patron += '<img src="([^"]+)".*?'

@@ -85,7 +85,7 @@ def lista(item):
def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    patron = '<video src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url in matches:

@@ -38,7 +38,7 @@ def novedades(item):
    logger.info()

    # Download the page
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    # <a href="http://tubehentai.com/videos/slave_market_¨c_ep1-595.html"><img class="img" width="145" src="http://tubehentai.com/media/thumbs/5/9/5/./f/595/595.flv-3.jpg" alt="Slave_Market_¨C_Ep1" id="4f4fbf26f36
    patron = '<a href="(http://tubehentai.com/videos/[^"]+)"><img.*?src="(http://tubehentai.com/media/thumbs/[^"]+)" alt="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

@@ -80,8 +80,8 @@ def play(item):
    # s1.addParam("flashvars","overlay=http://tubehentai.com/media/thumbs/5/2/3/9/c/5239cf74632cbTHLaBlueGirlep3%20%20Segment2000855.000001355.000.mp4
    # http://tubehentai.com/media/thumbs/5/2/3/9/c/5239cf74632cbTHLaBlueGirlep3%20%20Segment2000855.000001355.000.mp4
    # http://tubehentai.com/media/videos/5/2/3/9/c/5239cf74632cbTHLaBlueGirlep3%20%20Segment2000855.000001355.000.mp4?start=0
    data = scrapertools.cachePage(item.url)
    url = scrapertools.get_match(data, 's1.addParam\("flashvars","bufferlength=1&autostart=true&overlay=(.*?\.mp4)')
    data = httptools.downloadpage(item.url).data
    url = scrapertools.find_single_match(data, 's1.addParam\("flashvars","bufferlength=1&autostart=true&overlay=(.*?\.mp4)')
    url = url.replace("/thumbs", "/videos")
    # url = url + "?start=0"
    logger.info("url=" + url)

@@ -9,16 +9,16 @@ from core import servertools
from core.item import Item
from platformcode import logger

host = 'http://www.vidz7.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, action="lista", title="Últimos videos", url="http://www.vidz7.com/"))
    itemlist.append(Item(channel=item.channel, action="lista", title="Últimos videos", url=host))
    itemlist.append(
        Item(channel=item.channel, action="categorias", title="Categorias", url="http://www.vidz7.com/category/"))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
                         url="http://www.vidz7.com/?s="))
        Item(channel=item.channel, action="categorias", title="Categorias", url=host + "/category/"))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url="http://www.vidz7.com"))
    return itemlist


@@ -26,7 +26,7 @@ def search(item, texto):
    logger.info()

    texto = texto.replace(" ", "+")
    item.url = "{0}{1}".format(item.url, texto)
    item.url = host + "/?s=%s" % texto
    try:
        return lista(item)
    # The exception is caught so that a failing channel does not break the global search

@@ -52,32 +52,27 @@ def categorias(item):

def lista(item):
    logger.info()

    # Download the page
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}", "", data)

    # Extract the entries from the selected page
    patron = "<a href='.*?.' class='thumb' style='background-image:url\(\"([^\"]+)\"\).*?.<h6><a class='hp' href='([^']+)'>(.*?)</a></h6>"
    patron = "<a href='.*?.' class='thumb' style='background-image:url\(\"([^\"]+)\"\).*?"
    patron += "<div class=\"hd\">(.*?)</div>.*?"
    patron += "<div class=\"duration\">(.*?)</div>.*?"
    patron += "<h6><a class='hp' href='([^']+)'>(.*?)</a></h6>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
    for scrapedthumbnail, scrapedhd, duration, scrapedurl, scrapedtitle in matches:
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        url = urlparse.urljoin(item.url, scrapedurl)
        title = scrapedtitle.strip()

        scrapedtitle = scrapedtitle.strip()
        title = "[COLOR yellow]" + duration + "[/COLOR] " + "[COLOR red]" + scrapedhd + "[/COLOR] " + scrapedtitle
        # Add to the listing
        itemlist.append(Item(channel=item.channel, action="play", title=title, thumbnail=thumbnail, fanart=thumbnail,
                             fulltitle=title, url=url,
                             viewmode="movie", folder=True))

    paginacion = scrapertools.find_single_match(data,
                                                '<a class="active".*?.>\d+</a><a class="inactive" href ="([^"]+)">')
    paginacion = scrapertools.find_single_match(data, '<a class="active".*?.>\d+</a><a class="inactive" href ="([^"]+)">')
    if paginacion:
        itemlist.append(Item(channel=item.channel, action="lista", title=">> Página Siguiente", url=paginacion))

    return itemlist


@@ -94,5 +89,5 @@ def play(item):
    videoitem.action = "play"
    videoitem.folder = False
    videoitem.title = item.title

    return itemlist

@@ -56,7 +56,7 @@ def catalogo(item):
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        scrapedurl = host + scrapedurl
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot))
                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot))
    next_page = scrapertools.find_single_match(data, '<a class="next" href="([^"]+)">')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)

@@ -69,7 +69,7 @@ def categorias(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    data = scrapertools.get_match(data, '<div class="cats-all categories-list">(.*?)</div>')
    data = scrapertools.find_single_match(data, '<div class="cats-all categories-list">(.*?)</div>')
    patron = '<a href="([^"]+)".*?>([^"]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:

@@ -84,19 +84,22 @@ def categorias(item):
def lista(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="video">.*?'
    patron += '<a href="([^"]+)".*?'
    patron += '<span class="time">(.*?)</span>.*?'
    patron += '<span class="time">(.*?)</span>(.*?)</span>.*?'
    patron += '<img src="([^"]+)" alt="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, time, scrapedthumbnail, scrapedtitle in matches:
    for scrapedurl, time, calidad, scrapedthumbnail, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.replace(", ", " & ").replace("(", "(").replace(")", ")")
        title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle
        if "hd-marker is-hd" in calidad:
            title = "[COLOR yellow]" + time + " [/COLOR]" + "[COLOR red]" + "HD" + " [/COLOR]" + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=scrapedurl,
                             thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle=title))
                             fanart=thumbnail, thumbnail=thumbnail, plot=plot, contentTitle=title))
    next_page = scrapertools.find_single_match(data, '<a class="next.*?title="Next Page" href="([^"]+)">')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)

@@ -107,7 +110,7 @@ def lista(item):
def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    patron = '<source src="([^"]+)" type="video/mp4" label="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle in matches:

@@ -15,9 +15,9 @@ host = 'https://watchpornfree.ws'
 def mainlist(item):
     logger.info("")
     itemlist = []
-    itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/clips-scenes"))
     itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/movies"))
     itemlist.append( Item(channel=item.channel, title="Parodia" , action="lista", url=host + "/category/parodies-hd"))
+    itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/clips-scenes"))
     itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host))
     itemlist.append( Item(channel=item.channel, title="Año" , action="categorias", url=host))
     itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
@@ -37,18 +37,17 @@ def search(item, texto):
             logger.error("%s" % line)
         return []

 # <li class="cat-item cat-item-6"><a href="https://watchpornfree.ws/category/all-girl" >All Girl</a> (2,777)
 # </li>

 def categorias(item):
     logger.info("")
     itemlist = []
     data = httptools.downloadpage(item.url).data
     if item.title == "Canal":
-        data = scrapertools.get_match(data,'>Studios</a>(.*?)</ul>')
+        data = scrapertools.find_single_match(data,'>Studios</a>(.*?)</ul>')
     if item.title == "Año":
-        data = scrapertools.get_match(data,'>Years</a>(.*?)</ul>')
+        data = scrapertools.find_single_match(data,'>Years</a>(.*?)</ul>')
     if item.title == "Categorias":
-        data = scrapertools.get_match(data,'>XXX Genres</div>(.*?)</ul>')
+        data = scrapertools.find_single_match(data,'>XXX Genres</div>(.*?)</ul>')
     patron = '<a href="([^"]+)".*?>([^"]+)</a>(.*?)</li>'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedurl,scrapedtitle,cantidad in matches:
@@ -62,13 +61,13 @@ def categorias(item):

 def lista(item):
     logger.info("")
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     patron = '<article class="TPost B">.*?<a href="([^"]+)">.*?src="([^"]+)".*?<div class="Title">([^"]+)</div>'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
         scrapedplot = ""
         itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next »</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
@@ -31,7 +31,7 @@ def categorias(item):
         scrapedplot = ""
         scrapedthumbnail = host + scrapedthumbnail
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     return itemlist

@@ -60,7 +60,7 @@ def lista(item):

 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
     scrapedurl = scrapedurl.replace("pornhub.com/embed/", "pornhub.com/view_video.php?viewkey=")
     data = httptools.downloadpage(scrapedurl).data
@@ -53,7 +53,7 @@ def videos(item):
     data = httptools.downloadpage(item.url).data
     itemlist = []

-    data = scrapertools.get_match(data, '<article.+?>(.*?)</article>')
+    data = scrapertools.find_single_match(data, '<article.+?>(.*?)</article>')

     # Patron
     patron = '(?s)<div class="thumb-list__item.*?href="([^"]+)".*?src="([^"]+)".*?alt="([^"]+)">.*?'
@@ -87,7 +87,7 @@ def categorias(item):

     data = httptools.downloadpage(item.url).data

-    data = scrapertools.get_match(data, '(?s)<div class="all-categories">(.*?)</aside>')
+    data = scrapertools.find_single_match(data, '(?s)<div class="all-categories">(.*?)</aside>')

     patron = '(?s)<li>.*?<a href="([^"]+)".*?>([^<]+).*?</a></li>'
     matches = re.compile(patron, re.DOTALL).findall(data)

@@ -44,11 +44,11 @@ def categorias(item):
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     if item.title=="Canal":
-        data = scrapertools.get_match(data,'<div class="footer-banner">(.*?)<div id="footer-copyright">')
+        data = scrapertools.find_single_match(data,'<div class="footer-banner">(.*?)<div id="footer-copyright">')
     if item.title=="Productora" :
-        data = scrapertools.get_match(data,'<li id="menu-item-16"(.*?)</ul>')
+        data = scrapertools.find_single_match(data,'<li id="menu-item-16"(.*?)</ul>')
     if item.title=="Categorias" :
-        data = scrapertools.get_match(data,'<a>Categories</a>(.*?)</ul>')
+        data = scrapertools.find_single_match(data,'<a>Categories</a>(.*?)</ul>')
     patron = '<a href="([^"]+)">([^"]+)</a>'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedurl,scrapedtitle in matches:
@@ -64,7 +64,7 @@ def categorias(item):

 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     patron = '<li class="border-radius-5 box-shadow">.*?'
     patron += 'src="([^"]+)".*?<a href="([^"]+)" title="([^"]+)">.*?'
@@ -91,11 +91,11 @@ def lista(item):

 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     variable = scrapertools.find_single_match(data,'<script type=\'text/javascript\'> str=\'([^\']+)\'')
     resuelta = re.sub("@[A-F0-9][A-F0-9]", lambda m: m.group()[1:].decode('hex'), variable)
     url = scrapertools.find_single_match(resuelta,'<iframe src="([^"]+)"')
-    data = scrapertools.cachePage(url)
+    data = httptools.downloadpage(url).data
     itemlist = servertools.find_video_items(data=data)
     for videoitem in itemlist:
         videoitem.title = item.title

15
plugin.video.alfa/channels/xxxfreeinhd.json
Normal file
@@ -0,0 +1,15 @@
+{
+    "id": "xxxfreeinhd",
+    "name": "xxxfreeinhd",
+    "active": true,
+    "adult": true,
+    "language": ["*"],
+    "thumbnail": "https://watchxxxfreeinhd.com/wp-content/uploads/logo2015%20(1).jpg",
+    "banner": "",
+    "categories": [
+        "adult"
+    ],
+    "settings": [
+    ]
+}
93
plugin.video.alfa/channels/xxxfreeinhd.py
Normal file
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+#------------------------------------------------------------
+import urlparse,urllib2,urllib,re
+import os, sys
+from platformcode import config, logger
+from core import scrapertools
+from core.item import Item
+from core import servertools
+from core import httptools
+
+host = 'https://watchxxxfreeinhd.com'
+
+
+def mainlist(item):
+    logger.info()
+    itemlist = []
+
+    itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/?filtre=date&cat=0"))
+    itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/?display=tube&filtre=views"))
+    itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/?display=tube&filtre=rate"))
+
+    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
+    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ", "+")
+    item.url = host + "/search.php?q=%s&language=en&search=Search" % texto
+    try:
+        return lista(item)
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("%s" % line)
+        return []
+
+
+def categorias(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
+    patron = '<noscript>.*?src="([^"]+)".*?'
+    patron += '<a href="([^"]+)" title="([^"]+)".*?'
+    patron += '<span class="nb_cat border-radius-5">(\d+) videos</span>'
+    matches = re.compile(patron,re.DOTALL).findall(data)
+    for scrapedthumbnail,scrapedurl,scrapedtitle,cantidad in matches:
+        scrapedplot = ""
+        title = scrapedtitle + " (" + cantidad + ")"
+        itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl,
+                              thumbnail=scrapedthumbnail , plot=scrapedplot) )
+    return itemlist
+
+
+def lista(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
+    patron = '<li class="border-radius-5 box-shadow">.*?'
+    patron += '<img width="\d+" height="\d+" src="([^"]+)" class=.*?'
+    patron += '<a href="([^"]+)" title="([^"]+)">.*?'
+    matches = re.compile(patron,re.DOTALL).findall(data)
+    for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
+        title = scrapedtitle
+        thumbnail = scrapedthumbnail + "|https://watchxxxfreeinhd.com/"
+        plot = ""
+        itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
+                              thumbnail=thumbnail, plot=plot, fanart=scrapedthumbnail, contentTitle = scrapedtitle))
+    next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)"')
+    if next_page:
+        next_page = urlparse.urljoin(item.url,next_page)
+        itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
+                              url=next_page) )
+    return itemlist
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
+    data = scrapertools.find_single_match(data,'<div class="video-embed">(.*?)</div>')
+    patron = '<noscript>.*?<iframe src="([^"]+)"'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for url in matches:
+        itemlist.append(item.clone(action="play", title = "%s", url=url ))
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
+    return itemlist
@@ -42,9 +42,9 @@ def categorias(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     if item.title == "Canal" :
-        data = scrapertools.get_match(data,'>Studios</a>(.*?)</ul>')
+        data = scrapertools.find_single_match(data,'>Studios</a>(.*?)</ul>')
     else:
-        data = scrapertools.get_match(data,'>Categories</a>(.*?)</ul>')
+        data = scrapertools.find_single_match(data,'>Categories</a>(.*?)</ul>')
     patron = '<a href="([^"]+)">([^<]+)</a>'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedurl,scrapedtitle in matches:

@@ -38,19 +38,20 @@ def categorias(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    data1 = scrapertools.get_match(data,'<h5>Popular Categories<br />(.*?)</aside>')
+    data1 = scrapertools.find_single_match(data,'<h5>Popular Categories<br />(.*?)</aside>')
     if item.title == "Canal" :
-        data1 = scrapertools.get_match(data,'>Top sites</a>(.*?)</ul>')
-        data1 += scrapertools.get_match(data,'Downloads</h2>(.*?)</ul>')
+        data1 = scrapertools.find_single_match(data,'>Top sites</a>(.*?)</ul>')
+        data1 += scrapertools.find_single_match(data,'Downloads</h2>(.*?)</ul>')
     patron = '<a href="([^<]+)">([^<]+)</a>'
     matches = re.compile(patron,re.DOTALL).findall(data1)
     for scrapedurl,scrapedtitle in matches:
         scrapedplot = ""
         scrapedthumbnail = ""
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
+                              thumbnail=scrapedthumbnail , plot=scrapedplot) )
     return itemlist


 def lista(item):
     logger.info()
     itemlist = []
@@ -67,7 +68,7 @@ def lista(item):
         elif '1080' in scrapedtitle : title= "[COLOR red]" + "1080p" + "[/COLOR] " + scrapedtitle
         else: title = scrapedtitle
         itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next →</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
@@ -75,30 +76,3 @@ def lista(item):
     return itemlist

-def findvideos(item):
-    logger.info()
-    itemlist = []
-    data = httptools.downloadpage(item.url).data
-    data = scrapertools.get_match(data,'--more-->(.*?)/a>')
-    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    patron = '<a href="([^"]+)".*?class="external">(.*?)<'
-    matches = re.compile(patron,re.DOTALL).findall(data)
-    for scrapedurl,scrapedtitle in matches:
-        scrapedplot = ""
-        scrapedthumbnail = ""
-        itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, fulltitle=item.title,
-                              url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot) )
-    return itemlist
-
-
-def play(item):
-    logger.info()
-    data = httptools.downloadpage(item.url).data
-    itemlist = servertools.find_video_items(data=data)
-    for videoitem in itemlist:
-        videoitem.title = item.title
-        videoitem.fulltitle = item.fulltitle
-        videoitem.thumbnail = item.thumbnail
-        videoitem.channel = item.channel
-    return itemlist

@@ -44,7 +44,7 @@ def categorias(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    data = scrapertools.get_match(data, '<h4>Trending(.*?)</ul>')
+    data = scrapertools.find_single_match(data, '<h4>Trending(.*?)</ul>')
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li><a href="([^"]+)">([^"]+)</a>'
     matches = re.compile(patron, re.DOTALL).findall(data)
@@ -97,7 +97,7 @@ def play(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    data = scrapertools.get_match(data, 'var encodings(.*?)var')
+    data = scrapertools.find_single_match(data, 'var encodings(.*?)var')
     if '360' in data:
         patron = '"360".*?"filename"\:"(.*?)"'
     if '720' in data:
@@ -16,7 +16,7 @@ def mainlist(item):
     itemlist.append( Item(channel=item.channel, title="Nuevas", action="lista", url=host + "/browse/time/"))
     itemlist.append( Item(channel=item.channel, title="Mas Vistas", action="lista", url=host + "/browse/views/"))
     itemlist.append( Item(channel=item.channel, title="Mejor valorada", action="lista", url=host + "/top_rated/"))
-    itemlist.append( Item(channel=item.channel, title="Canal", action="categorias", url=host + "/channels/rating/"))
+    itemlist.append( Item(channel=item.channel, title="Canal", action="categorias", url=host + "/channels/most_popular/"))
     itemlist.append( Item(channel=item.channel, title="Pornstars", action="catalogo", url=host + "/pornstars/most_popular/"))
     itemlist.append( Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/alphabetical/"))
     itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
@@ -41,7 +41,7 @@ def catalogo(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    data1 = scrapertools.get_match(data,'>Most Popular Pornstars<(.*?)<i class=\'icon-menu-right\'></i></a>')
+    data1 = scrapertools.find_single_match(data,'>Most Popular Pornstars<(.*?)<i class=\'icon-menu-right\'></i></a>')
     patron = '<a href="([^"]+)".*?'
     patron += 'data-original="([^"]+)".*?'
     patron += '<span class="porn-star-name">([^"]+)</span>.*?'
@@ -52,7 +52,7 @@ def catalogo(item):
         scrapedtitle = scrapedtitle + " (" + cantidad + ")"
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<div class="currentPage".*?<a href="([^"]+)"')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
@@ -66,9 +66,9 @@ def categorias(item):
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     if item.title == "Canal":
-        data = scrapertools.get_match(data,'>All</div>(.*?)<i class=\'icon-menu-right\'></i></a>')
+        data = scrapertools.find_single_match(data,'>All</div>(.*?)<i class=\'icon-menu-right\'></i></a>')
     if item.title == "Categorias":
-        data = scrapertools.get_match(data,'<div class=\'row alphabetical\'.*?>(.*?)>Popular by Country</h2>')
+        data = scrapertools.find_single_match(data,'<div class=\'row alphabetical\'.*?>(.*?)>Popular by Country</h2>')
     patron = '<a href="([^"]+)".*?'
     patron += '<img src=(.*?)>.*?'
     patron += '>([^<]+) (?:Videos|videos)<'
@@ -82,7 +82,7 @@ def categorias(item):
         title = scrapedtitle + " (" + cantidad +")"
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
         itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl,
-                              thumbnail=thumbnail, fanart=thumbnail, plot=scrapedplot) )
+                              fanart=thumbnail, thumbnail=thumbnail, plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<div class="currentPage".*?<a href="([^"]+)"')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
@@ -93,7 +93,7 @@ def lista(item):
 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     patron = '<a href="([^"]+)" class=\'video-box-image\'.*?'
     patron += 'data-original="([^"]+)".*?'
@@ -108,7 +108,7 @@ def lista(item):
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
                               fanart=thumbnail, plot=plot, contentTitle = contentTitle))
-    next_page = scrapertools.find_single_match(data,'<div class="currentPage".*?<a href="([^"]+)"')
+    next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)"')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
         itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
@@ -118,7 +118,7 @@ def lista(item):

 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     patron = 'page_params.video.mediaDefinition =.*?"videoUrl":"([^"]+)"'
     matches = scrapertools.find_multiple_matches(data, patron)
     for scrapedurl in matches:

@@ -10,6 +10,7 @@ from core import httptools

 host = 'http://yuuk.net'
+

 def mainlist(item):
     logger.info()
     itemlist = []