Merge pull request #493 from paeznet/master

Tweaks to the channels, plus a few new ones
Alfa authored 2018-11-28 11:23:24 -05:00, committed by GitHub
67 changed files with 1940 additions and 136 deletions

View File

@@ -42,7 +42,6 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="item_p">.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
scrapedthumbnail = "https:" + scrapedthumbnail
@@ -78,7 +77,6 @@ def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.fulltitle
videoitem.fulltitle = item.fulltitle

View File

@@ -0,0 +1,16 @@
{
"id": "TXXX",
"name": "TXXX",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.txxx.com/images/desktop-logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}
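The channel.json manifest above is ordinary JSON, so it can be inspected outside Kodi with the standard library. A minimal sketch under that assumption (the file path below is hypothetical, and inside the add-on the manifest is presumably read through the bundled core.jsontools wrapper rather than the stdlib):

# Minimal sketch, outside the add-on: the manifest is plain JSON.
# The path is hypothetical; Alfa itself presumably loads it via core.jsontools.
import json

with open("channels/txxx/channel.json") as f:
    channel = json.load(f)

print channel["id"], channel["active"]   # TXXX True
print channel["categories"]              # [u'adult']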

View File

@@ -0,0 +1,135 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://www.txxx.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimas" , action="peliculas", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="peliculas", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas popular" , action="peliculas", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels-list/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search/s=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="channel-thumb">.*?<a href="([^"]+)" title="([^"]+)".*?<img src="([^"]+)".*?<span>(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,num in matches:
scrapedplot = ""
scrapedurl = host + scrapedurl
title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]"
itemlist.append( Item(channel=item.channel, action="peliculas", title=title , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="c-thumb">.*?<a href="([^"]+)".*?<img src="([^"]+)".*?<div class="c-thumb--overlay c-thumb--overlay-title">([^"]+)</div>.*?<span>(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,num in matches:
scrapedplot = ""
title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]"
itemlist.append( Item(channel=item.channel, action="peliculas", title=title , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = 'data-video-id="\d+">.*?<a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)".*?<span class="thumb__duration">(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,time in matches:
contentTitle = scrapedtitle
title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"')
video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"')
partes = video_url.split('||')
video_url = decode_url(partes[0])
video_url = re.sub('/get_file/\d+/[0-9a-z]{32}/', partes[1], video_url)
video_url += '&' if '?' in video_url else '?'
video_url += 'lip=' + partes[2] + '&lt=' + partes[3]
itemlist.append(item.clone(action="play", title=item.title, url=video_url))
return itemlist
def decode_url(txt):
_0x52f6x15 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~'
reto = ''; n = 0
# In the next two lines, each letter of АВСЕМ is a 2-byte Cyrillic look-alike; the replace turns it into a 1-byte ASCII letter. !!!!: АВСЕМ (10 bytes) ABCEM (5 bytes)
txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt)
txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M')
while n < len(txt):
a = _0x52f6x15.index(txt[n])
n += 1
b = _0x52f6x15.index(txt[n])
n += 1
c = _0x52f6x15.index(txt[n])
n += 1
d = _0x52f6x15.index(txt[n])
n += 1
a = a << 2 | b >> 4
b = (b & 15) << 4 | c >> 2
e = (c & 3) << 6 | d
reto += chr(a)
if c != 64: reto += chr(b)
if d != 64: reto += chr(e)
return urllib.unquote(reto)
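decode_url() above appears to be a plain base64 decoder with a custom alphabet ('.', ',' and '~' standing in for '+', '/' and the '=' padding), preceded by the Cyrillic homoglyph normalization noted in the comment. A minimal sketch of an equivalent built on the standard library, under that reading (the helper name is made up for illustration):

# Sketch only: assumes decode_url() is standard base64 with '+', '/', '='
# swapped for '.', ',', '~'; decode_url_b64 is a hypothetical name.
import base64, re, urllib

def decode_url_b64(txt):
    # Map the 2-byte Cyrillic homoglyphs (АВСЕМ) to their 1-byte ASCII counterparts.
    for cyr, lat in ((u'А', 'A'), (u'В', 'B'), (u'С', 'C'), (u'Е', 'E'), (u'М', 'M')):
        txt = txt.replace(cyr.encode('utf-8'), lat)
    txt = re.sub(r'[^A-Za-z0-9.,~]', '', txt)              # drop anything outside the alphabet
    txt = txt.replace('.', '+').replace(',', '/').replace('~', '=')
    return urllib.unquote(base64.b64decode(txt))

Under this reading, play() above only has to swap in the host-specific /get_file/ path (partes[1]) and append the lip/lt tokens to obtain the final media URL.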

View File

@@ -47,7 +47,6 @@ def categorias(item):
data = httptools.downloadpage(item.url).data
patron = '&nbsp;<a href="([^"]+)" class="link1">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""

View File

@@ -43,7 +43,6 @@ def catalogo(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li><a href="([^"]+)" title="">.*?<span class="videos-count">([^"]+)</span><span class="title">([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,cantidad,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -59,7 +58,6 @@ def categorias(item):
patron = '<a href="([^"]+)">.*?'
patron += '<img src="([^"]+)" alt="([^"]+)" />'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
scrapedurl = scrapedurl.replace("top", "new")

View File

@@ -45,7 +45,6 @@ def catalogo(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li><a class="item" href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -66,7 +65,6 @@ def categorias(item):
patron += 'src="([^"]+)".*?'
patron += '<div class="videos">([^"]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
@@ -105,7 +103,6 @@ def play(item):
data = httptools.downloadpage(item.url).data
patron = 'video_url: \'([^\']+)\''
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl in matches:
url = scrapedurl
itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))

View File

@@ -44,7 +44,6 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" class="th">.*?<img src="([^"]+)".*?<span>([^"]+)</span>\s*(\d+) movies.*?</strong>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"

View File

@@ -44,7 +44,6 @@ def catalogo(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)">\s*<img src=\'([^\']+)\'/>.*?<span>([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/movies"
@@ -63,7 +62,6 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)"/>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle

View File

@@ -36,11 +36,8 @@ def search(item, texto):
def categorias(item):
itemlist = []
data = scrapertools.cache_page(item.url)
#data = scrapertools.get_match(data,'<div class="sidetitle">Categorías</div>(.*?)</ul>')
#<li class="cat-item cat-item-203077"><a href="http://www.coomelonitas.com/Categoria/asiaticas">ASIÁTICAS</a>
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""

View File

@@ -43,7 +43,6 @@ def catalogo(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class=""\s+title="([^"]+)"\s+href="([^"]+)">'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedtitle,scrapedurl in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -59,7 +58,6 @@ def categorias(item):
data = scrapertools.get_match(data,'<h2>TAGS</h2>(.*?)<div class="sideitem"')
patron = '<a href="(.*?)".*?>(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -92,9 +90,8 @@ def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<div id="wrapper" class="ortala">(.*?)<div class="butonlar">')
patron = '<iframe.*?src="([^"]+)"'
patron = '<iframe\s+src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
itemlist.append( Item(action="play", title=scrapedurl, fulltitle = item.title, url=scrapedurl))
return itemlist
@@ -104,7 +101,7 @@ def play(item):
data = scrapertools.cachePage(item.url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.fulltitle
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videochannel=item.channel

File diff suppressed because one or more lines are too long

View File

@@ -44,7 +44,6 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" title="([^"]+) porn tube" class="thumb">.*?<img src="([^"]+)".*?<span class="total">([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"

View File

@@ -27,10 +27,23 @@ def mainlist(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="mainlist" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

View File

@@ -42,7 +42,6 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li class="thumb thumb-category">.*?<a href="([^"]+)">.*?<img class="lazy" data-original="([^"]+)">.*?<div class="name">([^"]+)</div>.*?<div class="count">(\d+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"

View File

@@ -46,7 +46,6 @@ def catalogo(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li id="menu-item-\d+".*?u=([^"]+)">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -59,7 +58,6 @@ def categorias(item):
data = httptools.downloadpage(item.url).data
patron = '<li><a href="([^"]+)" rel="nofollow">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -75,25 +73,26 @@ def peliculas(item):
patron = '<article id="post-\d+".*?<a href="([^"]+)" rel="bookmark">(.*?)</a>.*?<img src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
contentTitle = scrapedtitle
title = scrapedtitle
thumbnail = scrapedthumbnail.replace("jpg#", "jpg")
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, fulltitle=title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data, '<div class="nav-previous"><a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
# else:
# patron = '<div class="nav-previous"><a href="(.*?)"'
# next_page = re.compile(patron,re.DOTALL).findall(data)
#next_page = scrapertools.find_single_match(data,'class="last" title=.*?<a href="([^"]+)">')
# next_page = next_page[0]
#next_page = host + next_page
# itemlist.append( Item(channel=item.channel, action="peliculas", title=next_page , text_color="blue", url=next_page ) )
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.fulltitle
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videochannel=item.channel
return itemlist

View File

@@ -43,7 +43,6 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" class="thumb">.*?src="([^"]+)".*?<strong class="title">([^"]+)</strong>.*?<b>(.*?)</b>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle,vidnum in matches:
scrapedplot = ""
title = scrapedtitle + " \(" + vidnum + "\)"

View File

@@ -43,7 +43,6 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li>.*?<a href="([^"]+)".*?<img class="thumb" src="([^"]+)" alt="([^"]+)".*?<span class="videos-count">(\d+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle,vidnum in matches:
scrapedplot = ""
title = scrapedtitle + " \(" + vidnum + "\)"

View File

@@ -41,7 +41,6 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+) - Porn videos">.*?<span>(\d+) videos</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"

View File

@@ -45,7 +45,6 @@ def catalogo(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="thumbnail" href="([^"]+)">.*?<img src="([^"]+)".*?<span class="thumbnail__info__right">\s+([^"]+)\s+</span>.*?<h5>([^"]+)</h5>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches:
scrapedplot = ""
cantidad = cantidad.replace(" ", "")
@@ -66,7 +65,6 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="thumbnail" href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)".*?<i class="mdi mdi-video"></i>([^"]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
cantidad = cantidad.replace(" ", "")
@@ -100,6 +98,7 @@ def peliculas(item):
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
itemlist = []

View File

@@ -42,7 +42,6 @@ def categorias(item):
data = httptools.downloadpage(item.url).data
patron = '<option class="level-0" value="([^"]+)">([^"]+)&nbsp;&nbsp;\((.*?)\)<'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,number in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -59,11 +58,10 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
patron = '<div class="featured-wrap clearfix">.*?<a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<li><a rel=\'nofollow\' href=\'([^\']+)\' class=\'inactive\'>Next')
next_page_url = scrapertools.find_single_match(data,'<span class=\'currenttext\'>.*?href=\'([^\']+)\' class=\'inactive\'>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
@@ -75,7 +73,6 @@ def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle

View File

@@ -44,7 +44,6 @@ def categorias(item):
data = httptools.downloadpage(item.url).data
patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?<div class="videos">(\d+) video.*?</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,numero in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -61,7 +60,6 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
patron = '<a href="http://xxx.justporno.tv/videos/(\d+)/.*?" title="([^"]+)" >.*?data-original="([^"]+)".*?<div class="duration">(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
scrapedplot = ""
scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle
@@ -81,7 +79,6 @@ def play(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
patron = 'video_url: \'([^\']+)\''
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl in matches:
scrapedplot = ""
itemlist.append(item.clone(channel=item.channel, action="play", title=scrapedurl , url=scrapedurl , plot="" , folder=True) )

View File

@@ -44,7 +44,6 @@ def categorias(item):
data = httptools.downloadpage(item.url).data
patron = '<h3><a href="([^"]+)">(.*?)</a> <small>(.*?)</small></h3>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,cantidad in matches:
scrapedplot = scrapedurl.replace("http://mporno.unblckd.org/", "").replace("page1.html", "")
scrapedthumbnail = ""
@@ -58,9 +57,6 @@ def peliculas(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<img class="content_image" src="([^"]+).mp4/.*?" alt="([^"]+)".*?this.src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
@@ -76,14 +72,5 @@ def peliculas(item):
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
# else:
# patron = '<a href=\'([^\']+)\' class="next">Next &gt;&gt;</a>'
# next_page = re.compile(patron,re.DOTALL).findall(data)
# next_page = scrapertools.find_single_match(data,'class="last" title=.*?<a href="([^"]+)">')
# plot = item.plot
# next_page = next_page[0]
# next_page = host + plot + next_page
# itemlist.append( Item(channel=item.channel, action="peliculas", title=next_page , text_color="blue", url=next_page, plot=plot ) )
return itemlist

View File

@@ -41,7 +41,6 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="muestra-escena muestra-categoria" href="([^"]+)" title="([^"]+)">.*?src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle

View File

@@ -19,7 +19,6 @@ def mainlist(item):
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/list-movies"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/list-movies"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/list-movies"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -47,7 +46,6 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li><a title=".*?" href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""

View File

@@ -46,6 +46,6 @@ def mainlist(item):
next_page_url = "http://www.peliculaseroticas.net/cine-erotico/" + str(next_page) + ".html"
itemlist.append(
Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=next_page_url, folder=True))
Item(channel=item.channel, action="mainlist", title=">> Página siguiente", url=next_page_url, folder=True))
return itemlist

View File

@@ -41,7 +41,6 @@ def categorias(item):
# data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li class="additional_list__item"><a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -79,7 +78,6 @@ def findvideos(item):
data = httptools.downloadpage(item.url).data
patron = '<source src="([^"]+)" res="\d+" label="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
itemlist.append(item.clone(action="play", title=scrapedtitle, fulltitle = item.title, url=scrapedurl))
return itemlist

View File

@@ -43,9 +43,8 @@ def catalogo(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a itemprop="url" href="([^"]+)".*?title="([^"]+)">.*?<img itemprop="image" src=([^"]+) alt=.*?</svg> ([^"]+)</li>'
patron = '<a itemprop="url" href="([^"]+)".*?title="([^"]+)">.*?<img itemprop="image" src=([^"]+) alt=.*?</svg>\s+([^"]+) </li>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad +")"
@@ -65,11 +64,10 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a itemprop="url" href="([^"]+)".*?title="([^"]+)">.*?<img itemprop="image" src="([^"]+)".*?</svg>([^"]+) </small>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad +")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/?sort=latest"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="btn btn-primary--light btn-pagination" itemprop="name" href="([^"]+)" title="Siguiente">')
if next_page_url!="":

View File

@@ -45,7 +45,6 @@ def peliculas(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="clip-link" data-id="\d+" title="([^"]+)" href="([^"]+)">.*?<img src="([^"]+)".*?<span class="timer">(.*?)</span></div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedtitle,scrapedurl,scrapedthumbnail,scrapedtime in matches:
scrapedplot = ""
scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle
@@ -62,7 +61,6 @@ def play(item):
data = httptools.downloadpage(item.url).data
patron = '"video-setup".*?file: "(.*?)",'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl in matches:
scrapedurl = str(scrapedurl)
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,

View File

@@ -2,7 +2,6 @@
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
@@ -38,27 +37,22 @@ def search(item, texto):
def categorias(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<div id="categoriesStraightImages">(.*?)</ul>')
# Extract the categories
patron = '<li class="cat_pic" data-category=".*?'
patron += '<a href="([^"]+)".*?'
patron += '<img src="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += 'alt="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
if "?" in scrapedurl:
url = urlparse.urljoin(item.url, scrapedurl + "&o=cm")
else:
url = urlparse.urljoin(item.url, scrapedurl + "?o=cm")
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, fanart=item.fanart,
thumbnail=scrapedthumbnail))
itemlist.sort(key=lambda x: x.title)
return itemlist
@@ -66,30 +60,21 @@ def categorias(item):
def peliculas(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
videodata = scrapertools.find_single_match(data, 'videos search-video-thumbs">(.*?)<div class="reset"></div>')
# Extract the movies
patron = '<div class="phimage">.*?'
patron += '<a href="([^"]+)" title="([^"]+).*?'
patron += '<var class="duration">([^<]+)</var>(.*?)</div>.*?'
patron += 'data-mediumthumb="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(videodata)
for url, scrapedtitle, duration, scrapedhd, thumbnail in matches:
title = scrapedtitle.replace("&amp;amp;", "&amp;") + " (" + duration + ")"
title = "(" + duration + ") " + scrapedtitle.replace("&amp;amp;", "&amp;")
scrapedhd = scrapertools.find_single_match(scrapedhd, '<span class="hd-thumbnail">(.*?)</span>')
if scrapedhd == 'HD':
title += ' [HD]'
url = urlparse.urljoin(item.url, url)
itemlist.append(
Item(channel=item.channel, action="play", title=title, url=url, fanart=item.fanart, thumbnail=thumbnail))
if itemlist:
# Pagination
patron = '<li class="page_next"><a href="([^"]+)"'
@@ -99,29 +84,16 @@ def peliculas(item):
itemlist.append(
Item(channel=item.channel, action="peliculas", title=">> Página siguiente", fanart=item.fanart,
url=url))
return itemlist
def play(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
quality = scrapertools.find_multiple_matches(data, '"id":"quality([^"]+)"')
for q in quality:
match = scrapertools.find_single_match(data, 'var quality_%s=(.*?);' % q)
match = re.sub(r'(/\*.*?\*/)', '', match).replace("+", "")
url = ""
for s in match.split():
val = scrapertools.find_single_match(data, 'var %s=(.*?);' % s.strip())
if "+" in val:
values = scrapertools.find_multiple_matches(val, '"([^"]+)"')
val = "".join(values)
url += val.replace('"', "")
itemlist.append([".mp4 %s [directo]" % q, url])
data = scrapertools.cachePage(item.url)
patron = '"defaultQuality":true,"format":"","quality":"\d+","videoUrl":"(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl in matches:
url = scrapedurl.replace("\/", "/")
itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
return itemlist
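The rewritten play() above pulls videoUrl straight out of the page's embedded player JSON and only has to undo the escaped slashes; a quick illustration of that step with a made-up capture:

# Illustration only, made-up capture: what the regex above would match and how
# the "\/" sequences are turned back into plain slashes.
scrapedurl = 'https:\/\/ev-h.example.com\/video.mp4'   # backslash-escaped, as in the page JSON
print scrapedurl.replace("\/", "/")                    # https://ev-h.example.com/video.mp4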

View File

@@ -42,7 +42,6 @@ def categorias(item):
data = httptools.downloadpage(item.url).data
patron = '<li><a href="([^<]+)">(.*?)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -58,7 +57,6 @@ def peliculas(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<article id="post-\d+".*?<a href="([^"]+)" title="([^"]+)">.*?<img data-src="(.*?)".*?<span class="duration"><i class="fa fa-clock-o"></i>([^<]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches:
scrapedplot = ""
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
@@ -85,4 +83,5 @@ def play(item):
scrapedurl = scrapedurl.replace("\/", "/")
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist
return itemlist

View File

@@ -42,7 +42,6 @@ def catalogo(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="pornstar_link js_mpop js-pop" href="([^"]+)".*?"([^"]+)"\s+title="([^"]+)".*?<div class="ps_info_count">\s+([^"]+)\s+Videos'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " [COLOR yellow]" + cantidad + "[/COLOR] "
@@ -61,7 +60,6 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="category_item_wrapper">.*?<a href="([^"]+)".*?data-thumb_url="([^"]+)".*?alt="([^"]+)".*?<span class="category_count">\s+([^"]+) Videos'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"

View File

@@ -42,7 +42,6 @@ def canales (item):
data = scrapertools.get_match(data,'Top Networks</a>(.*?)</ul>')
patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -59,7 +58,6 @@ def categorias(item):
data = scrapertools.get_match(data,'More Categories</a>(.*?)</ul>')
patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -75,7 +73,6 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
patron = '<div class="post-img small-post-img">.*?<a href="(.*?)" title="(.*?)">.*?<img src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , fulltitle=scrapedtitle , plot=scrapedplot , folder=True) )

View File

@@ -47,7 +47,6 @@ def categorias(item):
data = scrapertools.get_match(data,'<div class="tagcloud">(.*?)<p>')
patron = '<a href="(.*?)".*?>(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , folder=True) )
return itemlist
@@ -60,7 +59,6 @@ def catalogo(item):
data = scrapertools.get_match(data,'>Best Porn Studios</a>(.*?)</ul>')
patron = '<a href="(.*?)">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , folder=True) )
return itemlist
@@ -71,7 +69,6 @@ def anual(item):
data = httptools.downloadpage(item.url).data
patron = '<li><a href="([^<]+)">([^<]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -85,16 +82,14 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
patron = '<div class="post-thumbnail.*?<a href="([^"]+)" title="(.*?)".*?src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle.replace(" Porn DVD", "")
scrapedtitle = scrapedtitle.replace(" Porn DVD", "").replace("Permalink to ", "").replace(" Porn Movie", "")
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">&raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist

View File

@@ -45,7 +45,6 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)">\s*(.*?)\s*<'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""

View File

@@ -18,8 +18,6 @@ def mainlist(item):
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
# itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -30,7 +28,6 @@ def categorias(item):
data = scrapertools.get_match(data,'<h3>Categories</h3>(.*?)</ul>')
patron = '<li class="cat-item cat-item-\d+"><a href="(.*?)" >(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -45,7 +42,6 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
patron = '<div class="post" id="post-\d+">.*?<a href="([^"]+)" title="(.*?)"><img src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle.replace(" &#8211; Free Porn Download", "")
@@ -63,7 +59,6 @@ def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle

View File

@@ -0,0 +1,16 @@
{
"id": "tnaflix",
"name": "tnaflix",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://www.tnaflix.com/images/favicons/tnaflix/android-icon-192x192.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,116 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://www.tnaflix.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/new/1"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/popular/?period=month&d=all"))
itemlist.append( Item(channel=item.channel, title="Mejor valorado" , action="peliculas", url=host + "/toprated/?d=all&period=month"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels/all/top-rated/1/all"))
itemlist.append( Item(channel=item.channel, title="PornStars" , action="categorias", url=host + "/pornstars"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search.php?what=%s&tab=" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def catalogo(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<div class="vidcountSp">(\d+)</div>.*?<a class="categoryTitle channelTitle" href="([^"]+)" title="([^"]+)">.*?data-original="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for cantidad,scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
title = scrapedtitle + " (" + cantidad + ")"
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=title , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="llNav" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
if item.title=="PornStars" :
data = scrapertools.get_match(data,'</i> Hall Of Fame Pornstars</h2>(.*?)</section>')
patron = '<a class="thumb" href="([^"]+)">.*?<img src="([^"]+)".*?<div class="vidcountSp">(.*?)</div>.*?<a class="categoryTitle".*?>([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches:
scrapedplot = ""
if item.title=="Categorias" :
scrapedthumbnail = "http:" + scrapedthumbnail
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
if item.title=="PornStars" :
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "?section=videos"
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="llNav" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="categorias" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class=\'thumb no_ajax\' href=\'(.*?)\'.*?data-original=\'(.*?)\' alt="([^"]+)"><div class=\'videoDuration\'>([^<]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a class="llNav" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<meta itemprop="contentUrl" content="([^"]+)" />'
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=url,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist

View File

@@ -0,0 +1,16 @@
{
"id": "tryboobs",
"name": "tryboobs",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://tb3.fuckandcdn.com/tb/tbstatic/v30/common/tryboobs/img/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,104 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://www.tryboobs.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/most-popular/week/"))
itemlist.append( Item(channel=item.channel, title="Mejor Valorado" , action="peliculas", url=host + "/top-rated/week/"))
itemlist.append( Item(channel=item.channel, title="Modelos" , action="modelos", url=host + "/models/model-viewed/1/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search/?q=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def modelos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" class="th-model">.*?src="([^"]+)".*?<span class="roliks"><span>(\d+)</span>.*?<span class="title">([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<li><a class="pag-next" href="([^"]+)"><ins>Next</ins></a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="modelos" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" class="th-cat">.*?<img src="([^"]+)".*?<span>(\d+)</span>.*?<span class="title">([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = 'href="([^"]+)"\s*class="th-video.*?<img src="([^"]+)".*?<span class="time">([^"]+)</span>.*?<span class="title">([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,duracion,scrapedtitle in matches:
url = scrapedurl
contentTitle = scrapedtitle
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li><a class="pag-next" href="([^"]+)"><ins>Next</ins></a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<video src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=url,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist

View File

@@ -0,0 +1,16 @@
{
"id": "videosXYZ",
"name": "videosXYZ",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://free-porn-videos.xyz/wp-content/uploads/2018/10/cropped-Logo-org-Free-porn-videos.xyz-app-icon-192x192.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,72 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://free-porn-videos.xyz'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/topics/porn-videos/"))
itemlist.append( Item(channel=item.channel, title="Parody" , action="peliculas", url=host + "/topics/free-porn-parodies/"))
itemlist.append( Item(channel=item.channel, title="BigTits" , action="peliculas", url=host + "/?s=big+tit"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<article id="post-\d+".*?<a href="([^"]+)" title="([^"]+)">.*?data-src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle.replace("Permalink to Watch ", "").replace("Porn Online", "").replace("Permalink to ", "")
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , contentTitle=scrapedtitle, plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">&raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data,'<iframe src="([^"]+)"')
scrapedurl = scrapedurl.replace("%28", "(").replace("%29", ")")
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

View File

@@ -0,0 +1,16 @@
{
"id": "vintagetube",
"name": "vintagetube",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.vintagexxxsex.com/images/vintagexxxsex.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://www.vintagetube.club'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host + "/tube/last-1/"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/tube/popular-1/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search/%s" % texto
item.url = item.url + "/popular-1/"
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="prev prev-ct">.*?<a href="(.*?)">.*?<img src="(.*?)".*?<span class="prev-tit">(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = str(scrapedtitle)
scrapedurl = host + scrapedurl
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="prev">.*?<a href="(.*?)">.*?<img src="(.*?)">.*?<span class="prev-tit">(.*?)</span>.*?<div class="prev-dur"><span>(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches:
scrapedplot = ""
scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + str(scrapedtitle)
scrapedurl = scrapedurl.replace("/xxx.php?tube=", "")
scrapedurl = host + scrapedurl
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
current_page = scrapertools.find_single_match(data,'<li><span class="page">(.*?)</span></li>')
next_page = int(current_page) + 1
url = item.url
url_page = current_page + "/"
url = url.replace(url_page, "")
next_page_url = url + str(next_page)+"/"
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
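peliculas() above builds the next-page link by plain string arithmetic on URLs of the form .../tube/last-<n>/; a quick illustration of that step with made-up values:

# Illustration only (made-up values): how the "Página Siguiente" URL is derived above.
current_page = "3"                                    # from <li><span class="page">3</span></li>
url = "http://www.vintagetube.club/tube/last-3/"      # item.url for the page being parsed
next_page = int(current_page) + 1                     # 4
url = url.replace(current_page + "/", "")             # "http://www.vintagetube.club/tube/last-"
next_page_url = url + str(next_page) + "/"            # "http://www.vintagetube.club/tube/last-4/"

Note that str.replace() swaps every occurrence of "<n>/", so this relies on the page number not appearing anywhere else in the URL.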
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data,'<iframe frameborder=0 scrolling="no" src=\'(.*?)\'')
if scrapedurl == "":
scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
scrapedurl = scrapedurl.replace ("http:", "")
data = httptools.downloadpage("http:" + scrapedurl).data
else:
data = httptools.downloadpage(scrapedurl).data
scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
data = httptools.downloadpage("https:" + scrapedurl).data
media_url = scrapertools.find_single_match(data,'<source src="(.*?)"')
itemlist = []
itemlist.append(Item(channel=item.channel, action="play", title=media_url, fulltitle=media_url, url=media_url,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist

View File

@@ -0,0 +1,16 @@
{
"id": "vintagexxxsex",
"name": "vintagexxxsex",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.vintagexxxsex.com/images/vintagexxxsex.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,101 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://www.vintagexxxsex.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Top" , action="peliculas", url=host + "/all-top/1/"))
itemlist.append( Item(channel=item.channel, title="Novedades" , action="peliculas", url=host + "/all-new/1/"))
itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/all-longest/1/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li><a href="([^"]+)"><i class="fa fa-tag"></i>(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = host + scrapedurl
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="th">.*?<a href="([^"]+)".*?<img src="([^"]+)".*?<span class="th_nm">([^"]+)</span>.*?<i class="fa fa-clock-o"></i>([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,time in matches:
contentTitle = scrapedtitle
title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle
scrapedurl = scrapedurl.replace("/up.php?xxx=", "")
scrapedurl = host + scrapedurl
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li><span class="pg_nm">\d+</span></li>.*?href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
# else:
# patron = '<li><span class="pg_nm">\d+</span></li>.*?href="([^"]+)"'
# next_page = re.compile(patron,re.DOTALL).findall(data)
# next_page = item.url + next_page[0]
# itemlist.append( Item(channel=item.channel, action="peliculas", title=next_page[0] , text_color="blue", url=next_page[0] ) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
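    # Resolve the embedded player: follow the iframe and read the <source> tag;
    # some embeds apparently nest a second iframe whose player config exposes the
    # stream as file: "..." instead, hence the fallback below.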
scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
data = httptools.downloadpage(scrapedurl).data
scrapedurl = scrapertools.find_single_match(data,'<source src="(.*?)"')
if scrapedurl == "":
scrapedurl = "http:" + scrapertools.find_single_match(data,'<iframe src="(.*?)"')
data = httptools.downloadpage(scrapedurl).data
scrapedurl = scrapertools.find_single_match(data,'file: "(.*?)"')
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist

View File

@@ -0,0 +1,16 @@
{
"id": "vporn",
"name": "vporn",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://th-us2.vporn.com/images/logo%20Dark%20theme.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,109 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'https://www.vporn.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Novedades" , action="peliculas", url=host + "/newest/month/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/views/month/"))
itemlist.append( Item(channel=item.channel, title="Mejor Valoradas" , action="peliculas", url=host + "/rating/month/"))
itemlist.append( Item(channel=item.channel, title="Favoritas" , action="peliculas", url=host + "/favorites/month/"))
itemlist.append( Item(channel=item.channel, title="Mas Votada" , action="peliculas", url=host + "/votes/month/"))
itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/longest/month/"))
itemlist.append( Item(channel=item.channel, title="PornStar" , action="catalogo", url=host + "/pornstars/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search?q=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class=\'star\'>.*?<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+)".*?<span> (\d+) Videos'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedurl = host + scrapedurl
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="next" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
data = scrapertools.get_match(data,'<div class="cats-all categories-list">(.*?)</div>')
patron = '<a href="([^"]+)".*?>([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = host + scrapedurl
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="video">.*?<a href="([^"]+)".*?<span class="time">(.*?)</span>.*?<img src="([^"]+)" alt="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,time,scrapedthumbnail,scrapedtitle in matches:
title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a class="next.*?title="Next Page" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
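    # The player lists one <source> tag per quality label, so return one playable
    # item for each available quality.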
patron = '<source src="([^"]+)" type="video/mp4" label="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl,scrapedtitle in matches:
itemlist.append(item.clone(action="play", title=scrapedtitle, fulltitle = item.title, url=scrapedurl))
return itemlist

View File

@@ -0,0 +1,16 @@
{
"id": "watchpornfree",
"name": "watchpornfree",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://watchpornfree.ws/wp-content/uploads/2018/03/Untitled-2.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,75 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
# https://playpornfree.org/ https://mangoporn.net/ https://watchfreexxx.net/ https://losporn.org/ https://xxxstreams.me/ https://speedporn.net/
host = 'https://watchpornfree.ws'
def mainlist(item):
logger.info("")
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/movies"))
itemlist.append( Item(channel=item.channel, title="Parodia" , action="peliculas", url=host + "/category/parodies-hd"))
itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/category/clips-scenes"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Año" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info("")
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info("")
itemlist = []
data = httptools.downloadpage(item.url).data
if item.title == "Canal":
data = scrapertools.get_match(data,'>Studios</a>(.*?)</ul>')
if item.title == "Año":
data = scrapertools.get_match(data,'>Years</a>(.*?)</ul>')
if item.title == "Categorias":
data = scrapertools.get_match(data,'>XXX Genres</div>(.*?)</ul>')
patron = '<a href="(.*?)".*?>(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info("")
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<article class="TPost B">.*?<a href="([^"]+)">.*?src="([^"]+)".*?<div class="Title">([^"]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next &raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist

View File

@@ -0,0 +1,16 @@
{
"id": "webpeliculasporno",
"name": "webpeliculasporno",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.webpeliculasporno.com/wp-content/uploads/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://www.webpeliculasporno.com'
def mainlist(item):
logger.info("pelisalacarta.webpeliculasporno mainlist")
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimas" , action="peliculas", url= host))
itemlist.append( Item(channel=item.channel, title="Mas vistas" , action="peliculas", url= host + "/?display=tube&filtre=views"))
itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="peliculas", url= host + "/?display=tube&filtre=rate"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url= host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info("pelisalacarta.gmobi mainlist")
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
itemlist = []
data = scrapertools.cache_page(item.url)
patron = '<li class="cat-item [^>]+><a href="([^"]+)" >([^<]+)'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<li class="border-radius-5 box-shadow">.*?'
patron += 'src="([^"]+)".*?'
patron += '<a href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle ))
next_page_url = scrapertools.find_single_match(data,'<li><a class="next page-numbers" href="([^"]+)">Next')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -0,0 +1,16 @@
{
"id": "woodrocket",
"name": "woodrocket",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://woodrocket.com/img//logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,75 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://woodrocket.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Novedades" , action="peliculas", url=host + "/porn"))
itemlist.append( Item(channel=item.channel, title="Parodias" , action="peliculas", url=host + "/parodies"))
itemlist.append( Item(channel=item.channel, title="Shows" , action="categorias", url=host + "/series"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories"))
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="media-panel-image">.*?<img src="(.*?)".*?<a href="(.*?)">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = host + scrapedthumbnail
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="media-panel-image">.*?<a href="([^"]+)".*?title="([^"]+)".*?<img src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
plot = ""
contentTitle = scrapedtitle
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
title = scrapedtitle
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">&raquo;</a></li>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
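    # Each embedded iframe hosts a player whose JSON config pairs a "quality" with a
    # "videoUrl" (slashes escaped as \/); take the first match per iframe and unescape it.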
patron = '<iframe src="(.*?)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
data = httptools.downloadpage(scrapedurl).data
scrapedurl = scrapertools.find_single_match(data,'"quality":"\d*","videoUrl":"(.*?)"')
scrapedurl = scrapedurl.replace("\/", "/")
itemlist.append(item.clone(action="play", title=scrapedurl, fulltitle = item.title, url=scrapedurl))
return itemlist

View File

@@ -0,0 +1,16 @@
{
"id": "xozilla",
"name": "xozilla",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://www.xozilla.com/images/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,107 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://www.xozilla.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search/%s/" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?<img class="thumb" src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
thumbnail = "http:" + scrapedthumbnail
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=thumbnail , plot=scrapedplot , folder=True) )
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?<img class="thumb" src="([^"]+)".*?</i> (\d+) videos</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" class="item.*?data-original="([^"]+)".*?alt="([^"]+)".*?<div class="duration">(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
url = scrapedurl
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page_url!="#videos":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
if next_page_url=="#videos":
next_page_url = scrapertools.find_single_match(data,'from:(\d+)">Next</a>')
next_page_url = urlparse.urljoin(item.url,next_page_url) + "/"
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
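    # The player config exposes video_alt_url (apparently the higher-quality file)
    # and video_url; prefer the former and fall back to the latter.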
media_url = scrapertools.find_single_match(data, 'video_alt_url: \'([^\']+)/\'')
if media_url == "":
media_url = scrapertools.find_single_match(data, 'video_url: \'([^\']+)/\'')
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=media_url,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist

View File

@@ -0,0 +1,16 @@
{
"id": "xtapes",
"name": "xtapes",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://hd.xtapes.to/wp-content/uploads/xtapes.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,121 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://hd.xtapes.to'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/full-porn-movies/?display=tube&filtre=date"))
itemlist.append( Item(channel=item.channel, title="Peliculas Estudio" , action="catalogo", url=host))
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/?filtre=date&cat=0"))
itemlist.append( Item(channel=item.channel, title="Mas Vistos" , action="peliculas", url=host + "/?display=tube&filtre=views"))
itemlist.append( Item(channel=item.channel, title="Mejor valorado" , action="peliculas", url=host + "/?display=tube&filtre=rate"))
itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/?display=tube&filtre=duree"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if item.title=="Canal":
data = scrapertools.get_match(data,'<div class="footer-banner">(.*?)<div id="footer-copyright">')
else:
data = scrapertools.get_match(data,'<li id="menu-item-16"(.*?)</ul>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)">([^"]+)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<a>Categories</a>(.*?)</ul>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li class="border-radius-5 box-shadow">.*?src="([^"]+)".*?<a href="([^"]+)" title="([^"]+)">.*?<div class="time-infos".*?>([^"]+)<span class="time-img">'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedthumbnail,scrapedurl,scrapedtitle,duracion in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = scrapedtitle
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle = title, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next video')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
next_page_url = next_page_url.replace("#038;cat=0#038;", "").replace("#038;filtre=views#038;", "").replace("#038;filtre=rate#038;", "").replace("#038;filtre=duree#038;", "")
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
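    # The embed is obfuscated in a JS string where characters are encoded as @HH hex
    # pairs; decode them back to text, pull the iframe URL and hand the hoster page
    # to servertools to identify the video links.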
variable = scrapertools.find_single_match(data,'<script type=\'text/javascript\'> str=\'([^\']+)\'')
resuelta = re.sub("@[A-F0-9][A-F0-9]", lambda m: m.group()[1:].decode('hex'), variable)
url = scrapertools.find_single_match(resuelta,'<iframe src="([^"]+)"')
data = scrapertools.cachePage(url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

View File

@@ -0,0 +1,16 @@
{
"id": "xxxparodyhd",
"name": "xxxparodyhd",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://xxxparodyhd.net/wp-content/uploads/2018/04/parodyhd-1.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'https://xxxparodyhd.net'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/genre/new-release/"))
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/movies/"))
itemlist.append( Item(channel=item.channel, title="Parodias" , action="peliculas", url=host + "/genre/parodies/"))
itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/genre/clips-scenes/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/categories"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if item.title == "Canal" :
data = scrapertools.get_match(data,'>Studios</a>(.*?)</ul>')
else:
data = scrapertools.get_match(data,'<div class=\'sub-container\' style=\'display: none;\'><ul class=\'sub-menu\'>(.*?)</ul>')
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div data-movie-id="\d+" class="ml-item">.*?<a href="([^"]+)".*?oldtitle="([^"]+)".*?<img src="([^"]+)".*?rel="tag">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,year in matches:
scrapedplot = ""
        scrapedtitle = scrapedtitle + " " + year
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<li class=\'active\'>.*?href=\'([^\']+)\'>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist

View File

@@ -0,0 +1,16 @@
{
"id": "xxxstreams",
"name": "xxxstreams",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,94 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://xxxstreams.org'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url= host + "/category/full-porn-movie-stream/"))
itemlist.append( Item(channel=item.channel, title="Clips" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/category/full-porn-movie-stream/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<li id="menu-item.*?class="menu-item menu-item-type-taxonomy.*?<a href="([^<]+)">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<div class="entry-content">.*?<img src="([^"]+)".*?<a href="([^<]+)".*?<span class="screen-reader-text">(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next &rarr;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
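    # Keep only the post body after the --more-- marker and list the external hoster
    # links it contains (anchor text as title); play() resolves them via servertools.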
data = scrapertools.get_match(data,'--more--></p>(.*?)/a></p>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)".*?class="external">(.*?)<'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, fulltitle=item.title, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True) )
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

View File

@@ -0,0 +1,16 @@
{
"id": "youjizz",
"name": "youjizz",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://cdne-static.yjcontentdelivery.com/app/1/images/yjlogo.jpeg",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://www.youjizz.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/newest-clips/1.html"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/most-popular/1.html"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/top-rated-week/1.html"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search/%s-1.html" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<h4>Trending Categories</h4>(.*?)</ul>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Big Tits" , url="https://www.youjizz.com/search/big-tits-1.html?" , folder=True) )
patron = '<li><a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="video-item">.*?class="frame image" href="([^"]+)".*?data-original="([^"]+)" />.*?<div class="video-title">.*?>(.*?)</a>.*?<span class="time">(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
contentTitle = title
thumbnail = "http:" + scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li><a class="pagination-next" href="([^"]+)">Next &raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
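    # The stream URL comes from the "filename" field of the embedded player JSON;
    # it is protocol-relative and backslash-escaped, hence the cleanup below.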
media_url = scrapertools.find_single_match(data, '"filename"\:"(.*?)"')
media_url = "https:" + media_url.replace("\\", "")
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=media_url,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist

View File

@@ -0,0 +1,16 @@
{
"id": "youporn",
"name": "youporn",
"active": true,
"adult": false,
"language": ["*"],
"thumbnail": "https://fs.ypncdn.com/cb/bundles/youpornwebfront/images/l_youporn_black.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,110 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://www.youporn.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/browse/time/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/browse/views/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/top_rated/"))
itemlist.append( Item(channel=item.channel, title="Pornstars" , action="catalogo", url=host + "/pornstars/most_popular/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/alphabetical/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search/?query=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<a href="/pornstars/most_popular/" class="selected">All</a>(.*?)<i class=\'icon-menu-right\'></i></a>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)".*?data-original="([^"]+)".*?<span class="porn-star-name">([^"]+)</span>.*?<span class="video-count">([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)" data-page-number=.*?>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<div class=\'row alphabetical\'.*?>(.*?)<h2 class="heading4">Popular by Country</h2>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)".*?data-original="([^"]+)".*?<p>([^"]+)<span>([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedthumbnail = "http:" + scrapedthumbnail
scrapedtitle = scrapedtitle + " (" + cantidad +")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" class=\'video-box-image\'.*?data-original="([^"]+)".*?<div class="video-box-title">([^"]+)</div>.*?<div class="video-duration">(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<div class="prev-next"><a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
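    # page_params.video.mediaDefinition holds the available encodes as JSON; collect
    # each videoUrl and unescape the \/ sequences.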
patron = 'page_params.video.mediaDefinition =.*?"videoUrl":"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
scrapedurl = scrapedurl.replace("\/", "/")
itemlist.append(item.clone(action="play", title=scrapedurl, fulltitle = item.title, url=scrapedurl))
return itemlist

View File

@@ -0,0 +1,17 @@
{
"id": "yuuk",
"name": "yuuk",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://yuuk.net/wp-content/uploads/2018/06/yuuk_net_logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -0,0 +1,69 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://yuuk.net'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar" , action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host+ "/?s=%s" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
itemlist.append( Item(channel=item.channel, action="peliculas", title="Big Tits" , url="http://yuuk.net/?s=big+tit" , folder=True) )
patron = 'menu-item-object-category"><a href="([^"]+)">.*?</style>([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="featured-wrap clearfix">.*?<a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<li><a rel=\'nofollow\' href=\'([^\']+)\' class=\'inactive\'>Next')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist