sradicato supporto ai canali per adulti

This commit is contained in:
marco
2020-04-22 20:53:50 +02:00
parent 73079d7989
commit 8bd67a6cd8
280 changed files with 38 additions and 13026 deletions

View File

@@ -21,10 +21,9 @@ se vanno cancellati tutti deve rimanere la voce:
"name": "Nome del canale visualizzato in KOD",
"language": ["ita", "sub-ita"],
"active": false,
"adult": false,
"thumbnail": "",
"banner": "",
"categories": ["movie", "tvshow", "anime", "vos", "documentary", "adult"],
"categories": ["movie", "tvshow", "anime", "vos", "documentary"],
"not_active": ["include_in_newest"],
"settings": [
{

View File

@@ -3,7 +3,6 @@
"name": "Altadefinizione01",
"language": ["ita", "sub-ita"],
"active": true,
"adult": false,
"thumbnail": "altadefinizione01.png",
"banner": "altadefinizione01.png",
"categories": ["movie", "vos"],

View File

@@ -2,7 +2,6 @@
"id": "altadefinizione01_link",
"name": "Altadefinizione01 L",
"active": true,
"adult": false,
"language": ["ita","sub-ita"],
"thumbnail": "altadefinizione01_L.png",
"banner": "altadefinizione01_L.png",

View File

@@ -2,7 +2,6 @@
"id": "altadefinizioneclick",
"name": "AltadefinizioneClick",
"active": true,
"adult": false,
"language": ["ita","sub-ita"],
"thumbnail": "altadefinizioneclick.png",
"bannermenu": "altadefinizioneciclk.png",

View File

@@ -3,7 +3,6 @@
"name": "AnimeForce",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "animeforce.png",
"banner": "animeforce.png",
"categories": ["anime"],

View File

@@ -2,7 +2,6 @@
"id": "animeleggendari",
"name": "AnimePerTutti",
"active": true,
"adult": false,
"language": ["ita", "sub-ita"],
"thumbnail": "animepertutti.png",
"bannermenu": "animepertutti.png",

View File

@@ -2,7 +2,6 @@
"id": "animesaturn",
"name": "AnimeSaturn",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "animesaturn.png",
"banner": "animesaturn.png",

View File

@@ -2,7 +2,6 @@
"id": "animespace",
"name": "AnimeSpace",
"active": false,
"adult": false,
"language": [],
"thumbnail": "",
"banner": "",

View File

@@ -2,7 +2,6 @@
"id": "animesubita",
"name": "AnimeSubIta",
"active": true,
"adult": false,
"language": ["sub-ita"],
"thumbnail": "animesubita.png",
"bannermenu": "animesubita.png",

View File

@@ -2,7 +2,6 @@
"id": "animetubeita",
"name": "AnimeTubeITA",
"active": true,
"adult": false,
"language": ["sub-ita"],
"thumbnail": "animetubeita.png",
"bannermenu": "animetubeita.png",

View File

@@ -2,7 +2,6 @@
"id": "animeunity",
"name": "AnimeUnity",
"active": true,
"adult": false,
"language": ["ita", "sub-ita"],
"thumbnail": "animeunity.png",
"banner": "animeunity.png",

View File

@@ -2,7 +2,6 @@
"id": "animeworld",
"name": "AnimeWorld",
"active": true,
"adult": false,
"language": ["ita", "sub-ita"],
"thumbnail": "animeworld.png",
"banner": "animeworld.png",

View File

@@ -3,7 +3,6 @@
"name": "Casacinema",
"language": ["ita", "sub-ita"],
"active": true,
"adult": false,
"thumbnail": "casacinema.png",
"banner": "casacinema.png",
"categories": ["tvshow", "movie","vos"],

View File

@@ -3,7 +3,6 @@
"name": "La Casa del Cinema",
"language": ["ita", "sub-ita"],
"active": true,
"adult": false,
"thumbnail": "casacinemainfo.png",
"banner": "casacinemainfo.png",
"categories": ["movie", "vos"],

View File

@@ -3,7 +3,6 @@
"name": "Cb01anime",
"language": ["ita", "vos", "sub-ita"],
"active": true,
"adult": false,
"thumbnail": "cb01anime.png",
"banner": "cb01anime.png",
"categories": ["anime"],

View File

@@ -3,7 +3,6 @@
"name": "CB01",
"language": ["ita", "sub-ita"],
"active": true,
"adult": false,
"thumbnail": "cb01.png",
"banner": "cb01.png",
"categories": ["tvshow", "movie", "vos", "documentary"],

View File

@@ -3,7 +3,6 @@
"name": "Cinemalibero",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "cinemalibero.png",
"banner": "cinemalibero.png",
"categories": ["movie","tvshow"],

View File

@@ -3,7 +3,6 @@
"name": "Cineteca di Bologna",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "cinetecadibologna.png",
"banner": "cinetecadibologna.png",
"categories": ["documentary"],

View File

@@ -3,7 +3,6 @@
"name": "DreamSub",
"language": ["ita", "sub-ita"],
"active": true,
"adult": false,
"thumbnail": "dreamsub.png",
"banner": "dreamsub.png",
"categories": ["anime", "vos"],

View File

@@ -3,7 +3,6 @@
"name": "D.S.D.A",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "dsda.png",
"banner": "dsda.png",
"categories": ["documentary"],

View File

@@ -2,7 +2,6 @@
"id": "eurostreaming",
"name": "Eurostreaming",
"active": true,
"adult": false,
"language": ["ita","sub-ita"],
"thumbnail": "eurostreaming.png",
"banner": "eurostreaming.png",

View File

@@ -3,7 +3,6 @@
"name": "Fastsubita",
"language": ["sub-ita"],
"active": true,
"adult": false,
"thumbnail": "fastsubita.png",
"banner": "fastsubita.png",
"categories": ["tvshow", "vos"],

View File

@@ -2,7 +2,6 @@
"id": "filmigratis",
"name": "Filmi Gratis",
"active": true,
"adult": false,
"language": ["ita", "sub-ita"],
"thumbnail": "filmigratis.png",
"banner": "filmigratis.png",

View File

@@ -2,7 +2,6 @@
"id": "filmpertutti",
"name": "Filmpertutti",
"active": true,
"adult": false,
"language": ["ita", "sub-ita"],
"thumbnail": "filmpertutti.png",
"banner": "filmpertutti.png",

View File

@@ -2,7 +2,6 @@
"id": "filmsenzalimiticc",
"name": "Filmsenzalimiti CC",
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "filmsenzalimiticc.png",
"banner": "",

View File

@@ -2,7 +2,6 @@
"id": "guardaserieclick",
"name": "GuardaSerie.click",
"active": true,
"adult": false,
"language": ["ita", "vos"],
"thumbnail": "guardaserieclick.png",
"bannermenu": "guardaserieclick.png",

View File

@@ -3,7 +3,6 @@
"name": "HD4ME",
"language": ["ita", "sub-ita"],
"active": true,
"adult": false,
"thumbnail": "hd4me.png",
"banner": "hd4me.png",
"categories": ["movie", "vos"],

View File

@@ -2,7 +2,6 @@
"id": "ilcorsaronero",
"name": "ilCorSaRoNeRo",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "ilcorsaronero.png",
"banner": "ilcorsaronero.png",

View File

@@ -2,7 +2,6 @@
"id": "ilgeniodellostreaming",
"name": "IlGenioDelloStreaming",
"active": true,
"adult": false,
"language": ["ita", "sub-ita"],
"thumbnail": "ilgeniodellostreaming.png",
"banner": "ilgeniodellostreaming.png",

View File

@@ -2,7 +2,6 @@
"id": "italiaserie",
"name": "Italia Serie",
"active": true,
"adult": false,
"language": ["ita","sub-ita"],
"thumbnail": "italiaserie.png",
"bannermenu": "italiaserie.png",

View File

@@ -2,7 +2,6 @@
"id": "metalvideo",
"name": "Metal Video",
"active": true,
"adult": false,
"language": ["*"],
"thumbnail": "metalvideo.png",
"banner": "metalvideo.png",

View File

@@ -2,7 +2,6 @@
"id": "mondoserietv",
"name": "MondoSerieTV",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "mondoserietv.png",
"bannermenu": "mondoserietv.png",

View File

@@ -3,7 +3,6 @@
"name": "Netfreex",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "netfreex.png",
"banner": "netfreex.png",
"categories": ["tvshow", "movie", "anime"],

View File

@@ -2,7 +2,6 @@
"id": "piratestreaming",
"name": "Pirate Streaming",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "piratestreaming.png",
"bannermenu": "piratestreaming.png",

View File

@@ -3,7 +3,6 @@
"name": "PolpoTV",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "polpotv.png",
"banner": "polpotv.png",
"categories": ["movie","tvshow"],

View File

@@ -1,16 +0,0 @@
{
"id": "LIKUOO",
"name": "LIKUOO",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://likuoo.video/files_static/images/logo.jpg",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,100 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'https://www.likuoo.video'
def mainlist(item):
    """Build the channel root menu: latest videos, pornstars, categories, search."""
    logger.info()
    menu = [
        ("Ultimos", "lista", host),
        ("Pornstar", "categorias", host + "/pornstars/"),
        ("Categorias", "categorias", host + "/all-channels/"),
    ]
    itemlist = [Item(channel=item.channel, title=t, action=a, url=u) for t, a, u in menu]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Run a site search; on failure log the traceback and return [] so the
    global searcher is not interrupted."""
    logger.info()
    item.url = host + "/search/?s=%s" % texto.replace(" ", "+")
    try:
        return lista(item)
    except:
        import sys
        for trace_line in sys.exc_info():
            logger.error("%s" % trace_line)
        return []
def categorias(item):
    """Scrape category/pornstar tiles from the current page, with pagination."""
    logger.info()
    results = []
    page = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", httptools.downloadpage(item.url).data)
    pattern = '<div class="item_p">.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"'
    for link, name, thumb in re.findall(pattern, page, re.DOTALL):
        # Thumbnails come protocol-relative; links may be relative to the page.
        thumb = "https:" + thumb
        link = urlparse.urljoin(item.url, link)
        results.append(Item(channel=item.channel, action="lista", title=name, url=link,
                            fanart=thumb, thumbnail=thumb, plot=""))
    next_page = scrapertools.find_single_match(page, '...<a href="([^"]+)" class="next">&#187;</a>')
    if next_page:
        results.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue",
                                  url=urlparse.urljoin(item.url, next_page)))
    return results
def lista(item):
    """List videos on the page, prefixing each title with its runtime."""
    logger.info()
    results = []
    page = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", httptools.downloadpage(item.url).data)
    pattern = ('<div class="item">.*?'
               '<a href="([^"]+)" title="(.*?)">.*?'
               'src="(.*?)".*?'
               '<div class="runtime">(.*?)</div>')
    for link, name, thumb, runtime in re.findall(pattern, page, re.DOTALL):
        video_url = urlparse.urljoin(item.url, link)
        # Site writes e.g. "12m34s"; render it as "12:34 ".
        runtime = runtime.replace("m", ":").replace("s", " ")
        label = "[COLOR yellow]" + runtime + "[/COLOR] " + name
        full_thumb = "https:" + thumb
        results.append(Item(channel=item.channel, action="play", title=label, url=video_url,
                            thumbnail=full_thumb, fanart=full_thumb, plot="",
                            contentTitle=label))
    next_page = scrapertools.find_single_match(page, '...<a href="([^"]+)" class="next">&#187;</a>')
    if next_page:
        results.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue",
                                  url=urlparse.urljoin(item.url, next_page)))
    return results
def play(item):
    """Resolve the embedded player via the site's AJAX endpoint and hand the
    resulting iframe URLs to the generic server resolver."""
    results = []
    page = re.sub(r"\n|\r|\t|amp;|\s{2}|&nbsp;", "", httptools.downloadpage(item.url).data)
    pattern = 'url:\'([^\']+)\'.*?data:\'([^\']+)\''
    for ajax_path, post in scrapertools.find_multiple_matches(page, pattern):
        post = post.replace("%3D", "=")
        ajax_url = host + ajax_path
        logger.debug(item.url + " , " + ajax_url + " , " + post)
        response = httptools.downloadpage(ajax_url, post=post, headers={'Referer': item.url}).data
        iframe = scrapertools.find_single_match(response.replace("\\", ""), '<iframe src="([^"]+)"')
        # Title is a placeholder filled with the server name below.
        results.append(Item(channel=item.channel, action="play", title="%s", url=iframe))
    return servertools.get_servers_itemlist(results, lambda i: i.title % i.server.capitalize())

View File

@@ -1,16 +0,0 @@
{
"id": "TXXX",
"name": "TXXX",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.txxx.com/images/desktop-logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,149 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
host = 'http://www.txxx.com'
def mainlist(item):
    """Build the channel root menu: listings, channels, categories, search."""
    logger.info()
    menu = [
        ("Ultimas", "lista", host + "/latest-updates/"),
        ("Mejor valoradas", "lista", host + "/top-rated/"),
        ("Mas popular", "lista", host + "/most-popular/"),
        ("Canal", "catalogo", host + "/channels-list/most-popular/"),
        ("Categorias", "categorias", host + "/categories/"),
    ]
    itemlist = [Item(channel=item.channel, title=t, action=a, url=u) for t, a, u in menu]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Run a site search; on failure log the traceback and return [] so the
    global searcher is not interrupted."""
    logger.info()
    item.url = host + "/search/s=%s" % texto.replace(" ", "+")
    try:
        return lista(item)
    except:
        import sys
        for trace_line in sys.exc_info():
            logger.error("%s" % trace_line)
        return []
def catalogo(item):
    """List site channels, showing each channel's video count in the title."""
    logger.info()
    results = []
    page = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", httptools.downloadpage(item.url).data)
    pattern = ('<div class="channel-thumb">.*?'
               '<a href="([^"]+)" title="([^"]+)".*?'
               '<img src="([^"]+)".*?'
               '<span>(.*?)</span>')
    for link, name, thumb, count in re.findall(pattern, page, re.DOTALL):
        label = name + "[COLOR yellow] " + count + "[/COLOR]"
        results.append(Item(channel=item.channel, action="lista", title=label, url=host + link,
                            thumbnail=thumb, plot=""))
    next_page = scrapertools.find_single_match(page, '<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"')
    if next_page:
        results.append(Item(channel=item.channel, action="catalogo", title="Página Siguiente >>",
                            text_color="blue", url=urlparse.urljoin(item.url, next_page)))
    return results
def categorias(item):
    """List categories with their badge counts (site provides no thumbnails)."""
    logger.info()
    results = []
    page = httptools.downloadpage(item.url).data
    pattern = ('<a class="categories-list__link" href="([^"]+)">.*?'
               '<span class="categories-list__name cat-icon" data-title="([^"]+)">.*?'
               '<span class="categories-list__badge">(.*?)</span>')
    for link, name, count in re.findall(pattern, page, re.DOTALL):
        label = name + "[COLOR yellow] " + count + "[/COLOR]"
        results.append(Item(channel=item.channel, action="lista", title=label,
                            url=urlparse.urljoin(item.url, link), thumbnail="", plot=""))
    return results
def lista(item):
    """List videos; titles carry the duration and, when present, an HD badge."""
    logger.info()
    results = []
    page = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", httptools.downloadpage(item.url).data)
    pattern = ('data-video-id="\d+">.*?<a href="([^"]+)".*?'
               '<img src="([^"]+)" alt="([^"]+)".*?'
               '</div>(.*?)</div>')
    for link, thumb, name, extra in re.findall(pattern, page, re.DOTALL):
        hd_badge = scrapertools.find_single_match(extra, '<span class="thumb__hd">(.*?)</span>')
        duration = scrapertools.find_single_match(extra, '<span class="thumb__duration">(.*?)</span>')
        if hd_badge:
            label = "[COLOR yellow]" + duration + "[/COLOR] " + "[COLOR red]" + hd_badge + "[/COLOR] " + name
        else:
            label = "[COLOR yellow]" + duration + "[/COLOR] " + name
        results.append(Item(channel=item.channel, action="play", title=label, url=link,
                            thumbnail=thumb, plot="", contentTitle=label))
    next_page = scrapertools.find_single_match(page, '<a class=" btn btn--size--l btn--next.*?" href="([^"]+)" title="Next Page"')
    if next_page:
        results.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue",
                                  url=urlparse.urljoin(item.url, next_page)))
    return results
def play(item):
    """Rebuild the obfuscated stream URL.

    The page ships an encoded URL followed by '||'-separated patch fields:
    [0] encoded URL, [1] replacement for the /get_file/<id>/<hash>/ segment,
    [2] 'lip' value, [3] 'lt' value.
    """
    logger.info()
    page = httptools.downloadpage(item.url).data
    encoded = scrapertools.find_single_match(page, 'var video_url = "([^"]*)"')
    encoded += scrapertools.find_single_match(page, 'video_url \+= "([^"]*)"')
    fields = encoded.split('||')
    final = decode_url(fields[0])
    final = re.sub('/get_file/\d+/[0-9a-z]{32}/', fields[1], final)
    separator = '&' if '?' in final else '?'
    final = final + separator + 'lip=' + fields[2] + '&lt=' + fields[3]
    return [item.clone(action="play", title=item.title, url=final)]
def decode_url(txt):
    """Decode the site's custom base64 variant and return the unquoted URL.

    The alphabet is standard base64 with '.', ',' and '~' in place of
    '+', '/' and '='; the input may also contain Cyrillic А В С Е М
    homoglyphs standing in for their Latin look-alikes.
    """
    _0x52f6x15 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~'
    reto = ''; n = 0
    # In the next two lines the Cyrillic letters АВСЕМ occupy 2 bytes each;
    # the replace() maps them to their 1-byte Latin equivalents ABCEM.
    txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt)
    txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M')
    while n < len(txt):
        # Consume four alphabet symbols per iteration -> up to three bytes out.
        a = _0x52f6x15.index(txt[n])
        n += 1
        b = _0x52f6x15.index(txt[n])
        n += 1
        c = _0x52f6x15.index(txt[n])
        n += 1
        d = _0x52f6x15.index(txt[n])
        n += 1
        a = a << 2 | b >> 4
        b = (b & 15) << 4 | c >> 2
        e = (c & 3) << 6 | d
        reto += chr(a)
        # Index 64 is '~' (the padding symbol): padded slots emit no byte.
        if c != 64: reto += chr(b)
        if d != 64: reto += chr(e)
    return urllib.unquote(reto)

View File

@@ -1,12 +0,0 @@
# -*- coding: utf-8 -*-
import os
import sys
# Appends the main plugin dir to the PYTHONPATH if an internal package cannot be imported.
# Examples: In Plex Media Server all modules are under "Code.*" package, and in Enigma2 under "Plugins.Extensions.*"
try:
    import core
except ImportError:
    # "core" is not importable: some hosts (Plex, Enigma2) nest the plugin
    # under their own package, so append the plugin root to sys.path and let
    # callers retry the import. Only ImportError is handled — a bare except
    # here would also swallow SystemExit/KeyboardInterrupt.
    sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))

View File

@@ -1,15 +0,0 @@
{
"id": "absoluporn",
"name": "absoluporn",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.absoluporn.es/image/deco/logo.gif",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,95 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'http://www.absoluporn.es'
def mainlist(item):
    """Build the channel root menu: sort orders, categories, search."""
    logger.info()
    menu = [
        ("Nuevos", "lista", host + "/wall-date-1.html"),
        ("Mas valorados", "lista", host + "/wall-note-1.html"),
        ("Mas vistos", "lista", host + "/wall-main-1.html"),
        ("Mas largos", "lista", host + "/wall-time-1.html"),
        ("Categorias", "categorias", host),
    ]
    itemlist = [Item(channel=item.channel, title=t, action=a, url=u) for t, a, u in menu]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Run a site search; on failure log the traceback and return [] so the
    global searcher is not interrupted."""
    logger.info()
    item.url = host + "/search-%s-1.html" % texto.replace(" ", "+")
    try:
        return lista(item)
    except:
        import sys
        for trace_line in sys.exc_info():
            logger.error("%s" % trace_line)
        return []
def categorias(item):
    """Scrape the category links from the home page."""
    logger.info()
    results = []
    page = httptools.downloadpage(item.url).data
    pattern = '&nbsp;<a href="([^"]+)" class="link1">([^"]+)</a>'
    for link, name in re.findall(pattern, page, re.DOTALL):
        # Point each category at its date-sorted listing page.
        link = host + "/" + link.replace(".html", "_date.html")
        results.append(Item(channel=item.channel, action="lista", title=name, url=link,
                            thumbnail="", plot=""))
    return results
def lista(item):
    """List videos with duration-prefixed titles, adding a pager when present."""
    logger.info()
    results = []
    page = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", httptools.downloadpage(item.url).data)
    pattern = ('<div class="thumb-main-titre"><a href="([^"]+)".*?'
               'title="([^"]+)".*?'
               'src="([^"]+)".*?'
               '<div class="time">(.*?)</div>')
    for link, name, thumb, runtime in re.findall(pattern, page, re.DOTALL):
        video_url = urlparse.urljoin(item.url, link)
        label = "[COLOR yellow]" + runtime + "[/COLOR] " + name
        results.append(Item(channel=item.channel, action="play", title=label, url=video_url,
                            thumbnail=thumb, plot="", fanart=thumb, contentTitle=name))
    next_page = scrapertools.find_single_match(page, '<span class="text16">\d+</span> <a href="..([^"]+)"')
    if next_page:
        results.append(Item(channel=item.channel, action="lista", title="Página Siguiente >>",
                            text_color="blue", url=urlparse.urljoin(item.url, next_page)))
    return results
def play(item):
    """Assemble the direct video URL from the player's inline JS variables."""
    logger.info()
    results = []
    page = httptools.downloadpage(item.url).data
    pattern = ('servervideo = \'([^\']+)\'.*?'
               'path = \'([^\']+)\'.*?'
               'filee = \'([^\']+)\'.*?')
    for server, path, filee in scrapertools.find_multiple_matches(page, pattern):
        # Fixed token the site inserts between path and filename — presumably
        # a static auth hash; confirm against the live player JS.
        direct = server + path + "56ea912c4df934c216c352fa8d623af3" + filee
        results.append(Item(channel=item.channel, action="play", title=item.title, url=direct,
                            thumbnail=item.thumbnail, plot=item.plot, show=item.title,
                            server="directo", folder=False))
    return results

View File

@@ -1,14 +0,0 @@
{
"id": "alsoporn",
"name": "alsoporn",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://alsoporn.com/images/alsoporn.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,99 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
import base64
host = 'http://www.alsoporn.com'
def mainlist(item):
    """Build the channel root menu: top listing, categories, search."""
    logger.info()
    return [
        Item(channel=item.channel, title="Top", action="lista", url=host + "/g/All/top/1"),
        Item(channel=item.channel, title="Categorias", action="categorias", url=host),
        Item(channel=item.channel, title="Buscar", action="search"),
    ]
def search(item, texto):
    """Run a site search; on failure log the traceback and return [] so the
    global searcher is not interrupted."""
    logger.info()
    item.url = host + "/search/=%s/" % texto.replace(" ", "+")
    try:
        return lista(item)
    except:
        import sys
        for trace_line in sys.exc_info():
            logger.error("%s" % trace_line)
        return []
def categorias(item):
    """Scrape category tiles and return them sorted by title."""
    logger.info()
    results = []
    page = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", httptools.downloadpage(item.url).data)
    pattern = '<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+)" />'
    for link, thumb, name in re.findall(pattern, page, re.DOTALL):
        results.append(Item(channel=item.channel, action="lista", title=name,
                            url=urlparse.urljoin(item.url, link),
                            fanart=thumb, thumbnail=thumb, plot=""))
    return sorted(results, key=lambda entry: entry.title)
def lista(item):
    """List videos, skipping entries whose duration contains "0:00"."""
    logger.info()
    results = []
    page = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", httptools.downloadpage(item.url).data)
    pattern = ('<div class="alsoporn_prev">.*?'
               '<a href="([^"]+)">.*?'
               '<img src="([^"]+)" alt="([^"]+)">.*?'
               '<span>([^"]+)</span>')
    for link, thumb, name, runtime in re.findall(pattern, page, re.DOTALL):
        if "0:00" in runtime:
            continue  # placeholder/broken entries carry a zero runtime
        label = "[COLOR yellow]" + runtime + "[/COLOR] " + name
        results.append(Item(channel=item.channel, action="play", title=label,
                            url=urlparse.urljoin(item.url, link), thumbnail=thumb,
                            fanart=thumb, plot="", contentTitle=name))
    next_page = scrapertools.find_single_match(page, '<li><a href="([^"]+)" target="_self"><span class="alsoporn_page">NEXT</span></a>')
    if next_page:
        results.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue",
                                  url=urlparse.urljoin(item.url, next_page)))
    return results
def play(item):
    """Follow the embed chain (site -> playercdn iframe -> final host) and
    extract the direct stream URL.

    Only xvideos and xhamster embeds are understood. The original raised
    NameError for any other host because ``scrapedurl2`` was never bound;
    now unsupported hosts (or failed extractions) yield an empty list.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    scrapedurl = scrapertools.find_single_match(data, '<iframe frameborder=0 scrolling="no" src=\'([^\']+)\'')
    data = httptools.downloadpage(scrapedurl).data
    scrapedurl1 = scrapertools.find_single_match(data, '<iframe src="(.*?)"')
    scrapedurl1 = scrapedurl1.replace("//www.playercdn.com/ec/i2.php?url=", "")
    # The playercdn wrapper carries the real URL base64-encoded with its
    # padding trimmed; restore one '=' before decoding.
    scrapedurl1 = base64.b64decode(scrapedurl1 + "=")
    logger.debug(scrapedurl1)
    data = httptools.downloadpage(scrapedurl1).data
    scrapedurl2 = ""  # stays empty for unsupported embed hosts
    if "xvideos" in scrapedurl1:
        scrapedurl2 = scrapertools.find_single_match(data, 'html5player.setVideoHLS\(\'([^\']+)\'\)')
    if "xhamster" in scrapedurl1:
        scrapedurl2 = scrapertools.find_single_match(data, '"[0-9]+p":"([^"]+)"').replace("\\", "")
    logger.debug(scrapedurl2)
    if scrapedurl2:
        itemlist.append(item.clone(action="play", title=item.title, url=scrapedurl2))
    return itemlist

View File

@@ -1,15 +0,0 @@
{
"id": "analdin",
"name": "analdin",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://www.analdin.com/images/logo-retina.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,113 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'https://www.analdin.com/es'
def mainlist(item):
    """Build the channel root menu: listings, channels, categories, search."""
    logger.info()
    menu = [
        ("Nuevas", "lista", host + "/más-reciente/"),
        ("Mas Vistas", "lista", host + "/más-visto/"),
        ("Mejor valorada", "lista", host + "/mejor-valorado/"),
        ("Canal", "catalogo", host),
        ("Categorias", "categorias", host + "/categorías/"),
    ]
    itemlist = [Item(channel=item.channel, title=t, action=a, url=u) for t, a, u in menu]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Run a site search; on failure log the traceback and return [] so the
    global searcher is not interrupted."""
    logger.info()
    item.url = host + "/?s=%s" % texto.replace(" ", "+")
    try:
        return lista(item)
    except:
        import sys
        for trace_line in sys.exc_info():
            logger.error("%s" % trace_line)
        return []
def catalogo(item):
    """List channels from the 'Canales' section of the site popup."""
    logger.info()
    results = []
    page = httptools.downloadpage(item.url).data
    # Keep only the channels portion of the popup markup.
    page = scrapertools.find_single_match(page, '<strong class="popup-title">Canales</strong>(.*?)<strong>Models</strong>')
    page = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", page)
    pattern = '<li><a class="item" href="([^"]+)" title="([^"]+)">'
    for link, name in re.findall(pattern, page, re.DOTALL):
        results.append(Item(channel=item.channel, action="lista", title=name, url=link,
                            thumbnail="", plot=""))
    next_page = scrapertools.find_single_match(page, '<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
    if next_page:
        results.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue",
                                  url=urlparse.urljoin(item.url, next_page)))
    return results
def categorias(item):
    """List categories with their video counts, sorted alphabetically."""
    logger.info()
    results = []
    page = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", httptools.downloadpage(item.url).data)
    pattern = ('<a class="item" href="([^"]+)" title="([^"]+)">.*?'
               'src="([^"]+)".*?'
               '<div class="videos">([^"]+)</div>')
    for link, name, thumb, count in re.findall(pattern, page, re.DOTALL):
        label = name + " (" + count + ")"
        results.append(Item(channel=item.channel, action="lista", title=label,
                            url=urlparse.urljoin(item.url, link),
                            fanart=thumb, thumbnail=thumb, plot=""))
    return sorted(results, key=lambda entry: entry.title)
def lista(item):
    """List videos with duration-prefixed titles, adding a pager when present."""
    logger.info()
    results = []
    page = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", httptools.downloadpage(item.url).data)
    pattern = ('<a class="popup-video-link" href="([^"]+)".*?'
               'thumb="([^"]+)".*?'
               '<div class="duration">(.*?)</div>.*?'
               '<strong class="title">\s*([^"]+)</strong>')
    for link, thumb, runtime, name in re.findall(pattern, page, re.DOTALL):
        label = "[COLOR yellow]" + runtime + "[/COLOR] " + name
        results.append(Item(channel=item.channel, action="play", title=label,
                            url=urlparse.urljoin(item.url, link), thumbnail=thumb,
                            plot="", fanart=thumb, contentTitle=label))
    next_page = scrapertools.find_single_match(page, '<li class="next"><a href="([^"]+)"')
    if next_page:
        results.append(Item(channel=item.channel, action="lista", title="Página Siguiente >>",
                            text_color="blue", url=urlparse.urljoin(item.url, next_page)))
    return results
def play(item):
    """Extract the direct video URL(s) from the player config block."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    found = re.findall('video_url: \'([^\']+)\'', page, re.DOTALL)
    return [item.clone(action="play", title=video_url, url=video_url) for video_url in found]

View File

@@ -1,14 +0,0 @@
{
"id": "beeg",
"name": "Beeg",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "beeg.png",
"banner": "beeg.png",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,122 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
from core import jsontools as json
from core import scrapertools
from core.item import Item
from platformcode import logger
from core import httptools
url_api = ""
Host = "https://beeg.com"
def get_api_url():
    """Refresh the module-level ``url_api`` base URL.

    Beeg versions its JSON API: the current version number is embedded in the
    home page as ``var beeg_version = NNN``, so scrape it and rebuild the
    endpoint prefix. Raises IndexError if the marker is not found.
    """
    global url_api
    data = httptools.downloadpage(Host).data
    version = re.compile('var beeg_version = ([\d]+)').findall(data)[0]
    url_api = Host + "/api/v6/" + version
# Resolve the API URL once at import time (mainlist refreshes it again).
get_api_url()
def mainlist(item):
    """Build the channel root menu, re-resolving the versioned API URL first."""
    logger.info()
    get_api_url()
    itemlist = []
    itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos",
                         url=url_api + "/index/main/0/pc", viewmode="movie"))
    itemlist.append(Item(channel=item.channel, action="canal", title="Canal",
                         url=url_api + "/channels"))
    itemlist.append(Item(channel=item.channel, action="listcategorias", title="Categorias",
                         url=url_api + "/index/main/0/pc", extra="nonpopular"))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
    return itemlist
def search(item, texto):
    """Search by tag; failures are logged and swallowed so the global
    searcher keeps going when one channel breaks."""
    logger.info()
    item.url = url_api + "/index/tag/0/pc?tag=%s" % texto.replace(" ", "+")
    try:
        return videos(item)
    except:
        import sys
        for trace_line in sys.exc_info():
            logger.error("%s" % trace_line)
        return []
def videos(item):
    """Build the video list from a JSON index response and append a pager."""
    logger.info()
    results = []
    payload = json.load(httptools.downloadpage(item.url).data)
    for entry in payload["videos"]:
        thumb = "http://img.beeg.com/236x177/" + str(entry["id"]) + ".jpg"
        video_url = '%s/video/%s?v=2&s=%s&e=%s' % (url_api, entry['svid'], entry['start'], entry['end'])
        results.append(Item(channel=item.channel, action="play", title=entry["title"],
                            url=video_url, thumbnail=thumb, plot="", show="",
                            folder=True, contentType="movie"))
    # Pager: the zero-based page index is embedded in the request URL.
    current = int(scrapertools.find_single_match(item.url, url_api + '/index/[^/]+/([0-9]+)/pc'))
    if payload["pages"] - 1 > current:
        next_url = item.url.replace("/" + str(current) + "/", "/" + str(current + 1) + "/")
        results.append(Item(channel=item.channel, action="videos", title="Página Siguiente",
                            url=next_url, thumbnail="", viewmode="movie"))
    return results
def listcategorias(item):
    """List tag categories with their video counts.

    Fix: parse the string response with ``json.loads`` — ``json.load``
    expects a file-like object and raised AttributeError on the page body.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    JSONData = json.loads(data)
    for Tag in JSONData["tags"]:
        url = url_api + "/index/tag/0/pc?tag=" + Tag["tag"]
        # Multi-word tags use dashes in the API, not encoded spaces.
        url = url.replace("%20", "-")
        title = '%s (%s)' % (str(Tag["tag"]), str(Tag["videos"]))
        itemlist.append(
            Item(channel=item.channel, action="videos", title=title, url=url, viewmode="movie", type="item"))
    return itemlist
def canal(item):
    """List content channels with their video counts.

    Fix: parse the string response with ``json.loads`` — ``json.load``
    expects a file-like object and raised AttributeError on the page body.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    JSONData = json.loads(data)
    for Tag in JSONData["channels"]:
        url = url_api + "/index/channel/0/pc?channel=" + Tag["channel"]
        # Multi-word channel names use dashes in the API, not encoded spaces.
        url = url.replace("%20", "-")
        title = '%s (%s)' % (str(Tag["ps_name"]), str(Tag["videos"]))
        itemlist.append(
            Item(channel=item.channel, action="videos", title=title, url=url, viewmode="movie", type="item"))
    return itemlist
def play(item):
    """Resolve the direct stream URLs for every available quality.

    Returns a list of ``[label, url]`` pairs sorted by quality label.

    Fix: parse the string response with ``json.loads`` — ``json.load``
    expects a file-like object and raised AttributeError on the page body.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    JSONData = json.loads(data)
    for key in JSONData:
        # Quality keys look like "720p", "480p", ...
        videourl = re.compile("([0-9]+p)", re.DOTALL).findall(key)
        if videourl:
            videourl = videourl[0]
            if JSONData[videourl] is not None:
                url = JSONData[videourl]
                url = url.replace("{DATA_MARKERS}", "data=pc.ES")
                if not url.startswith("https:"):
                    url = "https:" + url
                title = videourl
                itemlist.append(["%s %s [directo]" % (title, url[-4:]), url])
    itemlist.sort(key=lambda entry: entry[0])
    return itemlist

View File

@@ -1,15 +0,0 @@
{
"id": "bravoporn",
"name": "bravoporn",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.bravoporn.com/v/images/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,89 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'http://www.bravoporn.com'
def mainlist(item):
    """Root menu: listing modes, categories and search."""
    logger.info()
    return [
        Item(channel=item.channel, title="Nuevas", action="lista", url=host + "/latest-updates/"),
        Item(channel=item.channel, title="Popular", action="lista", url=host + "/most-popular/"),
        Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/c/"),
        Item(channel=item.channel, title="Buscar", action="search"),
    ]
def search(item, texto):
    """Search entry point used by the global finder; never raises."""
    logger.info()
    item.url = host + "/s/?q=%s" % texto.replace(" ", "+")
    try:
        return lista(item)
    except:
        # Log the traceback pieces but return an empty result set.
        import sys
        for trace in sys.exc_info():
            logger.error("%s" % trace)
        return []
def categorias(item):
    """Scrape the category index grid into menu items."""
    logger.info()
    itemlist = []
    page = httptools.downloadpage(item.url).data
    page = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", page)
    patron = ('<a href="([^"]+)" class="th">.*?'
              '<img src="([^"]+)".*?'
              '<span>([^"]+)</span>\s*(\d+) movies.*?</strong>')
    for cat_url, cat_thumb, cat_name, movie_count in re.compile(patron, re.DOTALL).findall(page):
        full_thumb = "http:" + cat_thumb
        # Each category link jumps straight to its "latest" listing.
        full_url = urlparse.urljoin(item.url, cat_url) + "/latest/"
        itemlist.append(Item(channel=item.channel, action="lista",
                             title=cat_name + " (" + movie_count + ")",
                             url=full_url, fanart=full_thumb,
                             thumbnail=full_thumb, plot=""))
    return itemlist
def lista(item):
    """List the videos on a listing/search page, plus a next-page entry."""
    logger.info()
    itemlist = []
    page = httptools.downloadpage(item.url).data
    page = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", page)
    patron = ('<div class=".*?video_block"><a href="([^"]+)".*?'
              '<img src="([^"]+)".*?alt="([^"]+)".*?'
              '<span class="time">([^"]+)</span>')
    for vid_url, vid_thumb, vid_title, runtime in re.compile(patron, re.DOTALL).findall(page):
        full_thumb = "https:" + vid_thumb
        itemlist.append(Item(channel=item.channel, action="play",
                             title="[COLOR yellow]" + runtime + "[/COLOR] " + vid_title,
                             url=urlparse.urljoin(item.url, vid_url),
                             thumbnail=full_thumb, fanart=full_thumb, plot="",
                             contentTitle=vid_title))
    next_page = scrapertools.find_single_match(page, '<a href="([^"]+)" class="next" title="Next">Next</a>')
    if next_page:
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
def play(item):
    """Resolve the direct HQ mp4 source(s) from the video page."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = '<source src="([^"]+)" type=\'video/mp4\' title="HQ" />'
    return [Item(channel=item.channel, action="play", title=item.title, url=direct_url)
            for direct_url in scrapertools.find_multiple_matches(data, patron)]

View File

@@ -1,15 +0,0 @@
{
"id": "camwhoresbay",
"name": "camwhoresbay",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://www.camwhoresbay.com/images/porntrex.ico",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,114 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'https://www.camwhoresbay.com'
def mainlist(item):
    """Root menu: sort modes, categories and search."""
    logger.info()
    return [
        Item(channel=item.channel, title="Nuevos", action="lista", url=host + "/latest-updates/"),
        Item(channel=item.channel, title="Mejor valorados", action="lista", url=host + "/top-rated/"),
        Item(channel=item.channel, title="Mas vistos", action="lista", url=host + "/most-popular/"),
        Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/"),
        Item(channel=item.channel, title="Buscar", action="search"),
    ]
def search(item, texto):
    """Search entry point; keeps the raw query in item.extra for pagination."""
    logger.info()
    # Note: the site URL uses dashes where the global finder passes '+'.
    item.url = "%s/search/%s/" % (host, texto.replace("+", "-"))
    item.extra = texto
    try:
        return lista(item)
    except:
        # Never let this channel's failure abort the global search.
        import sys
        for trace in sys.exc_info():
            logger.error("%s" % trace)
        return []
def categorias(item):
    """Scrape the category grid, returned sorted alphabetically by title."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = ('<a class="item" href="([^"]+)" title="([^"]+)">.*?'
              '<img class="thumb" src="([^"]+)".*?'
              '<div class="videos">([^"]+)</div>')
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    entries = [Item(channel=item.channel, action="lista",
                    title=cat_title + " (" + count + ")",
                    url=cat_url, fanart=cat_thumb, thumbnail=cat_thumb, plot="")
               for cat_url, cat_title, cat_thumb, count in matches]
    return sorted(entries, key=lambda entry: entry.title)
def lista(item):
    """List video results for any listing/search page.

    Handles three pagination schemes depending on how the page was reached:
    search results (AJAX "from_videos" counter), a plain next-page link,
    and the listing AJAX "from" counter.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="video-item ">.*?'
    patron += '<a href="([^"]+)" title="([^"]+)" class="thumb">.*?'
    patron += 'data-original="([^"]+)".*?'
    patron += '<i class="fa fa-clock-o"></i>(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedtime in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        # The Referer is appended to the thumbnail URL so the player/UI sends it.
        thumbnail = "http:" + scrapedthumbnail + "|Referer=%s" % item.url
        plot = ""
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
                             contentTitle = scrapedtitle, fanart=thumbnail))
    if item.extra:
        # Search results: pager state travels in the "from_videos" counter.
        next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
        if next_page:
            if "from_videos=" in item.url:
                next_page = re.sub(r'&from_videos=(\d+)', '&from_videos=%s' % next_page, item.url)
            else:
                next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result" \
                            "&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page)
            itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    else:
        # Plain listing: prefer an explicit next-page link...
        next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
        if next_page and not next_page.startswith("#"):
            next_page = urlparse.urljoin(item.url, next_page)
            itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
        else:
            # ...otherwise fall back to the AJAX "from:" counter.
            next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
            if next_page:
                if "from" in item.url:
                    next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
                else:
                    next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % (
                        item.url, next_page)
                itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist
def play(item):
    """Pick the best available quality URL (alt_url3 > alt_url2 > alt_url > url)."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    scrapedurl = ""
    # Probe qualities from best to worst; stop at the first one present.
    for key in ('video_alt_url3', 'video_alt_url2', 'video_alt_url', 'video_url'):
        scrapedurl = scrapertools.find_single_match(data, key + ": \'([^\']+)\'")
        if scrapedurl != "":
            break
    return [Item(channel=item.channel, action="play", title=scrapedurl, url=scrapedurl,
                 thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo")]

View File

@@ -1,12 +0,0 @@
{
"id": "canalporno",
"name": "Canalporno",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://i.imgur.com/gAbPcvT.png?1",
"banner": "canalporno.png",
"categories": [
"adult"
]
}

View File

@@ -1,92 +0,0 @@
# -*- coding: utf-8 -*-
import urlparse,re
from core import httptools
from core import scrapertools
from platformcode import logger
host = "http://www.canalporno.com"
def mainlist(item):
    """Root menu built from the site's AJAX endpoints."""
    logger.info()
    return [
        item.clone(action="lista", title="Útimos videos", url=host + "/ajax/homepage/?page=1"),
        item.clone(action="categorias", title="Canal", url=host + "/ajax/list_producers/?page=1"),
        item.clone(action="categorias", title="PornStar", url=host + "/ajax/list_pornstars/?page=1"),
        item.clone(action="categorias", title="Categorias", url=host + "/categorias"),
        item.clone(action="search", title="Buscar"),
    ]
def search(item, texto):
    """AJAX search endpoint; returns [] on any scraping failure."""
    logger.info()
    item.url = host + "/ajax/new_search/?q=%s&page=1" % texto.replace(" ", "+")
    try:
        return lista(item)
    except:
        import sys
        for trace in sys.exc_info():
            logger.error("%s" % trace)
        return []
def categorias(item):
    """List producer/pornstar/category entries from the AJAX endpoints.

    Fix: ``next_page`` was only assigned inside the ``"/?page="`` branch but
    tested unconditionally afterwards, which raised NameError for the plain
    /categorias menu URL; it is now initialised up front.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Pornstar thumbs use single-quoted src attributes; the rest use double quotes.
    if "pornstars" in item.url:
        patron = '<div class="muestra.*?href="([^"]+)".*?src=\'([^\']+)\'.*?alt="([^"]+)".*?'
    else:
        patron = '<div class="muestra.*?href="([^"]+)".*?src="([^"]+)".*?alt="([^"]+)".*?'
    if "Categorias" in item.title:
        patron += '<div class="numero">([^<]+)</div>'
    else:
        patron += '</span> (\d+) vídeos</div>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, scrapedthumbnail, scrapedtitle, cantidad in matches:
        title = "%s [COLOR yellow] %s [/COLOR]" % (scrapedtitle, cantidad)
        # Rewrite public paths to their AJAX listing equivalents.
        url = url.replace("/videos-porno/", "/ajax/show_category/").replace("/sitio/", "/ajax/show_producer/").replace("/pornstar/", "/ajax/show_pornstar/")
        url = host + url + "?page=1"
        itemlist.append(item.clone(action="lista", title=title, url=url, thumbnail=scrapedthumbnail))
    next_page = ""  # FIX: avoid NameError when the URL carries no page counter
    if "/?page=" in item.url:
        num = int(scrapertools.find_single_match(item.url, ".*?/?page=(\d+)"))
        next_page = "?page=" + str(num + 1)
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist
def lista(item):
    """Video listing with a numeric ?page= counter for pagination."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = ('data-src="([^"]+)" alt="([^"]+)".*?<h2><a href="([^"]+)">.*?'
              '<div class="duracion"><span class="ico-duracion sprite"></span> ([^"]+) min</div>')
    for thumb, name, rel_url, runtime in scrapertools.find_multiple_matches(data, patron):
        itemlist.append(item.clone(action="play",
                                   title="[COLOR yellow] %s [/COLOR] %s" % (runtime, name),
                                   url=host + rel_url, thumbnail=thumb))
    # Bump the page counter in the current URL to build the next-page link.
    base = scrapertools.find_single_match(item.url, '(.*?)page=\d+')
    current = int(scrapertools.find_single_match(item.url, ".*?/?page=(\d+)"))
    next_page = base + "page=" + str(current + 1)
    itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist
def play(item):
    """Extract the direct <source> URL from the video page."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    media_url = scrapertools.find_single_match(page, '<source src="([^"]+)"')
    return [item.clone(url=media_url, server="directo")]

View File

@@ -1,14 +0,0 @@
{
"id": "cat3plus",
"name": "Cat3plus",
"active": true,
"adult": true,
"language": [],
"thumbnail": "https://i.imgur.com/SJxXKa2.png",
"fanart": "https://i.imgur.com/ejCwTxT.jpg",
"banner": "https://i.imgur.com/bXUyk6m.png",
"categories": [
"movie",
"vos"
]
}

View File

@@ -1,130 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel SleazeMovies -*-
# -*- Created for Alfa-addon -*-
# -*- By Sculkurt -*-
import re
import urllib
import urlparse
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
host = 'http://www.cat3plus.com/'
headers = [
['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0'],
['Accept-Encoding', 'gzip, deflate'],
['Referer', host]
]
def mainlist(item):
    """Root menu: full listing, year archive and search."""
    logger.info()
    return [
        item.clone(title="Todas", action="list_all", url=host, thumbnail=get_thumb('all', auto=True)),
        item.clone(title="Años", action="years", url=host, thumbnail=get_thumb('year', auto=True)),
        item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True)),
    ]
def years(item):
    """Scrape the year-archive links from the blog sidebar."""
    logger.info()
    data = httptools.downloadpage(item.url, cookies=False).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = "<a dir='ltr' href='([^']+)'>([^<]+)</a>"
    return [item.clone(action='list_all', title=year_title, url=year_url)
            for year_url, year_title in scrapertools.find_multiple_matches(data, patron)]
def get_source(url):
    """Download *url* and collapse whitespace/entities for easier regex matching."""
    logger.info()
    raw = httptools.downloadpage(url).data
    return re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", raw)
def list_all(item):
    """List movies with TMDB-enriched metadata plus a next-page entry."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = ("<h2 class='post-title entry-title'><a href='([^']+)'>([^(]+).*?\(([^)]+).*?"
              'src="([^"]+).*?')
    for movie_url, movie_title, year, img in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=item.channel,
                             title=movie_title,
                             url=movie_url,
                             action="findvideos",
                             thumbnail=img,
                             contentTitle=movie_title,
                             contentType="movie",
                             infoLabels={'year': year}))
    # Enrich everything in one TMDB batch lookup.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Blogger's "older posts" link marks the next page.
    next_page = scrapertools.find_single_match(data, "<a class='blog-pager-older-link' href='([^']+)'")
    if next_page != "":
        itemlist.append(Item(channel=item.channel, action="list_all", title=">> Página siguiente", url=next_page, folder=True))
    return itemlist
def search(item, texto):
    """Blog search; returns [] on scraping failure."""
    logger.info()
    if texto != "":
        item.url = host + "search?q=" + texto.replace(" ", "+")
        item.extra = "busqueda"
    try:
        return list_all(item)
    except:
        import sys
        for trace in sys.exc_info():
            logger.error("%s" % trace)
        return []
def findvideos(item):
    """Follow each external link on the movie page and collect playable servers.

    Also appends the "add to videolibrary" entry when library support is on
    and this is not already a library-triggered lookup.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<h2>\s*<a href="([^"]+)" target="_blank">.*?</a></h2>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url in matches:
        # Each <h2> link points at an intermediate page hosting the embeds.
        data = httptools.downloadpage(url, headers={'Referer': item.url}).data
        itemlist.extend(servertools.find_video_items(data=data))
    # Re-brand every detected server item for this channel.
    for video in itemlist:
        video.channel = item.channel
        video.contentTitle = item.contentTitle
        video.title = video.server.capitalize()
    # Opción "Añadir esta pelicula a la videoteca"
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel = item.channel,
                             title = '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             url = item.url,
                             action = "add_pelicula_to_library",
                             extra = "findvideos",
                             contentTitle = item.contentTitle,
                             thumbnail = item.thumbnail
                             ))
    return itemlist

View File

@@ -1,12 +0,0 @@
{
"id": "cinetemagay",
"name": "Cinetemagay",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "cinetemagay.png",
"banner": "cinetemagay.png",
"categories": [
"adult"
]
}

View File

@@ -1,128 +0,0 @@
# -*- coding: utf-8 -*-
import os
import re
from core import scrapertools
from core import servertools
from core import httptools
from core.item import Item
from platformcode import config, logger
IMAGES_PATH = os.path.join(config.get_runtime_path(), 'resources', 'images', 'cinetemagay')
def strip_tags(value):
    """Return *value* with every HTML/XML tag removed."""
    tag_pattern = re.compile(r'<[^>]*?>')
    return tag_pattern.sub('', value)
def mainlist(item):
    """Root menu: one entry per aggregated Blogger feed."""
    logger.info()
    feeds = [
        ("Cine gay latinoamericano",
         "http://cinegaylatinoamericano.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
         "http://www.americaeconomia.com/sites/default/files/imagecache/foto_nota/homosexual1.jpg"),
        ("Cine y cortos gay",
         "http://cineycortosgay.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
         "http://www.elmolar.org/wp-content/uploads/2015/05/cortometraje.jpg"),
        ("Cine gay online (México)",
         "http://cinegayonlinemexico.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
         "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTmmqL6tS2Ced1VoxlGQT0q-ibPEz1DCV3E1waHFDI5KT0pg1lJ"),
        ("Sentido gay",
         "http://www.sentidogay.blogspot.com.es//feeds/posts/default/?max-results=100&start-index=1",
         "http://1.bp.blogspot.com/-epOPgDD_MQw/VPGZGQOou1I/AAAAAAAAAkI/lC25GrukDuo/s1048/SentidoGay.jpg"),
        ("PGPA",
         "http://pgpa.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
         "http://themes.googleusercontent.com/image?id=0BwVBOzw_-hbMNTRlZjk2YWMtYTVlMC00ZjZjLWI3OWEtMWEzZDEzYWVjZmQ4"),
    ]
    return [Item(channel=item.channel, action="lista", title=feed_title, url=feed_url, thumbnail=feed_thumb)
            for feed_title, feed_url, feed_thumb in feeds]
def lista(item):
    """Parse a Blogger Atom feed into entry items plus a next-100 pager item."""
    logger.info()
    itemlist = []
    # Download the feed page
    data = httptools.downloadpage(item.url).data
    # Extract the entries (folders); groups: (thumbnail, raw snippet, url, title)
    patronvideos = '&lt;img .*?src=&quot;(.*?)&quot;'
    patronvideos += "(.*?)<link rel='alternate' type='text/html' href='([^']+)' title='([^']+)'.*?>"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for match in matches:
        # Undo the most common XML entity escapes in the title.
        scrapedtitle = match[3]
        scrapedtitle = scrapedtitle.replace("&apos;", "'")
        scrapedtitle = scrapedtitle.replace("&quot;", "'")
        scrapedtitle = scrapedtitle.replace("&amp;amp;", "'")
        scrapedtitle = scrapedtitle.replace("&amp;#39;", "'")
        scrapedurl = match[2]
        scrapedthumbnail = match[0]
        imagen = ""
        scrapedplot = match[1]
        tipo = match[1]
        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        # Rebuild a readable plot from the escaped feed HTML, then strip tags.
        scrapedplot = "<" + scrapedplot
        scrapedplot = scrapedplot.replace("&gt;", ">")
        scrapedplot = scrapedplot.replace("&lt;", "<")
        scrapedplot = scrapedplot.replace("</div>", "\n")
        scrapedplot = scrapedplot.replace("<br />", "\n")
        scrapedplot = scrapedplot.replace("&amp;", "")
        scrapedplot = scrapedplot.replace("nbsp;", "")
        scrapedplot = strip_tags(scrapedplot)
        itemlist.append(
            Item(channel=item.channel, action="detail", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                 plot=scrapedurl + scrapedplot, folder=True))
    # Pager: bump the feed's start-index by 100 for the next block of posts.
    variable = item.url.split("index=")[1]
    variable = int(variable)
    variable += 100
    variable = str(variable)
    variable_url = item.url.split("index=")[0]
    url_nueva = variable_url + "index=" + variable
    itemlist.append(
        Item(channel=item.channel, action="lista", title="Ir a la página siguiente (desde " + variable + ")",
             url=url_nueva, thumbnail="", plot="Pasar a la página siguiente (en grupos de 100)\n\n" + url_nueva))
    return itemlist
def detail(item):
    """Resolve server links inside an entry page and list them as playable items."""
    logger.info()
    itemlist = []
    # Download the entry page
    data = httptools.downloadpage(item.url).data
    # Undo common percent-encodings so embedded player URLs become matchable.
    data = data.replace("%3A", ":")
    data = data.replace("%2F", "/")
    data = data.replace("%3D", "=")
    data = data.replace("%3", "?")
    data = data.replace("%26", "&")
    descripcion = ""
    plot = ""
    patrondescrip = 'SINOPSIS:(.*?)'
    matches = re.compile(patrondescrip, re.DOTALL).findall(data)
    if len(matches) > 0:
        # Clean the synopsis text of entities, line breaks and leftover tags.
        descripcion = matches[0]
        descripcion = descripcion.replace("&nbsp;", "")
        descripcion = descripcion.replace("<br/>", "")
        descripcion = descripcion.replace("\r", "")
        descripcion = descripcion.replace("\n", " ")
        descripcion = descripcion.replace("\t", " ")
        descripcion = re.sub("<[^>]+>", " ", descripcion)
        descripcion = descripcion
        try:
            # Python 2 re-encoding of the plot text; falls back to the raw string.
            plot = unicode(descripcion, "utf-8").encode("iso-8859-1")
        except:
            plot = descripcion
    # NOTE(review): `plot` is computed above but never used below — the items
    # carry the server URL as their plot instead; kept for fidelity.
    # Look for server-hosted video links in the page.
    video_itemlist = servertools.find_video_items(data=data)
    for video_item in video_itemlist:
        itemlist.append(Item(channel=item.channel, action="play", server=video_item.server,
                             title=item.title + " " + video_item.title, url=video_item.url, thumbnail=item.thumbnail,
                             plot=video_item.url, folder=False))
    return itemlist

View File

@@ -1,15 +0,0 @@
{
"id": "cliphunter",
"name": "cliphunter",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.cliphunter.com/gfx/new/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,108 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'https://www.cliphunter.com'
def mainlist(item):
    """Root menu: listings, pornstar catalogue, categories and search."""
    logger.info()
    return [
        Item(channel=item.channel, title="Nuevas", action="lista", url=host + "/categories/All"),
        Item(channel=item.channel, title="Popular", action="lista", url=host + "/popular/ratings/yesterday"),
        Item(channel=item.channel, title="Pornstars", action="catalogo", url=host + "/pornstars/"),
        Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/"),
        Item(channel=item.channel, title="Buscar", action="search"),
    ]
def search(item, texto):
    """Search entry point used by the global finder; never raises."""
    logger.info()
    item.url = host + "/search/%s" % texto.replace(" ", "+")
    try:
        return lista(item)
    except:
        import sys
        for trace in sys.exc_info():
            logger.error("%s" % trace)
        return []
def catalogo(item):
    """Pornstar catalogue pages, with pagination."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href="([^"]+)">\s*<img src=\'([^\']+)\'/>.*?<span>([^"]+)</span>'
    for star_url, star_thumb, star_name in re.compile(patron, re.DOTALL).findall(data):
        # Jump straight to the performer's movie list.
        itemlist.append(Item(channel=item.channel, action="lista", title=star_name,
                             url=urlparse.urljoin(item.url, star_url) + "/movies",
                             fanart=star_thumb, thumbnail=star_thumb, plot=""))
    next_page = scrapertools.find_single_match(data, '<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
    if next_page != "":
        itemlist.append(Item(channel=item.channel, action="catalogo", title="Página Siguiente >>",
                             text_color="blue", url=urlparse.urljoin(item.url, next_page)))
    return itemlist
def categorias(item):
    """Category index grid."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)"/>'
    return [Item(channel=item.channel, action="lista", title=cat_title,
                 url=urlparse.urljoin(item.url, cat_url),
                 fanart=cat_thumb, thumbnail=cat_thumb, plot="")
            for cat_url, cat_title, cat_thumb in re.compile(patron, re.DOTALL).findall(data)]
def lista(item):
    """Video listing page with duration-prefixed titles and pagination."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<img class=".*?" src="([^"]+)".*?<div class="tr">(.*?)</div>.*?<a href="([^"]+)\s*" class="vttl.*?">(.*?)</a>'
    for vid_thumb, vid_time, vid_url, vid_title in re.compile(patron, re.DOTALL).findall(data):
        colored_title = "[COLOR yellow]" + vid_time + "[/COLOR] " + vid_title
        itemlist.append(Item(channel=item.channel, action="play", title=colored_title,
                             url=urlparse.urljoin(item.url, vid_url), thumbnail=vid_thumb,
                             plot="", fanart=vid_thumb, contentTitle=colored_title))
    next_page = scrapertools.find_single_match(data, '<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
    if next_page != "":
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
def play(item):
    """Collect every direct media URL embedded in the player JSON."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '"url"\:"(.*?)"'
    for raw_url in scrapertools.find_multiple_matches(data, patron):
        # The JSON escapes slashes; unescape before handing to the player.
        direct_url = raw_url.replace("\/", "/")
        itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=direct_url,
                             thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo"))
    return itemlist

View File

@@ -1,15 +0,0 @@
{
"id": "coomelonitas",
"name": "Coomelonitas",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.coomelonitas.com/wp-content/themes/3xTheme/images/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,65 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
host ='http://www.coomelonitas.com'
def mainlist(item):
    """Root menu: movie listing, categories and search."""
    logger.info()
    return [
        Item(channel=item.channel, title="Peliculas", action="lista", url=host),
        Item(channel=item.channel, title="Categorias", action="categorias", url=host),
        Item(channel=item.channel, title="Buscar", action="search"),
    ]
def search(item, texto):
    """WordPress search; returns [] on scraping failure."""
    logger.info()
    item.url = host + "/?s=%s" % texto.replace(" ", "+")
    try:
        return lista(item)
    except:
        import sys
        for trace in sys.exc_info():
            logger.error("%s" % trace)
        return []
def categorias(item):
    """WordPress category list (no thumbnails available on the site)."""
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a>'
    for cat_url, cat_title in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=item.channel, action="lista", title=cat_title,
                             url=cat_url, thumbnail="", plot=""))
    return itemlist
def lista(item):
    """Parse each movie card ("all" block) into a findvideos item."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="all"(.*?)</div>'
    for block in re.compile(patron, re.DOTALL).findall(data):
        # Pull each field out of the card's inner HTML.
        block_title = scrapertools.find_single_match(block, 'title="([^"]+)"')
        block_url = scrapertools.find_single_match(block, '<a href="([^"]+)"')
        block_plot = scrapertools.find_single_match(block, '<p class="summary">(.*?)</p>')
        block_thumb = scrapertools.find_single_match(block, '<img src="([^"]+)"')
        itemlist.append(Item(channel=item.channel, action="findvideos", title=block_title, url=block_url,
                             fanart=block_thumb, thumbnail=block_thumb, plot=block_plot, viewmode="movie"))
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="siguiente">')
    if next_page != "":
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist

View File

@@ -1,12 +0,0 @@
{
"id": "cumlouder",
"name": "Cumlouder",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "cumlouder.png",
"banner": "cumlouder.png",
"categories": [
"adult"
]
}

View File

@@ -1,180 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
host = 'https://www.cumlouder.com'
def mainlist(item):
    """Root menu; also clears the channel's persisted url_error flag."""
    logger.info()
    config.set_setting("url_error", False, "cumlouder")
    return [
        item.clone(title="Últimos videos", action="videos", url=host + "/porn/"),
        item.clone(title="Pornstars", action="pornstars_list", url=host + "/girls/"),
        item.clone(title="Listas", action="series", url=host + "/series/"),
        item.clone(title="Categorias", action="categorias", url=host + "/categories/"),
        item.clone(title="Buscar", action="search", url=host + "/search?q=%s"),
    ]
def search(item, texto):
    """Fill the %s placeholder in the search URL and list the results."""
    logger.info()
    item.url = item.url % texto
    item.action = "videos"
    try:
        return videos(item)
    except:
        import traceback
        logger.error(traceback.format_exc())
        return []
def pornstars_list(item):
    """Menu of pornstar index pages: most popular plus one entry per letter."""
    logger.info()
    itemlist = [item.clone(title="Mas Populares", action="pornstars", url=host + "/girls/1/")]
    itemlist.extend(item.clone(title=letter.upper(), url=urlparse.urljoin(item.url, letter), action="pornstars")
                    for letter in "abcdefghijklmnopqrstuvwxyz")
    return itemlist
def pornstars(item):
    """List pornstar entries from a /girls/ index page, with pagination.

    Outbound links may be wrapped in a go.php tracking redirector; those are
    unwrapped back to the real target URL before use.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<a girl-url=.*?'
    patron += 'href="([^"]+)" title="([^"]+)">.*?'
    patron += 'data-lazy="([^"]+)".*?'
    patron += '<span class="ico-videos sprite"></span>([^<]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url, title, thumbnail, count in matches:
        if "go.php?" in url:
            # Unwrap the redirector: /go.php?u=<urlencoded target>&...
            url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0])
            thumbnail = urllib.unquote(thumbnail.split("/go.php?u=")[1].split("&")[0])
        else:
            url = urlparse.urljoin(item.url, url)
            # Protocol-relative thumb URLs get an explicit scheme.
            if not thumbnail.startswith("https"):
                thumbnail = "https:%s" % thumbnail
        itemlist.append(item.clone(title="%s (%s)" % (title, count), url=url, action="videos", fanart=thumbnail, thumbnail=thumbnail))
    # Paginador
    matches = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>', re.DOTALL).findall(data)
    if matches:
        if "go.php?" in matches[0]:
            url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0])
        else:
            url = urlparse.urljoin(item.url, matches[0])
        itemlist.append(item.clone(title="Página Siguiente >>", url=url))
    return itemlist
def categorias(item):
    """List tag/category tiles with their video counts, plus a pager."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = ('<a tag-url=.*?'
              'href="([^"]+)" title="([^"]+)".*?'
              'data-lazy="([^"]+)".*?'
              '<span class="cantidad">([^<]+)</span>')
    for url, title, thumbnail, count in re.findall(patron, data, re.DOTALL):
        if "go.php?" in url:
            # Unwrap the outbound redirector to get the real target.
            url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0])
            thumbnail = urllib.unquote(thumbnail.split("/go.php?u=")[1].split("&")[0])
        else:
            url = urlparse.urljoin(item.url, url)
        if not thumbnail.startswith("https"):
            thumbnail = "https:%s" % thumbnail
        itemlist.append(item.clone(title="%s (%s videos)" % (title, count),
                                   url=url, action="videos", fanart=thumbnail,
                                   thumbnail=thumbnail))
    # Pager
    nxt = re.findall('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>',
                     data, re.DOTALL)
    if nxt:
        if "go.php?" in nxt[0]:
            url = urllib.unquote(nxt[0].split("/go.php?u=")[1].split("&")[0])
        else:
            url = urlparse.urljoin(item.url, nxt[0])
        itemlist.append(item.clone(title="Página Siguiente >>", url=url))
    return itemlist
def series(item):
    """List series/playlists with their counts, plus a pager."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<a onclick=.*?href="([^"]+)".*?\<img src="([^"]+)".*?h2 itemprop="name">([^<]+).*?p>([^<]+)</p>'
    for url, thumbnail, title, count in re.findall(patron, data, re.DOTALL):
        itemlist.append(item.clone(title="%s (%s) " % (title, count),
                                   url=urlparse.urljoin(item.url, url),
                                   action="videos", fanart=thumbnail,
                                   thumbnail=thumbnail))
    # Pager
    nxt = re.findall('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>',
                     data, re.DOTALL)
    if nxt:
        if "go.php?" in nxt[0]:
            url = urllib.unquote(nxt[0].split("/go.php?u=")[1].split("&")[0])
        else:
            url = urlparse.urljoin(item.url, nxt[0])
        itemlist.append(item.clone(title="Página Siguiente >>", url=url))
    return itemlist
def videos(item):
    """List scene tiles; HD scenes get a red "HD" tag in the title."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = ('<a class="muestra-escena" href="([^"]+)" title="([^"]+)".*?'
              'data-lazy="([^"]+)".*?'
              '<span class="ico-minutos sprite"></span>([^<]+)</span>(.*?)</a>')
    for url, title, thumbnail, duration, calidad in re.findall(patron, data, re.DOTALL):
        if "hd sprite" in calidad:
            title = "[COLOR yellow] %s [/COLOR][COLOR red] HD [/COLOR] %s" % (duration, title)
        else:
            title = "[COLOR yellow] %s [/COLOR] %s" % (duration, title)
        if "go.php?" in url:
            # Unwrap the outbound redirector.
            url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0])
            thumbnail = urllib.unquote(thumbnail.split("/go.php?u=")[1].split("&")[0])
        else:
            url = urlparse.urljoin(host, url)
        if not thumbnail.startswith("https"):
            thumbnail = "https:%s" % thumbnail
        itemlist.append(item.clone(title=title, url=url, action="play",
                                   thumbnail=thumbnail, contentThumbnail=thumbnail,
                                   fanart=thumbnail, contentType="movie",
                                   contentTitle=title))
    # Pager: the rel="nofollow" form first, then the plain one.
    nextpage = scrapertools.find_single_match(data, '<ul class="paginador"(.*?)</ul>')
    nxt = re.findall('<a href="([^"]+)" rel="nofollow">Next »</a>', nextpage, re.DOTALL)
    if not nxt:
        nxt = re.findall('<li[^<]+<a href="([^"]+)">Next »</a[^<]+</li>', nextpage, re.DOTALL)
    if nxt:
        if "go.php?" in nxt[0]:
            url = urllib.unquote(nxt[0].split("/go.php?u=")[1].split("&")[0])
        else:
            url = urlparse.urljoin(item.url, nxt[0])
        itemlist.append(item.clone(title="Página Siguiente >>", url=url))
    return itemlist
def play(item):
    """Pick the first <source> of the player and emit a direct-play item."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = '<source src="([^"]+)" type=\'video/([^\']+)\' label=\'[^\']+\' res=\'([^\']+)\''
    url, type, res = re.findall(patron, data, re.DOTALL)[0]
    if "go.php?" in url:
        url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0])
    elif not url.startswith("http"):
        url = "https:" + url.replace("&amp;", "&")
    return [Item(channel='cumlouder', action="play", title='Video' + res,
                 contentTitle=type.upper() + ' ' + res, url=url,
                 server="directo", folder=False)]

View File

@@ -1,15 +0,0 @@
{
"id": "czechvideo",
"name": "Czechvideo",
"active": false,
"adult": true,
"language": ["*"],
"thumbnail": "http://czechvideo.org/templates/Default/images/black75.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,87 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
host = 'http://czechvideo.org'
def mainlist(item):
    """Root menu for the channel."""
    logger.info()
    itemlist = [Item(channel=item.channel, title=t, action=a, url=host)
                for t, a in (("Ultimos", "lista"), ("Categorias", "categorias"))]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Search by tag; returns [] on any failure (global-search safe)."""
    logger.info()
    item.url = host + "/tags/%s/" % texto.replace(" ", "+")
    try:
        result = lista(item)
    except:
        import sys
        for part in sys.exc_info():
            logger.error("%s" % part)
        result = []
    return result
def categorias(item):
    """Scrape the sidebar category list."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<div class="category">(.*?)</ul>')
    matches = re.findall('<li><a href="(.*?)".*?>(.*?)</a></li>', data, re.DOTALL)
    scrapertools.printMatches(matches)
    for cat_url, cat_title in matches:
        itemlist.append(Item(channel=item.channel, action="lista",
                             title=cat_title,
                             url=urlparse.urljoin(item.url, cat_url),
                             thumbnail="", plot=""))
    return itemlist
def lista(item):
    """List video tiles; each title is prefixed with the running time."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = ('<div class="short-story">.*?'
              '<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)".*?'
              'div class="short-time">(.*?)</div>')
    matches = re.findall(patron, data, re.DOTALL)
    scrapertools.printMatches(matches)
    for vid_url, vid_title, vid_thumb, vid_time in matches:
        vid_thumb = urlparse.urljoin(item.url, vid_thumb)
        itemlist.append(Item(channel=item.channel, action="play",
                             title="[COLOR yellow]" + vid_time + "[/COLOR] " + vid_title,
                             url=vid_url, thumbnail=vid_thumb,
                             fanart=vid_thumb, plot=""))
    next_page = scrapertools.find_single_match(data, '<del><a href="([^"]+)">Next</a></del>')
    if next_page != "":
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>",
                                   text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
def play(item):
    """Let servertools detect embedded players, re-tagging each hit."""
    logger.info()
    found = servertools.find_video_items(data=httptools.downloadpage(item.url).data)
    for video in found:
        video.title = item.title
        video.contentTitle = item.contentTitle
        video.thumbnail = item.thumbnail
        video.channel = item.channel
    return found

View File

@@ -1,12 +0,0 @@
{
"id": "datoporn",
"name": "DatoPorn",
"language": ["*"],
"active": true,
"adult": true,
"thumbnail": "http://i.imgur.com/tBSWudd.png?1",
"banner": "datoporn.png",
"categories": [
"adult"
]
}

View File

@@ -1,63 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import config, logger
def mainlist(item):
    """Root menu: categories and search."""
    logger.info()
    return [
        item.clone(action="categorias", title="Categorías",
                   url="http://dato.porn/categories_all",
                   contentType="movie", viewmode="movie"),
        item.clone(title="Buscar...", action="search",
                   contentType="movie", viewmode="movie"),
    ]
def search(item, texto):
    """Build the search URL for *texto* and list the results."""
    logger.info()
    query = texto.replace(" ", "+")
    item.url = "http://dato.porn/?k=%s&op=search" % query
    return lista(item)
def lista(item):
    """List the video grid, plus a pager.

    Bug fix: with 'unify' disabled the original prepended the duration
    tag twice (once in the branch, once unconditionally). The prefix is
    now applied exactly once for both settings.
    """
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    patron = '<div class="videobox">\s*<a href="([^"]+)".*?'
    patron += 'url\(\'([^\']+)\'.*?'
    patron += '<span>(.*?)<\/span>.*?'
    patron += 'class="title">(.*?)<\/a>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, duration, scrapedtitle in matches:
        if "/embed-" not in scrapedurl:
            scrapedurl = scrapedurl.replace("datoporn.co/", "datoporn.co/embed-") + ".html"
        if config.get_setting('unify'):
            scrapedtitle += ' gb'
        # Prepend the duration exactly once.
        scrapedtitle = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle
        scrapedtitle = scrapedtitle.replace(":", "'")
        itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl,
                                   thumbnail=scrapedthumbnail, server="datoporn",
                                   fanart=scrapedthumbnail.replace("_t.jpg", ".jpg"),
                                   plot=""))
    next_page = scrapertools.find_single_match(data, '<a class=["|\']page-link["|\'] href=["|\']([^["|\']+)["|\']>Next')
    if next_page and itemlist:
        itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
    return itemlist
def categorias(item):
    """Scrape category tiles; append the video count when present."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="vid_block">\s*<a href="([^"]+)".*?url\((.*?)\).*?<span>(.*?)</span>.*?<b>(.*?)</b>'
    for cat_url, cat_thumb, total, cat_title in scrapertools.find_multiple_matches(data, patron):
        if total:
            cat_title = "%s (%s)" % (cat_title, total)
        itemlist.append(item.clone(action="lista", title=cat_title,
                                   url=cat_url, thumbnail=cat_thumb))
    return itemlist

View File

@@ -1,16 +0,0 @@
{
"id": "elreyx",
"name": "elreyx",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.elreyx.com/template/images/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,98 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from platformcode import config
host = 'http://www.elreyx.com'
def mainlist(item):
    """Root menu for elreyx."""
    logger.info()
    menu = [
        ("Peliculas", "lista", host + "/peliculasporno.html"),
        ("Escenas", "lista", host + "/index.html"),
        ("Productora", "categorias", host + "/index.html"),
        ("Categorias", "categorias", host + "/peliculasporno.html"),
    ]
    itemlist = [Item(channel=item.channel, title=t, action=a, url=u)
                for t, a, u in menu]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Search; returns [] on failure so the global search keeps working."""
    logger.info()
    item.url = host + "/search-%s" % texto.replace(" ", "+") + ".html"
    try:
        result = lista(item)
    except:
        import sys
        for part in sys.exc_info():
            logger.error("%s" % part)
        result = []
    return result
def categorias(item):
    """List either film categories or producers, depending on the entry."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    if item.title == "Categorias":
        patron = '<td><a href="([^<]+)" title="Movies ([^<]+)">.*?</a>'
    else:
        patron = '<a href="([^<]+)" title="View Category ([^<]+)">.*?</a>'
    for cat_url, cat_title in re.findall(patron, data, re.DOTALL):
        # hrefs on this site are protocol-relative.
        itemlist.append(Item(channel=item.channel, action="lista",
                             title=cat_title, url="https:" + cat_url,
                             thumbnail="", plot=""))
    return itemlist
def lista(item):
    """List movies or scenes; both layouts use protocol-relative URLs."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if not "/peliculasporno" in item.url:
        patron = '<div class="notice_image">.*?<a title="([^"]+)" href="([^"]+)">.*?<img src="(.*?)">'
    else:
        patron = '<div class="captura"><a title="([^"]+)" href="([^"]+)".*?><img src="(.*?)"'
    for vid_title, vid_url, vid_thumb in re.findall(patron, data, re.DOTALL):
        full_thumb = "https:" + vid_thumb
        itemlist.append(Item(channel=item.channel, action="play", title=vid_title,
                             url="https:" + vid_url, thumbnail=full_thumb,
                             fanart=full_thumb, plot=""))
    next_page = scrapertools.find_single_match(data, '<li class="float-xs-right"><a href=\'([^\']+)\' title=\'Pagina \d+\'>')
    if next_page == "":
        next_page = scrapertools.find_single_match(data, '<li><a href=\'([^\']+)\' title=\'Pagina \d+\'>&raquo;</a>')
    if next_page != "":
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>",
                                   text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
def play(item):
    """Follow the embedded iframe (either case) and resolve its servers."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    url = scrapertools.find_single_match(data, '<IFRAME SRC="(.*?)"')
    if url == "":
        url = scrapertools.find_single_match(data, '<iframe src="(.*?)"')
    found = servertools.find_video_items(data=httptools.downloadpage(url).data)
    for video in found:
        video.title = item.title
        video.fulltitle = item.fulltitle
        video.thumbnail = item.thumbnail
        video.channel = item.channel
    return found

View File

@@ -1,12 +0,0 @@
{
"id": "eporner",
"name": "Eporner",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "eporner.png",
"banner": "eporner.png",
"categories": [
"adult"
]
}

View File

@@ -1,141 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from platformcode import logger
host = 'http://www.eporner.com'
def mainlist(item):
    """Root menu for eporner."""
    logger.info()
    menu = [
        ("Últimos videos", "videos", host + "/0/"),
        ("Más visto", "videos", host + "/most-viewed/"),
        ("Mejor valorado", "videos", host + "/top-rated/"),
        ("Categorias", "categorias", host + "/categories/"),
        ("Pornstars", "pornstars", host + "/pornstars/"),
        (" Alfabetico", "pornstars_list", host + "/pornstars/"),
    ]
    itemlist = [item.clone(title=t, action=a, url=u) for t, a, u in menu]
    itemlist.append(item.clone(title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Search (words joined with '-'); [] on failure (global-search safe)."""
    logger.info()
    item.url = host + "/search/%s/" % texto.replace(" ", "-")
    try:
        result = videos(item)
    except:
        import sys
        for part in sys.exc_info():
            logger.error("%s" % part)
        result = []
    return result
def pornstars_list(item):
    """One menu entry per initial letter, A-Z."""
    logger.info()
    return [item.clone(title=ch, url=urlparse.urljoin(item.url, ch),
                       action="pornstars")
            for ch in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
def pornstars(item):
    """List performer profiles with their video counts, plus a pager."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = ('<div class="mbprofile">.*?'
              '<a href="([^"]+)" title="([^"]+)">.*?'
              '<img src="([^"]+)".*?'
              '<div class="mbtim"><span>Videos: </span>([^<]+)</div>')
    for url, title, thumbnail, count in re.findall(patron, data, re.DOTALL):
        itemlist.append(item.clone(title="%s (%s videos)" % (title, count),
                                   url=urlparse.urljoin(item.url, url),
                                   action="videos", thumbnail=thumbnail))
    # Pager
    next_page = scrapertools.find_single_match(data, "<a href='([^']+)' class='nmnext' title='Next page'>")
    if next_page != "":
        itemlist.append(item.clone(action="pornstars", title="Página Siguiente >>",
                                   text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
def categorias(item):
    """List categories (sorted by name) with their video counts.

    Bug fix: the original thumbnail pattern had the ``% title``
    substitution INSIDE the string literal
    (``'... alt="%s"> % title'``), so the regex searched for the literal
    text and never matched. The format operation is now applied to the
    pattern before searching.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<span class="addrem-cat">.*?'
    patron += '<a href="([^"]+)" title="([^"]+)">.*?'
    patron += '<div class="cllnumber">([^<]+)</div>'
    for url, title, cantidad in re.compile(patron, re.DOTALL).findall(data):
        url = urlparse.urljoin(item.url, url)
        # Thumbnails are keyed by the category title in the page markup.
        thumbnail = scrapertools.find_single_match(
            data, '<img src="([^"]+)" alt="%s">' % title)
        itemlist.append(item.clone(title=title + " " + cantidad, url=url,
                                   action="videos", thumbnail=thumbnail))
    return sorted(itemlist, key=lambda i: i.title)
def videos(item):
    """List videos with duration and quality tags, plus a pager."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = ('<div class="mvhdico"><span>([^<]+)</span>.*?'
              '<a href="([^"]+)" title="([^"]+)" id="[^"]+">.*?'
              'src="([^"]+)"[^>]+>.*?'
              '<div class="mbtim">([^<]+)</div>')
    for quality, url, title, thumbnail, duration in re.findall(patron, data, re.DOTALL):
        tagged = "[COLOR yellow]" + duration + "[/COLOR] " + "[COLOR red]" + quality + "[/COLOR] " + title
        itemlist.append(item.clone(title=tagged, url=urlparse.urljoin(item.url, url),
                                   action="play", thumbnail=thumbnail,
                                   contentThumbnail=thumbnail,
                                   contentType="movie", contentTitle=tagged))
    # Pager
    next_page = scrapertools.find_single_match(data, "<a href='([^']+)' class='nmnext' title='Next page'>")
    if next_page != "":
        itemlist.append(item.clone(action="videos", title="Página Siguiente >>",
                                   text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
def int_to_base36(num):
    """Convert a non-negative integer into a base-36 string.

    Digits are 0-9 followed by lowercase a-z (the encoding the eporner
    XHR hash expects). ``int_to_base36(0)`` returns ``'0'``.

    Raises:
        ValueError: if num is negative. (The original used ``assert``,
        which is silently stripped under ``python -O``.)
    """
    if num < 0:
        raise ValueError("num must be non-negative, got %r" % (num,))
    digits = '0123456789abcdefghijklmnopqrstuvwxyz'
    res = ''
    # Loop at least once so 0 encodes as '0'.
    while not res or num > 0:
        num, i = divmod(num, 36)
        res = digits[i] + res
    return res
def play(item):
    """Resolve direct MP4 streams via the site's XHR endpoint.

    The page embeds a video id and a 32-hex-digit hash; the hash is
    re-encoded as four base-36 chunks of 8 hex digits each.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    vid, hash = re.findall("EP: { vid: '([^']+)', hash: '([^']+)'", data, re.DOTALL)[0]
    hash = "".join(int_to_base36(int(hash[i:i + 8], 16)) for i in (0, 8, 16, 24))
    url = "https://www.eporner.com/xhr/video/%s?hash=%s" % (vid, hash)
    jsondata = httptools.downloadpage(url).json
    for source in jsondata["sources"]["mp4"]:
        src = jsondata["sources"]["mp4"][source]["src"]
        label = source.split(" ")[0]
        itemlist.append(["%s %s [directo]" % (label, src[-4:]), src])
    # Sort by numeric resolution, e.g. "720p ..." -> 720.
    return sorted(itemlist, key=lambda i: int(i[0].split("p")[0]))

View File

@@ -1,16 +0,0 @@
{
"id": "eroticage",
"name": "eroticage",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://eroticage.net/wp-content/themes/oz-movie-v3/img/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,80 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
host = 'http://www.eroticage.net'
def mainlist(item):
    """Root menu for the channel."""
    logger.info()
    itemlist = [Item(channel=item.channel, title=t, action=a, url=host)
                for t, a in (("Novedades", "lista"), ("Categorias", "categorias"))]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """WordPress search; [] on failure so global search keeps working."""
    logger.info()
    item.url = host + "/?s=%s" % texto.replace(" ", "+")
    try:
        result = lista(item)
    except:
        import sys
        for part in sys.exc_info():
            logger.error("%s" % part)
        result = []
    return result
def categorias(item):
    """Scrape the TAGS sidebar block."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<h2>TAGS</h2>(.*?)<div class="sideitem"')
    for tag_url, tag_title in re.findall('<a href="(.*?)".*?>(.*?)</a>', data, re.DOTALL):
        itemlist.append(Item(channel=item.channel, action="lista",
                             title=tag_title, url=tag_url,
                             thumbnail="", plot=""))
    return itemlist
def lista(item):
    """List film entries with their poster art, plus a pager."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="titleFilm"><a href="([^"]+)">([^"]+)</a>.*?src="([^"]+)"'
    for film_url, film_title, film_thumb in re.findall(patron, data, re.DOTALL):
        itemlist.append(Item(channel=item.channel, action="play", title=film_title,
                             url=film_url, thumbnail=film_thumb, plot="",
                             fanart=film_thumb, contentTitle=film_title))
    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
    if next_page != "":
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>",
                                   text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
def play(item):
    """Resolve embedded video servers and re-tag each result.

    Bug fix: the original assigned ``videochannel = item.channel`` to a
    throwaway local instead of setting ``channel`` on each found item,
    so the resolved items kept the wrong channel.
    """
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist

View File

@@ -1,17 +0,0 @@
{
"id": "eroticasonlinetv",
"name": "eroticasonlinetv",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.peliculaseroticasonline.tv/wp-content/themes/wpeliculaseroticasonlinetv/favicon.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,85 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from platformcode import config
host = 'http://www.peliculaseroticasonline.tv'
def mainlist(item):
    """Root menu for the channel."""
    logger.info()
    itemlist = [Item(channel=item.channel, title=t, action=a, url=host)
                for t, a in (("Peliculas", "lista"), ("Categorias", "categorias"))]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """WordPress search; [] on failure so global search keeps working."""
    logger.info()
    item.url = host + "/?s=%s" % texto.replace(" ", "+")
    try:
        result = lista(item)
    except:
        import sys
        for part in sys.exc_info():
            logger.error("%s" % part)
        result = []
    return result
def categorias(item):
    """Scrape the WordPress category widget."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)".*?>([^"]+)</a>'
    for cat_url, cat_title in re.findall(patron, data, re.DOTALL):
        itemlist.append(Item(channel=item.channel, action="lista",
                             title=cat_title, url=cat_url,
                             thumbnail="", plot=""))
    return itemlist
def lista(item):
    """List movie posters, plus a pager."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="movie-poster"><a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)"'
    for movie_url, movie_thumb, movie_title in re.findall(patron, data, re.DOTALL):
        itemlist.append(Item(channel=item.channel, action="play", title=movie_title,
                             url=urlparse.urljoin(item.url, movie_url),
                             fanart=movie_thumb, thumbnail=movie_thumb,
                             plot="", contentTitle=movie_title))
    next_page = scrapertools.find_single_match(data, '<div class="naviright"><a href="([^"]+)">Siguiente &raquo;</a>')
    if next_page:
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>",
                                   text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
def play(item):
    """Follow the page's iframe and resolve its video servers."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    frame = urlparse.urljoin(item.url,
                             scrapertools.find_single_match(data, '<iframe src="([^"]+)"'))
    found = servertools.find_video_items(data=httptools.downloadpage(frame).data)
    for video in found:
        video.title = item.title
        video.fulltitle = item.fulltitle
        video.thumbnail = item.thumbnail
        video.channel = item.channel
    return found

View File

@@ -1,14 +0,0 @@
{
"id": "erotik",
"name": "Erotik",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.youfreeporntube.com/uploads/custom-logo.png",
"banner": "http://www.youfreeporntube.com/uploads/custom-logo.png",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,90 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
host = "https://www.youfreeporntube.net"
def mainlist(item):
    """Root menu for the channel."""
    logger.info()
    menu = [
        ("Útimos videos", "lista", host + "/newvideos.html?&page=1"),
        ("Populares", "lista", host + "/topvideos.html?page=1"),
        ("Categorias", "categorias", host + "/browse.html"),
        ("Buscar", "search", host + "/search.php?keywords="),
    ]
    return [Item(channel=item.channel, action=a, title=t, url=u)
            for t, a, u in menu]
def search(item, texto):
    """Append the query to the prepared search URL; [] on failure."""
    logger.info()
    item.url = "{0}{1}".format(item.url, texto.replace(" ", "+"))
    # Best-effort: a broken channel must not break the global search.
    try:
        result = lista(item)
    except:
        import sys
        for part in sys.exc_info():
            logger.error("{0}".format(part))
        result = []
    return result
def categorias(item):
    """Scrape category tiles."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}", "", data)
    patron = '<div class="pm-li-category"><a href="([^"]+)">.*?.<h3>(.*?)</h3></a>'
    return [Item(channel=item.channel, action="lista", title=name, url=cat_url)
            for cat_url, name in re.findall(patron, data, re.DOTALL)]
def lista(item):
    """List video tiles, plus a pager.

    Cleanup: the original initialized ``itemlist`` twice (dead code);
    it is now initialized once.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}", "", data)
    patron = '<li><div class=".*?'
    patron += '<a href="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?alt="([^"]+)"'
    for scrapedurl, scrapedthumbnail, scrapedtitle in re.compile(patron, re.DOTALL).findall(data):
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        itemlist.append(Item(channel=item.channel, action="play", thumbnail=thumbnail,
                             fanart=thumbnail, title=scrapedtitle.strip(), url=url,
                             viewmode="movie", folder=True))
    paginacion = scrapertools.find_single_match(
        data, '<li class="active">.*?</li>.*?<a href="([^"]+)">')
    if paginacion:
        itemlist.append(Item(channel=item.channel, action="lista",
                             title=">> Página Siguiente",
                             url=urlparse.urljoin(item.url, paginacion)))
    return itemlist
def play(item):
    """Extract the player iframe URL and let servertools identify it."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    url = scrapertools.find_single_match(data, '<div id="video-wrapper">.*?<iframe.*?src="([^"]+)"')
    found = [item.clone(action="play", title=url, url=url)]
    return servertools.get_servers_itemlist(found)

View File

@@ -1,15 +0,0 @@
{
"id": "fapality",
"name": "fapality",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://i.imgur.com/Orguh85.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,97 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'https://fapality.com'
def mainlist(item):
    """Root menu for fapality."""
    logger.info()
    menu = [
        ("Nuevas", "lista", host + "/newest/"),
        ("Mas Vistas", "lista", host + "/popular/"),
        ("Mejor valorada", "lista", host + "/top/"),
        ("Canal", "categorias", host + "/channels/"),
        ("PornStar", "categorias", host + "/pornstars/"),
        ("Categorias", "categorias", host + "/categories/"),
    ]
    itemlist = [Item(channel=item.channel, title=t, action=a, url=u)
                for t, a, u in menu]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Site search; [] on failure so the global search keeps working."""
    logger.info()
    item.url = host + "/search/?q=%s" % texto.replace(" ", "+")
    try:
        result = lista(item)
    except:
        import sys
        for part in sys.exc_info():
            logger.error("%s" % part)
        result = []
    return result
def categorias(item):
    """List channels/pornstars/categories with counts.

    Category listings are additionally sorted by name.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = ('<div class="item"><a href="([^"]+)" title="([^"]+)">.*?'
              '<img src="([^"]+)">.*?'
              '<div class="right">([^"]+)</div>')
    for cat_url, cat_title, cat_thumb, total in re.findall(patron, data, re.DOTALL):
        label = cat_title.replace("movies", "") + " (" + total + ")"
        itemlist.append(Item(channel=item.channel, action="lista", title=label,
                             url=urlparse.urljoin(item.url, cat_url),
                             fanart=cat_thumb, thumbnail=cat_thumb, plot=""))
    next_page_url = scrapertools.find_single_match(data, '<li itemprop="url" class="current">.*?<a href="([^"]+)"')
    if next_page_url != "":
        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>",
                                   text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page_url)))
    if "/categories/" in item.url:
        itemlist = sorted(itemlist, key=lambda i: i.title)
    return itemlist
def lista(item):
    """List video tiles, plus a pager."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = ('<li class="masonry-item item ".*?'
              '<a href="([^"]+)" class="kt_imgrc popfire" title="([^"]+)" >.*?'
              '<img src="([^"]+)"')
    for vid_url, vid_title, vid_thumb in re.findall(patron, data, re.DOTALL):
        itemlist.append(Item(channel=item.channel, action="play", title=vid_title,
                             url=vid_url, thumbnail=vid_thumb, fanart=vid_thumb,
                             plot="", contentTitle=vid_title))
    next_page_url = scrapertools.find_single_match(data, '<li itemprop="url" class="current">.*?<a href="([^"]+)"')
    if next_page_url != "":
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>",
                                   text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page_url)))
    return itemlist
def play(item):
    """Emit a playable item for each matched primary <source> tag."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    sources = re.findall('<source id="video_source_1" src="([^"]+)"', data, re.DOTALL)
    return [item.clone(action="play", title=src, contentTitle=item.title, url=src)
            for src in sources]

View File

@@ -1,15 +0,0 @@
{
"id": "fetishshrine",
"name": "fetishshrine",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://www.fetishshrine.com/images/foot-logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,92 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'https://www.fetishshrine.com'
def mainlist(item):
    """Build the channel's root menu (listings, categories, search)."""
    logger.info()
    sections = [
        ("Nuevas", "lista", host + "/latest-updates/"),
        ("Popular", "lista", host + "/most-popular/"),
        ("Mejor Valorado", "lista", host + "/top-rated/"),
        ("Categorias", "categorias", host + "/categories/"),
    ]
    menu = [Item(channel=item.channel, title=name, action=action, url=target)
            for name, action, target in sections]
    menu.append(Item(channel=item.channel, title="Buscar", action="search"))
    return menu
def search(item, texto):
    """Run a site search for *texto*; on any scraping error log and return []."""
    logger.info()
    item.url = host + "/search/?q=%s" % texto.replace(" ", "+")
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def categorias(item):
    """List category tiles; the displayed title carries the video count."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Flatten whitespace/entities so the DOTALL pattern spans whole tiles.
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href="([^"]+)" title="([^"]+)">.*?'
    patron += '<img src="([^"]+)".*?'
    patron += '<span class="vids">(\d+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
        scrapedplot = ""
        # Append the per-category video count, e.g. "Name (123)".
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
    return itemlist
def lista(item):
    """Scrape a results page: one playable Item per thumb plus a pager link."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href="([^"]+)" itemprop="url">.*?'
    patron += '<img src="([^"]+)" alt="([^"]+)">.*?'
    patron += '<span itemprop="duration" class="length">(.*?)</span>(.*?)<span class="thumb-info">'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,duracion,calidad in matches:
        url = scrapedurl
        # Prefix the duration (yellow) and an HD tag (red) when the snippet
        # between duration and thumb-info carries an HD badge.
        if ">HD<" in calidad:
            title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " +scrapedtitle
        else:
            title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
        contentTitle = scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                              plot=plot, fanart=scrapedthumbnail, contentTitle = contentTitle ))
    next_page = scrapertools.find_single_match(data,'<li><a data=\'\d+\' href="([^"]+)" title="Next">')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="lista" , title="Página Siguiente >>", text_color="blue", url=next_page) )
    return itemlist
def play(item):
    """Resolve the direct video URL(s) embedded in the player's JS config."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    found = scrapertools.find_multiple_matches(page, 'video_url: \'([^\']+)\'')
    results = []
    for direct_url in found:
        results.append(Item(channel=item.channel, action="play", title=item.title,
                            url=direct_url, thumbnail=item.thumbnail, plot=item.plot,
                            show=item.title, server="directo", folder=False))
    return results

View File

@@ -1,16 +0,0 @@
{
"id": "filmoviXXX",
"name": "filmoviXXX",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.filmovix.net/wp-content/themes/Video/skins/1-default/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,46 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
# BLOQUEO ESET INTERNET SECURITY
def mainlist(item):
    """Entry point: scrape the XXX category listing directly (no submenu).

    Parses the block between the category header and the "Novo dodato"
    section, emitting one playable Item per post plus a next-page link.
    """
    logger.info()
    itemlist = []
    if item.url=="":
        item.url = "http://www.filmovix.net/videoscategory/porno/"
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data,'<h1 class="cat_head">XXX</h1>(.*?)<h3> Novo dodato </h3>')
    patron = '<li class="clearfix">.*?'
    patron += 'src="([^"]+)".*?'
    patron += '<p class="title"><a href="([^"]+)" rel="bookmark" title="([^"]+)">'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
        contentTitle = scrapedtitle
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
                              thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle=contentTitle))
    # Pagination re-enters mainlist with the next page's URL.
    next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append(item.clone(action="mainlist", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
    return itemlist
def play(item):
    """Detect hosted videos on the page and tag each with this item's metadata."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    found = servertools.find_video_items(data=page)
    for video in found:
        video.title = item.title
        video.thumbnail = item.thumbnail
        video.channel = item.channel
    return found

View File

@@ -1,15 +0,0 @@
{
"id": "filmpornoita",
"name": "filmpornoita",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.filmpornoita.net/wp-content/uploads/2015/01/header1.jpg",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,57 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
from platformcode import config
host = 'http://www.streamxxxx.com'
def mainlist(item):
    """Root menu: full listing plus the category (tag) browser."""
    logger.info()
    entries = (("Peliculas", "lista"), ("Categorias", "categorias"))
    return [Item(channel=item.channel, title=name, action=action, url=host)
            for name, action in entries]
def categorias(item):
    """List tag links (the site exposes categories as a tag cloud)."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href=\'([^\']+)\' class=\'tag-link-.*?>([^<]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        # Tags carry no artwork; leave plot/thumbnail empty.
        scrapedplot = ""
        scrapedthumbnail = ""
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
    return itemlist
def lista(item):
    """Scrape post entries; each item hands off to findvideos for link extraction."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="post" id="post-\d+">.*?'
    patron += '<a href="([^"]+)" title="([^"]+)">.*?'
    patron += 'src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        plot = ""
        itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=plot , viewmode="movie") )
    next_page = scrapertools.find_single_match(data,'<a href="([^"]+)">Next')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
    return itemlist

View File

@@ -1,16 +0,0 @@
{
"id": "foxtube",
"name": "foxtube",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://fxtimg.com/xlogo_.png.pagespeed.ic.doVRQMV5ub.png|Referer=http://es.foxtube.com",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,123 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'http://es.foxtube.com'
def mainlist(item):
    """Root menu: latest videos, performers, categories and search."""
    logger.info()
    sections = [
        ("Ultimos", "lista", host),
        ("PornStar", "catalogo", host + '/actrices/'),
        ("Categorias", "categorias", host),
    ]
    menu = [Item(channel=item.channel, title=name, action=action, url=target)
            for name, action, target in sections]
    menu.append(Item(channel=item.channel, title="Buscar", action="search"))
    return menu
def search(item, texto):
    """Search the site; swallow scraping errors, log them and return []."""
    logger.info()
    item.url = "%s/buscador/%s" % (host, texto.replace(" ", "+"))
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def catalogo(item):
    """List performers ("actrices") with thumbnails and pagination.

    Fix: the original ended with two consecutive ``return itemlist``
    statements; the second was unreachable dead code and was removed.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a class="tco5" href="([^"]+)">.*?'
    patron += 'data-origen="([^"]+)" alt="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedplot = ""
        scrapedurl = urlparse.urljoin(item.url, scrapedurl)
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot=scrapedplot))
    next_page = scrapertools.find_single_match(data, '<a class="bgco2 tco3" rel="next" href="([^"]+)">&gt</a>')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)
        # NOTE(review): pagination keeps action="lista" as in the original; it looks
        # like it should be "catalogo" to keep paging performers — confirm first.
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist
def categorias(item):
    """List category links from the site's sidebar menu."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li class="bgco1"><a class="tco2" href="([^"]+)">([^"]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        # Category hrefs are relative; resolve against the current page.
        scrapedurl = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
    return itemlist
def lista(item):
    """Scrape video thumbs; on performer pages, restrict to their own section."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if "/actrices/" in item.url:
        # Cut off the "similar performers" block so only her videos match.
        data=scrapertools.find_single_match(data,'<section class="container">(.*?)>Actrices similares</h3>')
    patron = '<a class="thumb tco1" href="([^"]+)">.*?'
    patron += 'src="([^"]+)".*?'
    patron += 'alt="([^"]+)".*?'
    patron += '<span class="t">(.*?)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
        contentTitle = scrapedtitle
        time = scrapertools.find_single_match(duracion, '<i class="m tc2">([^"]+)</i>')
        if not 'HD' in duracion :
            title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle
        else:
            title = "[COLOR yellow]" + time + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle
        # Kodi-style "|Referer=" suffix so the thumbnail request sends a Referer.
        thumbnail = scrapedthumbnail + "|Referer=%s" %host
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                              fanart=thumbnail, plot=plot, contentTitle = contentTitle))
    next_page = scrapertools.find_single_match(data,'<a class="bgco2 tco3" rel="next" href="([^"]+)">&gt</a>')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="lista" , title="Página Siguiente >>", text_color="blue", url=next_page) )
    return itemlist
def play(item):
    """Resolve playback: the player iframe points at xvideos; rewrite the
    embed URL into the plain video-page URL and pull its HLS master URL."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    url = scrapertools.find_single_match(data,'<iframe title="video" src="([^"]+)"')
    # Turn the embedframe URL into the regular video-page URL.
    url = url.replace("https://flashservice.xvideos.com/embedframe/", "https://www.xvideos.com/video") + "/"
    data = httptools.downloadpage(url).data
    patron = 'html5player.setVideoHLS\\(\'([^\']+)\''
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:
        # Unescape JS-escaped slashes in the HLS URL.
        scrapedurl = scrapedurl.replace("\/", "/")
        itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl,
                             thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
    return itemlist

View File

@@ -1,16 +0,0 @@
{
"id": "freeporn",
"name": "freeporn",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://frprn.com/images/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,101 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'https://frprn.com'
def mainlist(item):
    """Root menu for frprn.com.

    NOTE(review): the "Mejor valorada" path is "/top-raped/" in the original;
    presumably the site's actual slug (vs. "/top-rated/") — confirm before changing.
    """
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host))
    itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-raped/"))
    itemlist.append( Item(channel=item.channel, title="Modelos" , action="categorias", url=host + "/models/most-popular/"))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Search videos; any scraping failure is logged and yields []."""
    logger.info()
    item.url = "%s/search/%s/" % (host, texto.replace(" ", "+"))
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def categorias(item):
    """List model or category thumbs; category tiles get a "(count)" suffix."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li class="thumb thumb-\w+">.*?'
    patron += '<a href="([^"]+)">.*?'
    patron += '<img class="lazy" data-original="([^"]+)".*?'
    patron += '<div class="title">(.*?)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        scrapedplot = ""
        # Model tiles keep the plain name from the "text" div...
        title = scrapertools.find_single_match(scrapedtitle,'<div class="text">([^<]+)<')
        if "/categories/" in item.url:
            # ...while category tiles show "Name (video count)".
            cantidad = scrapertools.find_single_match(scrapedtitle,'<div class="count">(\d+)</div>')
            scrapedtitle = scrapertools.find_single_match(scrapedtitle,'<div class="name">([^<]+)</div>')
            title = scrapedtitle + " (" + cantidad + ")"
        scrapedurl = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl,
                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
    next_page = scrapertools.find_single_match(data,'<li class="pagination-next"><a href="([^"]+)">')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
    return itemlist
def lista(item):
    """Scrape video thumbs; titles get a yellow duration prefix.

    Fix: removed the unused local ``year = ""`` left over in the loop body.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="thumb">.*?'
    patron += '<a href="([^"]+)".*?'
    patron += '<img class="lazy" data-original="([^"]+)" alt="([^"]+)".*?'
    patron += '<span class="duration">([^"]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, duracion in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
        contentTitle = title
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                              fanart=thumbnail, plot=plot, contentTitle = contentTitle))
    next_page = scrapertools.find_single_match(data,'<li class="pagination-next"><a href="([^"]+)">')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
    return itemlist
def play(item):
    """Read the og:video meta tag(s) and emit each as a direct-URL item."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    results = []
    for video_url in scrapertools.find_multiple_matches(page, '<meta property="og:video" content="([^"]+)"'):
        results.append(item.clone(action="play", title=video_url,
                                  contentTitle=video_url, url=video_url))
    return results

View File

@@ -1,16 +0,0 @@
{
"id": "freepornstreams",
"name": "freepornstreams",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://freepornstreams.org/wp-content/uploads/2016/11/FPS_Logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,95 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
host = 'http://freepornstreams.org' #es http://xxxstreams.org
def mainlist(item):
    """Root menu: movies, clips, source-site list, tags and search."""
    logger.info()
    rows = [
        ("Peliculas", "lista", host + "/free-full-porn-movies/"),
        ("Videos", "lista", host + "/free-stream-porn/"),
        ("Canal", "categorias", host),
        ("Categorias", "categorias", host),
    ]
    menu = [Item(channel=item.channel, title=name, action=action, url=target)
            for name, action, target in rows]
    menu.append(Item(channel=item.channel, title="Buscar", action="search"))
    return menu
def search(item, texto):
    """Query the site's search endpoint; log errors and return [] on failure."""
    logger.info()
    item.url = "%s/?s=%s" % (host, texto.replace(" ", "+"))
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def categorias(item):
    """List either the "Top Tags" block (Categorias) or "Top Sites" (Canal)."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # The menu entry's title decides which sidebar block gets parsed.
    if item.title == "Categorias" :
        data = scrapertools.find_single_match(data,'>Top Tags(.*?)</ul>')
    else:
        data = scrapertools.find_single_match(data,'>Top Sites</a>(.*?)</aside>')
    patron = '<a href="([^"]+)">(.*?)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle in matches:
        if not "Featured" in scrapedtitle:
            scrapedplot = ""
            scrapedthumbnail = ""
            # Strip the outbound-click redirector prefix to get the real URL.
            scrapedurl = scrapedurl.replace ("http://freepornstreams.org/freepornst/stout.php?s=100,75,65:*&#038;u=" , "")
            itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                                  thumbnail=scrapedthumbnail, plot=scrapedplot) )
    return itemlist
def lista(item):
    """Scrape post listings; prefix a red quality tag inferred from the title."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<article id="post-\d+".*?'
    patron += '<a href="([^"]+)" rel="bookmark">(.*?)</a>.*?'
    patron += '<img src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        # First matching quality marker wins.
        if '/HD' in scrapedtitle : title= "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle
        elif 'SD' in scrapedtitle : title= "[COLOR red]" + "SD" + "[/COLOR] " + scrapedtitle
        elif 'FullHD' in scrapedtitle : title= "[COLOR red]" + "FullHD" + "[/COLOR] " + scrapedtitle
        elif '1080' in scrapedtitle : title= "[COLOR red]" + "1080p" + "[/COLOR] " + scrapedtitle
        else: title = scrapedtitle
        # Some thumbs carry a trailing fragment marker after ".jpg"; drop it.
        thumbnail = scrapedthumbnail.replace("jpg#", "jpg")
        plot = ""
        itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl, thumbnail=thumbnail,
                              fanart=thumbnail, plot=plot, contentTitle=title) )
    next_page = scrapertools.find_single_match(data, '<div class="nav-previous"><a href="([^"]+)"')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
    return itemlist
def findvideos(item):
    """Collect Streaming/Download links from the post (skipping ubiqfile) and
    let servertools identify the hosting server for each."""
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|\s{2}|&nbsp;", "", data)
    patron = '<a href="([^"]+)" rel="nofollow"[^<]+>(?:Streaming|Download)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url in matches:
        if not "ubiqfile" in url:
            # Title is a "%s" placeholder filled with the server name below.
            itemlist.append(item.clone(action='play',title="%s", url=url))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    return itemlist

View File

@@ -1,15 +0,0 @@
{
"id": "gotporn",
"name": "gotporn",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://cdn2-static-cf.gotporn.com/desktop/img/gotporn-logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,129 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'https://www.gotporn.com'
def mainlist(item):
    """Root menu for gotporn.com (all listing URLs start at page 1)."""
    logger.info()
    rows = [
        ("Nuevos", "lista", host + "/?page=1"),
        ("Mejor valorados", "lista", host + "/top-rated?page=1"),
        ("Mas vistos", "lista", host + "/most-viewed?page=1"),
        ("Longitud", "lista", host + "/longest?page=1"),
        ("Canal", "catalogo", host + "/channels?page=1"),
        ("Categorias", "categorias", host + "/categories"),
    ]
    menu = [Item(channel=item.channel, title=name, action=action, url=target)
            for name, action, target in rows]
    menu.append(Item(channel=item.channel, title="Buscar", action="search"))
    return menu
def search(item, texto):
    """Run a search query; scraping errors are logged and yield []."""
    logger.info()
    item.url = "%s/results?search_query=%s" % (host, texto.replace(" ", "+"))
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def categorias(item):
    """List category links; titles include the per-category video count."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
    patron = '<a href="([^"]+)">'
    patron += '<span class="text">([^<]+)</span>'
    patron += '<span class="num">([^<]+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,cantidad in matches:
        scrapedplot = ""
        scrapedtitle = "%s %s" % (scrapedtitle,cantidad)
        # Category links land on page 1 of that category's listing.
        scrapedurl = scrapedurl + "?page=1"
        thumbnail = ""
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              thumbnail=thumbnail , plot=scrapedplot) )
    return itemlist
def catalogo(item):
    """List channels with thumbnails and pagination.

    Fix: removed a leftover ``logger.debug(data)`` that dumped the entire
    downloaded HTML page into the log on every call.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
    patron = '<header class="clearfix" itemscope>.*?'
    patron += '<a href="([^"]+)".*?'
    patron += '<img src="([^"]+)" alt="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedplot = ""
        scrapedurl = scrapedurl + "?page=1"
        # Thumbnails come protocol-relative ("//..."); force https.
        thumbnail = "https:" + scrapedthumbnail
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             thumbnail=thumbnail, plot=scrapedplot))
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="btn btn-secondary"><span class="text">Next')
    if next_page:
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue",
                             url=next_page))
    return itemlist
def lista(item):
    """Scrape a video listing: duration (and HD badge) prefixed titles plus a
    pager whose selector depends on the listing type."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
    patron = '<li class="video-item poptrigger".*?'
    patron += 'href="([^"]+)" data-title="([^"]+)".*?'
    patron += '<span class="duration">(.*?)</span>.*?'
    patron += 'src=\'([^\']+)\'.*?'
    patron += '<h3 class="video-thumb-title(.*?)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedtime,scrapedthumbnail,quality in matches:
        scrapedtime = scrapedtime.strip()
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        # A non-empty class suffix on the title <h3> marks an HD video.
        if quality:
            title = "[COLOR yellow]%s[/COLOR] [COLOR red]HD[/COLOR] %s" % (scrapedtime,scrapedtitle)
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
                              fanart=thumbnail, plot=plot,))
    # The "next" link markup differs between plain listings, category pages
    # and search results; pick the matching selector.
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="btn btn-secondary')
    if "categories" in item.url:
        next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="btn btn-secondary paginate-show-more')
    if "search_query" in item.url:
        next_page = scrapertools.find_single_match(data, '<link rel=\'next\' href="([^"]+)">')
    if next_page:
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
                              url=next_page) )
    return itemlist
def play(item):
    """Collect <source> URLs, appending the Referer header suffix the host needs."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    page = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", page)
    results = []
    for direct_url in scrapertools.find_multiple_matches(page, '<source src="([^"]+)"'):
        results.append(item.clone(action="play", title=item.title,
                                  url=direct_url + "|Referer=%s" % host))
    return results

View File

@@ -1,16 +0,0 @@
{
"id": "hclips",
"name": "hclips",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.hclips.com/apple-touch-icon-152x152.png?v=3",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,118 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'http://www.hclips.com'
def mainlist(item):
    """Root menu for hclips.com."""
    logger.info()
    listings = [
        ("Nuevos", host + "/latest-updates/"),
        ("Popular", host + "/most-popular/?"),
        ("Longitud", host + "/longest/?"),
    ]
    menu = [Item(channel=item.channel, title=name, action="peliculas", url=target)
            for name, target in listings]
    menu.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/"))
    menu.append(Item(channel=item.channel, title="Buscar", action="search"))
    return menu
def search(item, texto):
    """Search videos; any scraping failure is logged and yields []."""
    logger.info()
    item.url = "%s/search/?q=%s" % (host, texto.replace(" ", "+"))
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def categorias(item):
    """List category tiles (title, thumbnail, video count).

    Fix: the count-bearing ``title`` was built with stray backslash escapes
    ("\\(...\\)") and then never used — the plain name was shown instead.
    The title is now formatted as "Name (N)" and actually passed to the Item.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href="([^"]+)" class="thumb">.*?'
    patron += 'src="([^"]+)".*?'
    patron += '<strong class="title">([^"]+)</strong>.*?'
    patron += '<b>(.*?)</b>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, vidnum in matches:
        scrapedplot = ""
        title = "%s (%s)" % (scrapedtitle, vidnum)
        itemlist.append(Item(channel=item.channel, action="peliculas", title=title, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot=scrapedplot))
    return itemlist
def peliculas(item):
    """Scrape a video listing page; titles are prefixed with the duration."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<a href="([^"]+)" class="thumb">.*?'
    patron += '<img src="([^"]+)" alt="([^"]+)".*?'
    patron += '<span class="dur">(.*?)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,time in matches:
        title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle
        contentTitle = title
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
                              thumbnail=thumbnail, plot=plot, contentTitle = contentTitle))
    next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)" title="Next Page">Next</a>')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append(item.clone(action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
    return itemlist
def play(item):
    """Reassemble the protected video URL.

    The page splits it across ``var video_url`` and a ``+=`` continuation;
    the joined string is '||'-separated: [0] encoded URL, [1] replacement
    /get_file/ path, [2] 'lip' and [3] 'lt' query parameters.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"')
    video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"')
    partes = video_url.split('||')
    video_url = decode_url(partes[0])
    video_url = re.sub('/get_file/\d+/[0-9a-z]{32}/', partes[1], video_url)
    # Use '&' if a query string already exists, otherwise start one with '?'.
    video_url += '&' if '?' in video_url else '?'
    video_url += 'lip=' + partes[2] + '&lt=' + partes[3]
    itemlist.append(item.clone(action="play", title=item.title, url=video_url))
    return itemlist
def decode_url(txt):
    """Decode the site's obfuscated video URL (a base64 variant).

    Uses a custom 65-symbol alphabet where '.', ',' and '~' replace the
    usual '+', '/' and '=' padding; the decoded bytes are percent-decoded.
    """
    _0x52f6x15 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~'
    reto = ''; n = 0
    # In the next two lines the Cyrillic letters АВСЕМ occupy 2 bytes each;
    # the replace() maps them onto their 1-byte Latin look-alikes ABCEM
    # before decoding.  (АВСЕМ = 10 bytes, ABCEM = 5 bytes.)
    txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt)
    txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M')
    while n < len(txt):
        # Consume four alphabet symbols -> up to three output bytes (base64 scheme).
        a = _0x52f6x15.index(txt[n])
        n += 1
        b = _0x52f6x15.index(txt[n])
        n += 1
        c = _0x52f6x15.index(txt[n])
        n += 1
        d = _0x52f6x15.index(txt[n])
        n += 1
        a = a << 2 | b >> 4
        b = (b & 15) << 4 | c >> 2
        e = (c & 3) << 6 | d
        reto += chr(a)
        # Index 64 ('~') acts as padding: skip the corresponding output byte.
        if c != 64: reto += chr(b)
        if d != 64: reto += chr(e)
    return urllib.unquote(reto)

View File

@@ -1,16 +0,0 @@
{
"id": "hdzog",
"name": "hdzog",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.hdzog.com/apple-touch-icon-120x120.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,119 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'http://www.hdzog.com'
def mainlist(item):
    """Root menu for hdzog.com."""
    logger.info()
    sections = [
        ("Nuevos", "lista", "/new/"),
        ("Popular", "lista", "/popular/"),
        ("Longitud", "lista", "/longest/"),
        ("Categorias", "categorias", "/categories/"),
    ]
    menu = [Item(channel=item.channel, title=name, action=action, url=host + path)
            for name, action, path in sections]
    menu.append(Item(channel=item.channel, title="Buscar", action="search"))
    return menu
def search(item, texto):
    """Search videos; log any scraping error and return [] instead of raising."""
    logger.info()
    item.url = "%s/search/?q=%s" % (host, texto.replace(" ", "+"))
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def categorias(item):
    """List category tiles sorted newest-first, with video counts in the title.

    Fix: the count-bearing ``title`` was built (with stray "\\(" escapes) but
    never used — the plain name was shown instead. The title is now formatted
    as "Name (N)" and actually passed to the Item.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data,'<ul class="cf">(.*?)</ul>')
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li>.*?<a href="([^"]+)".*?'
    patron += '<img class="thumb" src="([^"]+)" alt="([^"]+)".*?'
    patron += '<span class="videos-count">(\d+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,vidnum in matches:
        scrapedplot = ""
        # Force newest-first ordering inside the category listing.
        url = scrapedurl + "?sortby=post_date"
        title = "%s (%s)" % (scrapedtitle, vidnum)
        itemlist.append( Item(channel=item.channel, action="lista", title=title, url=url,
                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
    return itemlist
def lista(item):
    """Parse a listing page into playable items, plus a pager entry if any."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data,'<ul class="cf">(.*?)<h2>Advertisement</h2>')
    patron = ('<li>.*?<a href="([^"]+)".*?'
              'src="([^"]+)" alt="([^"]+)".*?'
              '<span class="time">(.*?)</span>')
    for scrapedurl, scrapedthumbnail, scrapedtitle, time in re.compile(patron, re.DOTALL).findall(data):
        # Prefix the duration in yellow (Kodi label-formatting markup).
        label = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle
        itemlist.append(Item(channel=item.channel, action="play", title=label,
                             url=scrapedurl, thumbnail=scrapedthumbnail, plot="",
                             contentTitle=scrapedtitle))
    next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" title="Next Page" data-page-num="\d+">Next page &raquo;</a>')
    if next_page != "":
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
def play(item):
    """Resolve the final video URL for a listing entry.

    The page assembles the URL in JavaScript as '||'-separated parts:
    an obfuscated base URL, a replacement path segment, and two extra
    query parameters (lip / lt) appended at the end.
    """
    logger.info(item)
    data = httptools.downloadpage(item.url).data
    video_url = (scrapertools.find_single_match(data, 'var video_url="([^"]*)"')
                 + scrapertools.find_single_match(data, 'video_url\+="([^"]*)"'))
    partes = video_url.split('||')
    video_url = decode_url(partes[0])
    # Swap the signed /get_file/... path segment for the one the page supplies.
    video_url = re.sub('/get_file/\d+/[0-9a-z]{32}/', partes[1], video_url)
    separator = '&' if '?' in video_url else '?'
    video_url = video_url + separator + 'lip=' + partes[2] + '&lt=' + partes[3]
    return [item.clone(action="play", title=item.title, url=video_url)]
def decode_url(txt):
    """Decode the site's obfuscated video URL.

    The encoding is a base64 variant: the standard alphabet with '.' and
    ',' in place of '+' and '/', and '~' (alphabet index 64) as the
    padding character.  Some letters are swapped for visually identical
    Cyrillic homoglyphs (А, В, С, Е, М) to defeat naive decoders, so
    those are normalised back to ASCII first.

    :param txt: obfuscated string scraped from the page's JavaScript
    :return: the percent-decoded (unquoted) URL
    """
    # FIX: urllib.unquote is Python-2-only; import the right function for
    # either interpreter so this pure helper works on Py2 and Py3.
    try:
        from urllib import unquote          # Python 2
    except ImportError:
        from urllib.parse import unquote    # Python 3
    alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~'
    # The Cyrillic АВСЕМ letters occupy 2 bytes each in UTF-8; keep them
    # through the filter, then map each to its 1-byte ASCII lookalike.
    txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt)
    txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M')
    reto = ''
    n = 0
    while n < len(txt):
        # Read four 6-bit symbols ('~' == index 64 marks padding).
        a = alphabet.index(txt[n]); n += 1
        b = alphabet.index(txt[n]); n += 1
        c = alphabet.index(txt[n]); n += 1
        d = alphabet.index(txt[n]); n += 1
        # Repack the four 6-bit values into up to three 8-bit characters.
        first = a << 2 | b >> 4
        second = (b & 15) << 4 | c >> 2
        third = (c & 3) << 6 | d
        reto += chr(first)
        if c != 64:
            reto += chr(second)
        if d != 64:
            reto += chr(third)
    return unquote(reto)

View File

@@ -1,16 +0,0 @@
{
"id": "hellporno",
"name": "hellporno",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://hellporno.com/apple-touch-icon-152x152.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,89 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'http://hellporno.com'
def mainlist(item):
    """Root menu of the channel: newest videos, categories and search."""
    logger.info()
    sections = [
        ("Nuevas", "lista", host + "/?page=1"),
        ("Categorias", "categorias", host + "/categories/"),
    ]
    itemlist = [Item(channel=item.channel, title=label, action=action, url=url)
                for label, action, url in sections]
    # The search entry has no URL; KOD routes it through search().
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Search the site for *texto* and return the parsed result list.

    The query is URL-encoded naively (spaces -> '+') as the site expects.
    On any scraping error the failure is logged and an empty list is
    returned so the UI degrades gracefully instead of crashing.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/?q=%s" % texto
    try:
        return lista(item)
    except Exception:
        # FIX: was a bare "except:", which also swallows SystemExit and
        # KeyboardInterrupt; the redundant local "import sys" is removed
        # (sys is already imported at module level).
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def categorias(item):
    """List the site's categories with their video counts; follows pagination."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = ('<a href="([^"]+)">.*?'
              '<img src="([^"]+)" alt="([^"]+) - Porn videos">.*?'
              '<span>(\d+) videos</span>')
    for scrapedurl, scrapedthumbnail, scrapedtitle, cantidad in re.compile(patron, re.DOTALL).findall(data):
        # Show the number of videos next to the category name.
        label = scrapedtitle + " (" + cantidad + ")"
        itemlist.append(Item(channel=item.channel, action="lista", title=label, url=scrapedurl,
                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=""))
    next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next">Next page &raquo;</a>')
    if next_page != "":
        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
def lista(item):
    """Parse a listing page into playable items, plus a pager entry if any."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = ('<div class="video-thumb"><a href="([^"]+)" class="title".*?>([^"]+)</a>.*?'
              '<span class="time">([^<]+)</span>.*?'
              '<video muted poster="([^"]+)"')
    for scrapedurl, scrapedtitle, duracion, scrapedthumbnail in re.compile(patron, re.DOTALL).findall(data):
        # Prefix the duration in yellow (Kodi label-formatting markup).
        label = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
        itemlist.append(Item(channel=item.channel, action="play", title=label, url=scrapedurl,
                             thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot="",
                             contentTitle=label))
    next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next">Next page &raquo;</a>')
    if next_page != "":
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
def play(item):
    """Extract the direct video URL, preferring the HD <source> tag."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    url = scrapertools.find_single_match(data,'<source data-fluid-hd src="([^"]+)/?br=\d+"')
    if not url:
        # No HD source on the page — fall back to the standard one.
        url = scrapertools.find_single_match(data,'<source src="([^"]+)/?br=\d+"')
    return [item.clone(action="play", title=url, url=url)]

Some files were not shown because too many files have changed in this diff Show More