y alguno mas
This commit is contained in:
16
plugin.video.alfa/channels/LIKUOO.json
Normal file
16
plugin.video.alfa/channels/LIKUOO.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "LIKUOO",
|
||||
"name": "LIKUOO",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "https://likuoo.video/files_static/images/logo.jpg",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
88
plugin.video.alfa/channels/LIKUOO.py
Normal file
88
plugin.video.alfa/channels/LIKUOO.py
Normal file
@@ -0,0 +1,88 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'http://www.likuoo.video'
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu: latest videos, categories and search."""
    logger.info()
    entries = [
        Item(channel=item.channel, title="Ultimos", action="peliculas", url=host),
        Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/all-channels/"),
        Item(channel=item.channel, title="Buscar", action="search"),
    ]
    return entries
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto* and return the parsed result items.

    Spaces are encoded as '+'.  Any scraping failure is logged and an
    empty list is returned so the UI does not crash.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/?s=%s" % texto
    try:
        return peliculas(item)
    # Fix: narrowed the bare `except:` and dropped the redundant
    # `import sys` (sys is already imported at module level).
    except Exception:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """List the site's channel/category pages."""
    logger.info()
    results = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    pattern = '<div class="item_p">.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"'
    found = re.compile(pattern, re.DOTALL).findall(data)
    scrapertools.printMatches(found)
    for cat_url, cat_title, cat_thumb in found:
        results.append(Item(channel=item.channel, action="peliculas", title=cat_title,
                            url=urlparse.urljoin(item.url, cat_url),
                            thumbnail="https:" + cat_thumb, plot="", folder=True))
    return results
|
||||
|
||||
|
||||
def peliculas(item):
    """List the video entries of the current page plus a pagination item."""
    logger.info()
    results = []
    page = scrapertools.cachePage(item.url)
    page = re.sub(r"\n|\r|\t| |<br>", "", page)
    pattern = '<div class="item">.*?<a href="([^"]+)" title="(.*?)">.*?src="(.*?)".*?<div class="runtime">(.*?)</div>'
    for vid_url, vid_title, vid_thumb, runtime in re.compile(pattern, re.DOTALL).findall(page):
        # Reformat the runtime text ("12m34s" style) before colouring it.
        runtime = runtime.replace("m", ":").replace("s", " ")
        label = "[COLOR yellow]" + runtime + "[/COLOR] " + vid_title
        results.append(Item(channel=item.channel, action="play", title=label,
                            url=urlparse.urljoin(item.url, vid_url),
                            thumbnail="https:" + vid_thumb, plot="",
                            contentTitle=label, infoLabels={'year': ""}))
    next_url = scrapertools.find_single_match(page, '...<a href="([^"]+)" class="next">»</a>')
    if next_url != "":
        results.append(Item(channel=item.channel, action="peliculas",
                            title="Página Siguiente >>", text_color="blue",
                            url=urlparse.urljoin(item.url, next_url), folder=True))
    return results
|
||||
|
||||
|
||||
def play(item):
    """Resolve playable video links from the detail page.

    Downloads the page, lets servertools discover embedded video items,
    then copies this item's metadata onto each discovered item.
    """
    logger.info()
    data = scrapertools.cachePage(item.url)
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.fulltitle
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        # Fix: the original assigned to a throwaway local (`videochannel`),
        # leaving the channel attribute unset on the playable item.
        videoitem.channel = item.channel
    return itemlist
|
||||
|
||||
15
plugin.video.alfa/channels/javlin.json
Normal file
15
plugin.video.alfa/channels/javlin.json
Normal file
@@ -0,0 +1,15 @@
|
||||
{
|
||||
"id": "javlin",
|
||||
"name": "javlin",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://javl.in/wp-content/uploads/2015/07/favicon1.ico",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
85
plugin.video.alfa/channels/javlin.py
Normal file
85
plugin.video.alfa/channels/javlin.py
Normal file
@@ -0,0 +1,85 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
|
||||
|
||||
host = 'http://javl.in'
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu."""
    logger.info()
    menu = []
    menu.append(Item(channel=item.channel, title="Peliculas", action="peliculas", url=host))
    menu.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
    menu.append(Item(channel=item.channel, title="Buscar", action="search"))
    return menu
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto*; on failure log and return []."""
    # Fix: the original logged a stale copy-pasted tag
    # ("pelisalacarta.gmobi mainlist") from another channel.
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
    # Fix: narrowed bare `except:`; dropped the redundant local `import sys`.
    except Exception:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """List genre categories parsed from the <option> dropdown."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<option class="level-0" value="([^"]+)">([^"]+) \((.*?)\)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for cat_id, cat_title, number in matches:
        # Fixes vs. original: build the URL from the module-level `host`
        # instead of a second hardcoded domain; drop the unused
        # `thumbnail = urlparse.urljoin(...)` dead code and redundant str().
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="%s (%s)" % (cat_title, number),
                             url=host + "/?cat=" + cat_id,
                             thumbnail="", plot="", folder=True))
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List featured movies of the current page plus pagination."""
    logger.info()
    results = []
    page = httptools.downloadpage(item.url).data
    pattern = '<div class="featured-wrap clearfix">.*?<a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
    found = re.compile(pattern, re.DOTALL).findall(page)
    scrapertools.printMatches(found)
    for movie_url, movie_title, movie_thumb in found:
        results.append(Item(channel=item.channel, action="play", title=movie_title,
                            url=movie_url, thumbnail=movie_thumb, plot="", folder=True))
    next_url = scrapertools.find_single_match(page, '<li><a rel=\'nofollow\' href=\'([^\']+)\' class=\'inactive\'>Next')
    if next_url != "":
        results.append(Item(channel=item.channel, action="peliculas",
                            title="Página Siguiente >>", text_color="blue",
                            url=urlparse.urljoin(item.url, next_url), folder=True))
    return results
|
||||
|
||||
|
||||
def play(item):
    """Find playable servers in the detail page and tag them with metadata."""
    logger.info()
    page = scrapertools.cachePage(item.url)
    found = servertools.find_video_items(data=page)
    for video in found:
        video.title = item.title
        video.fulltitle = item.fulltitle
        video.thumbnail = item.thumbnail
        video.channel = item.channel
    return found
|
||||
|
||||
16
plugin.video.alfa/channels/justporn.json
Normal file
16
plugin.video.alfa/channels/justporn.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "justporn",
|
||||
"name": "justporn",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://xxx.justporno.tv/images/logo.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
89
plugin.video.alfa/channels/justporn.py
Normal file
89
plugin.video.alfa/channels/justporn.py
Normal file
@@ -0,0 +1,89 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'http://xxx.justporno.tv'
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu."""
    logger.info()
    menu = [
        Item(channel=item.channel, title="Ultimos", action="peliculas", url=host + "/latest-updates/1/"),
        Item(channel=item.channel, title="Mejor valoradas", action="peliculas", url=host + "/top-rated/"),
        Item(channel=item.channel, title="Mas Vistas", action="peliculas", url=host + "/most-popular/"),
        Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/"),
        Item(channel=item.channel, title="Buscar", action="search"),
    ]
    return menu
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto*; on failure log and return []."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/%s" % texto
    try:
        return peliculas(item)
    # Fix: narrowed bare `except:`; dropped the redundant local `import sys`
    # (sys is already imported at module level).
    except Exception:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """List category pages with their video counts."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?<div class="videos">(\d+) video.*?</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle, numero in matches:
        # Fix: removed the original's dead `thumbnail = urlparse.urljoin(...)`
        # computation (it was never used; the Item got the empty string).
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title=scrapedtitle + " (" + numero + ")",
                             url=scrapedurl, thumbnail="", plot="", folder=True))
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List video entries of the current page plus a pagination item."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Fix: build URL prefixes from the module-level `host` instead of
    # repeating the hardcoded domain (resulting strings are identical).
    patron = '<a href="' + host + '/videos/(\d+)/.*?" title="([^"]+)" >.*?data-original="([^"]+)".*?<div class="duration">(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for video_id, scrapedtitle, scrapedthumbnail, scrapedtime in matches:
        title = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle
        url = host + "/embed/" + video_id
        itemlist.append(Item(channel=item.channel, action="play", title=title,
                             url=url, thumbnail=scrapedthumbnail, plot="", folder=True))
    next_page_url = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="Página Siguiente >>", text_color="blue",
                             url=next_page_url, folder=True))
    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Extract the direct video_url from the embed page."""
    logger.info()
    results = []
    page = httptools.downloadpage(item.url).data
    page = re.sub(r"\n|\r|\t| |<br>|<br/>", "", page)
    found = re.compile('video_url: \'([^\']+)\'', re.DOTALL).findall(page)
    scrapertools.printMatches(found)
    for direct_url in found:
        results.append(item.clone(channel=item.channel, action="play",
                                  title=direct_url, url=direct_url,
                                  plot="", folder=True))
    return results
|
||||
|
||||
16
plugin.video.alfa/channels/mporno.json
Normal file
16
plugin.video.alfa/channels/mporno.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "mporno",
|
||||
"name": "mporno",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://mporno.tv/templates/fluidporn/img/logo.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
89
plugin.video.alfa/channels/mporno.py
Normal file
89
plugin.video.alfa/channels/mporno.py
Normal file
@@ -0,0 +1,89 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'http://mporno.tv'
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu (plot carries the listing path suffix)."""
    logger.info()
    menu = [
        Item(channel=item.channel, title="Novedades", action="peliculas", url=host + "/most-recent/", plot="/most-recent/"),
        Item(channel=item.channel, title="Mejor valoradas", action="peliculas", url=host + "/top-rated/", plot="/top-rated/"),
        Item(channel=item.channel, title="Mas vistas", action="peliculas", url=host + "/most-viewed/", plot="/most-viewed/"),
        Item(channel=item.channel, title="Longitud", action="peliculas", url=host + "/longest/", plot="/longest/"),
        Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/channels/"),
        Item(channel=item.channel, title="Buscar", action="search"),
    ]
    return menu
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto*; on failure log and return []."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/videos/%s/page1.html" % texto
    try:
        return peliculas(item)
    # Fix: narrowed bare `except:`; dropped the redundant local `import sys`.
    except Exception:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """List channel categories; the plot keeps the path used for paging."""
    logger.info()
    results = []
    page = httptools.downloadpage(item.url).data
    pattern = '<h3><a href="([^"]+)">(.*?)</a> <small>(.*?)</small></h3>'
    found = re.compile(pattern, re.DOTALL).findall(page)
    scrapertools.printMatches(found)
    for cat_url, cat_title, count in found:
        # Strip the mirror domain and the page suffix to keep a relative path.
        path = cat_url.replace("http://mporno.unblckd.org/", "").replace("page1.html", "")
        results.append(Item(channel=item.channel, action="peliculas",
                            title=cat_title + " " + count, url=cat_url,
                            thumbnail="", plot=path, folder=True))
    return results
|
||||
|
||||
|
||||
def peliculas(item):
    """List video entries of the current page plus a pagination item.

    The video URL is derived from the thumbnail URL by swapping the
    /thumbs/ path for /videos/ and restoring the .mp4 extension.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<img class="content_image" src="([^"]+).mp4/.*?" alt="([^"]+)".*?this.src="(.*?)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedurl = scrapedurl.replace("/thumbs/", "/videos/") + ".mp4"
        itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle,
                             url=scrapedurl, thumbnail=scrapedthumbnail,
                             plot=item.plot, contentTitle=scrapedtitle,
                             infoLabels={'year': ""}))
    next_page_url = scrapertools.find_single_match(data, '<a href=\'([^\']+)\' class="next">Next >></a>')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="Página Siguiente >>", text_color="blue",
                             url=next_page_url, folder=True))
    # Fix: removed the original's large block of commented-out alternative
    # pagination code (dead code).
    return itemlist
|
||||
|
||||
16
plugin.video.alfa/channels/muchoporno.json
Normal file
16
plugin.video.alfa/channels/muchoporno.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "muchoporno",
|
||||
"name": "muchoporno",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "https://www.muchoporno.xxx/assets/css/logo/images/sprite-muchoporno.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
85
plugin.video.alfa/channels/muchoporno.py
Normal file
85
plugin.video.alfa/channels/muchoporno.py
Normal file
@@ -0,0 +1,85 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'https://www.muchoporno.xxx'
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu."""
    logger.info()
    menu = [
        Item(channel=item.channel, title="Nuevas", action="peliculas", url=host),
        Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categorias/"),
        Item(channel=item.channel, title="Buscar", action="search"),
    ]
    return menu
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto*; on failure log and return []."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/?q=%s" % texto
    try:
        return peliculas(item)
    # Fix: narrowed bare `except:`; dropped the redundant local `import sys`.
    except Exception:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """List the site's category pages."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<a class="muestra-escena muestra-categoria" href="([^"]+)" title="([^"]+)">.*?src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        # Fix: removed the original's no-op `scrapedtitle = scrapedtitle` line.
        scrapedurl = urlparse.urljoin(item.url, scrapedurl)
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot="", folder=True))
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List scene entries (duration-prefixed titles) plus pagination."""
    logger.info()
    results = []
    page = scrapertools.cachePage(item.url)
    page = re.sub(r"\n|\r|\t| |<br>", "", page)
    pattern = '<a class="muestra-escena"\s*href="([^"]+)".*?data-stats-video-name="([^"]+)".*?<img src="([^"]+)".*?<span class="ico-minutos sprite" title="Length"></span>([^"]+)</span>'
    for scene_url, scene_name, scene_thumb, length in re.compile(pattern, re.DOTALL).findall(page):
        label = "[COLOR yellow]" + length + "[/COLOR] " + scene_name
        results.append(Item(channel=item.channel, action="play", title=label,
                            url=urlparse.urljoin(item.url, scene_url),
                            thumbnail=scene_thumb, plot="",
                            contentTitle=label, infoLabels={'year': ""}))
    next_url = scrapertools.find_single_match(page, '<li><a href="([^"]+)">Siguiente</a></li>')
    if next_url != "":
        results.append(Item(channel=item.channel, action="peliculas",
                            title="Página Siguiente >>", text_color="blue",
                            url=urlparse.urljoin(item.url, next_url), folder=True))
    return results
|
||||
|
||||
|
||||
def play(item):
    """Extract the direct mp4 <source> URLs from the scene page."""
    logger.info()
    results = []
    page = scrapertools.cachePage(item.url)
    sources = scrapertools.find_multiple_matches(page, '<source src="([^"]+)" type="video/mp4"')
    for source_url in sources:
        results.append(item.clone(action="play", title=source_url,
                                  fulltitle=item.title, url=source_url))
    return results
|
||||
|
||||
16
plugin.video.alfa/channels/pandamovie.json
Normal file
16
plugin.video.alfa/channels/pandamovie.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "pandamovie",
|
||||
"name": "pandamovie",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://www.pandamovies.com/templates/pandamovies/images/logo.png?v1482157699",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
81
plugin.video.alfa/channels/pandamovie.py
Normal file
81
plugin.video.alfa/channels/pandamovie.py
Normal file
@@ -0,0 +1,81 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
|
||||
host= 'https://pandamovies.pw'
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu (genres and studios share one action)."""
    logger.info()
    menu = [
        Item(channel=item.channel, title="Peliculas", action="peliculas", url=host + "/list-movies"),
        Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/list-movies"),
        Item(channel=item.channel, title="Canal", action="categorias", url=host + "/list-movies"),
        Item(channel=item.channel, title="Buscar", action="search"),
    ]
    return menu
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto*; on failure log and return []."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
    # Fix: narrowed bare `except:`; dropped the redundant local `import sys`.
    except Exception:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """List Genres or Studios depending on which menu entry was clicked."""
    # Fix: the original omitted the standard entry log every sibling has.
    logger.info()
    itemlist = []
    data = scrapertools.cache_page(item.url)
    # The menu title decides which <ul> of the nav bar gets scraped.
    if item.title == "Categorias":
        data = scrapertools.get_match(data, '<a href="#">Genres</a>(.*?)</ul>')
    else:
        data = scrapertools.get_match(data, '<a href="#">Studios</a>(.*?)</ul>')
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<li><a title=".*?" href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle in matches:
        # Normalize https or scheme-relative links to absolute https
        # (single expression equivalent to the original strip-then-prepend).
        scrapedurl = "https:" + scrapedurl.replace("https:", "")
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title=scrapedtitle, url=scrapedurl,
                             thumbnail="", plot="", folder=True))
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List movie entries plus pagination (two next-link formats supported)."""
    logger.info()
    results = []
    page = scrapertools.cachePage(item.url)
    pattern = ('<a class="clip-link" title="([^"]+)" href="([^"]+)".*?'
               'src="([^"]+)"')
    for clip_title, clip_url, clip_thumb in re.compile(pattern, re.DOTALL).findall(page):
        results.append(Item(channel=item.channel, action="findvideos", title=clip_title,
                            url=urlparse.urljoin(item.url, clip_url),
                            thumbnail=clip_thumb, plot="",
                            contentTitle=clip_title, infoLabels={'year': ""}))
    # The site uses two different pagination markups; try both in order.
    next_url = scrapertools.find_single_match(page, '<a class="nextpostslink" rel="next" href="([^"]+)">')
    if next_url == "":
        next_url = scrapertools.find_single_match(page, '<a.*?href="([^"]+)" >Next »</a>')
    if next_url != "":
        results.append(Item(channel=item.channel, action="peliculas",
                            title="Página Siguiente >>", text_color="blue",
                            url=urlparse.urljoin(item.url, next_url), folder=True))
    return results
|
||||
|
||||
16
plugin.video.alfa/channels/perfectgirls.json
Normal file
16
plugin.video.alfa/channels/perfectgirls.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "perfectgirls",
|
||||
"name": "perfectgirls",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://www.perfectgirls.net/images/no-sprite/perfect-girl-regular.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
86
plugin.video.alfa/channels/perfectgirls.py
Normal file
86
plugin.video.alfa/channels/perfectgirls.py
Normal file
@@ -0,0 +1,86 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
|
||||
host = 'http://www.perfectgirls.net'
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu."""
    logger.info()
    menu = [
        Item(channel=item.channel, title="Peliculas", action="peliculas", url=host),
        Item(channel=item.channel, title="Categorias", action="categorias", url=host),
        Item(channel=item.channel, title="Buscar", action="search"),
    ]
    return menu
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto*; on failure log and return []."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/%s/" % texto
    try:
        return peliculas(item)
    # Fix: narrowed bare `except:`; dropped the redundant local `import sys`.
    except Exception:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """List the category pages; '/1' selects the first results page."""
    logger.info()
    results = []
    page = httptools.downloadpage(item.url).data
    pattern = '<li class="additional_list__item"><a href="([^"]+)">([^"]+)</a>'
    found = re.compile(pattern, re.DOTALL).findall(page)
    scrapertools.printMatches(found)
    for cat_url, cat_title in found:
        results.append(Item(channel=item.channel, action="peliculas", title=cat_title,
                            url=urlparse.urljoin(item.url, cat_url) + "/1",
                            thumbnail="", plot="", folder=True))
    return results
|
||||
|
||||
|
||||
def peliculas(item):
    """List video entries (duration-prefixed titles) plus pagination."""
    logger.info()
    results = []
    page = httptools.downloadpage(item.url).data
    page = re.sub(r"\n|\r|\t| |<br>", "", page)
    pattern = '<div class="list__item_link"><a href="([^"]+)" title="([^"]+)">.*?data-original="([^"]+)".*?<time>([^"]+)</time>'
    found = re.compile(pattern, re.DOTALL).findall(page)
    scrapertools.printMatches(found)
    for vid_url, vid_title, vid_thumb, duration in found:
        label = "[COLOR yellow]" + duration + "[/COLOR] " + vid_title
        results.append(Item(channel=item.channel, action="findvideos", title=label,
                            url=urlparse.urljoin(item.url, vid_url),
                            thumbnail="http:" + vid_thumb, plot="",
                            contentTitle=vid_title, infoLabels={'year': ""}))
    next_page = scrapertools.find_single_match(page, '<a class="btn_wrapper__btn" href="([^"]+)">Next</a></li>')
    if next_page:
        results.append(Item(channel=item.channel, action="peliculas",
                            title="Página Siguiente >>", text_color="blue",
                            url=urlparse.urljoin(item.url, next_page)))
    return results
|
||||
|
||||
|
||||
def findvideos(item):
    """Extract the per-resolution <source> URLs from the video page."""
    logger.info()
    results = []
    page = httptools.downloadpage(item.url).data
    found = re.compile('<source src="([^"]+)" res="\d+" label="([^"]+)"', re.DOTALL).findall(page)
    scrapertools.printMatches(found)
    for source_url, quality in found:
        results.append(item.clone(action="play", title=quality,
                                  fulltitle=item.title, url=source_url))
    return results
|
||||
|
||||
16
plugin.video.alfa/channels/porn300.json
Normal file
16
plugin.video.alfa/channels/porn300.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "porn300",
|
||||
"name": "porn300",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "https://www.porn300.com/android-icon-192x192.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
112
plugin.video.alfa/channels/porn300.py
Normal file
112
plugin.video.alfa/channels/porn300.py
Normal file
@@ -0,0 +1,112 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'https://www.porn300.com'
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu (Spanish site sections)."""
    logger.info()
    menu = [
        Item(channel=item.channel, title="Nuevas", action="peliculas", url=host + "/es/videos/"),
        Item(channel=item.channel, title="Mas Vistas", action="peliculas", url=host + "/es/mas-vistos/"),
        Item(channel=item.channel, title="Mejor valorada", action="peliculas", url=host + "/es/mas-votados/"),
        Item(channel=item.channel, title="Canal", action="categorias", url=host + "/es/canales/"),
        Item(channel=item.channel, title="Pornstars", action="catalogo", url=host + "/es/pornostars/?sort=views"),
        Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/es/categorias/?sort=videos"),
        Item(channel=item.channel, title="Buscar", action="search"),
    ]
    return menu
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/es/buscar/?q=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def catalogo(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a itemprop="url" href="([^"]+)".*?title="([^"]+)">.*?<img itemprop="image" src=([^"]+) alt=.*?</svg> ([^"]+)</li>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle + " (" + cantidad +")"
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<a class="btn btn-primary--light btn-pagination" itemprop="name" href="([^"]+)" title="Siguiente">')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a itemprop="url" href="([^"]+)".*?title="([^"]+)">.*?<img itemprop="image" src="([^"]+)".*?</svg>([^"]+) </small>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle + " (" + cantidad +")"
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<a class="btn btn-primary--light btn-pagination" itemprop="name" href="([^"]+)" title="Siguiente">')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="categorias" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a itemprop="url" href="([^"]+)" data-video-id="\d+" title="([^"]+)">.*?<img itemprop="thumbnailUrl" src="([^"]+)".*?</svg>\s+(.*?) </li>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches:
|
||||
url = urlparse.urljoin(item.url,scrapedurl)
|
||||
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
|
||||
contentTitle = title
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
|
||||
next_page_url = scrapertools.find_single_match(data,'<a class="btn btn-primary--light btn-pagination" itemprop="name" href="([^"]+)" title="Siguiente">')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
return itemlist
|
||||
|
||||
def play(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
patron = '<source src="([^"]+)"'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl in matches:
|
||||
url = scrapedurl
|
||||
itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
|
||||
return itemlist
|
||||
|
||||
16
plugin.video.alfa/channels/porneq.json
Normal file
16
plugin.video.alfa/channels/porneq.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "porneq",
|
||||
"name": "porneq",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://porneq.com/uploads/porneq-logo-home-png554cf1a970e6d.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
71
plugin.video.alfa/channels/porneq.py
Normal file
71
plugin.video.alfa/channels/porneq.py
Normal file
@@ -0,0 +1,71 @@
|
||||
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# porneq channel: video listings and search.
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys

from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://porneq.com'


def mainlist(item):
    """Root menu of the channel."""
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Ultimos", action="peliculas", url=host + "/videos/browse/"))
    itemlist.append(Item(channel=item.channel, title="Mas Vistos", action="peliculas", url=host + "/videos/most-viewed/"))
    itemlist.append(Item(channel=item.channel, title="Mas Votado", action="peliculas", url=host + "/videos/most-liked/"))
    itemlist.append(Item(channel=item.channel, title="Big Tits", action="peliculas", url=host + "/show/big+tits&sort=w"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    """Search entry point; returns [] on any scraping error."""
    logger.info()
    item.url = host + "/show/%s" % texto.replace(" ", "+")
    try:
        return peliculas(item)
    except Exception:
        # Narrowed from a bare except; log the error but never break the dialog.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def peliculas(item):
    """List videos with duration-prefixed titles, plus pagination."""
    logger.info()
    itemlist = []
    # httptools.downloadpage replaces the deprecated scrapertools.cache_page.
    data = httptools.downloadpage(item.url).data
    # NOTE(review): "&nbsp;" restores the HTML entity; a literal space here
    # would strip every space and break the pattern below.
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a class="clip-link" data-id="\d+" title="([^"]+)" href="([^"]+)">.*?<img src="([^"]+)".*?<span class="timer">(.*?)</span></div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedtitle, scrapedurl, scrapedthumbnail, scrapedtime in matches:
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot="", folder=True))
    next_page_url = scrapertools.find_single_match(data, '<nav id="page_nav"><a href="(.*?)"')
    if next_page_url != "":
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Página Siguiente >>",
                             text_color="blue", url=next_page_url, folder=True))
    return itemlist


def play(item):
    """Resolve the direct stream URL from the player setup JS."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '"video-setup".*?file: "(.*?)",'
    # str() conversion removed: the match is already a byte string and str()
    # could raise UnicodeEncodeError on non-ASCII input under Python 2.
    for scrapedurl in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=item.channel, action="play", title=item.title,
                             fulltitle=item.fulltitle, url=scrapedurl,
                             thumbnail=item.thumbnail, plot=item.plot, show=item.title,
                             server="directo", folder=False))
    return itemlist
|
||||
|
||||
16
plugin.video.alfa/channels/pornhive.json
Normal file
16
plugin.video.alfa/channels/pornhive.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "pornhive",
|
||||
"name": "pornhive",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://www.pornhive.tv/assets/images/logo.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
88
plugin.video.alfa/channels/pornhive.py
Normal file
88
plugin.video.alfa/channels/pornhive.py
Normal file
@@ -0,0 +1,88 @@
|
||||
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pornhive channel: video listings, categories and search.
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys

from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://www.pornhive.tv/en'


def mainlist(item):
    """Root menu of the channel."""
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="MOVIES", action="peliculas", url=host))
    # "CHANNELS" disabled: the channel index is protected by a captcha.
    # itemlist.append( Item(channel=item.channel, title="CHANNELS" , action="catalogo", url=host))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    """Search entry point; returns [] on any scraping error."""
    logger.info()
    item.url = host + "/search?keyword=%s" % texto.replace(" ", "+")
    try:
        return peliculas(item)
    except Exception:
        # Narrowed from a bare except; log the error but never break the dialog.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    """List the categories from the sidebar menu."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data, 'Categories(.*?)<li class="divider"')
    patron = '<li><a href="([^"]+)" title="[^"]+">(.*?)</a>'
    for scrapedurl, scrapedtitle in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle,
                             url=scrapedurl, thumbnail="", plot="", folder=True))
    return itemlist


def peliculas(item):
    """List videos, plus pagination."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # NOTE(review): "&nbsp;" restores the HTML entity; a literal space here
    # would strip every space from the page.
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="col-lg-3 col-md-3 col-sm-4 col-xs-6 col-thumb panel-video-\d+">.*?'
    patron += '<a href="([^"]+)".*?'
    patron += 'data-src="([^"]+)".*?'
    patron += 'alt="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot="", contentTitle=scrapedtitle,
                             infoLabels={'year': ""}))
    next_page_url = scrapertools.find_single_match(
        data, '<li><a href="([^"]+)" data-ci-pagination-page="\d+" rel="next">Next &rsaquo;')
    if next_page_url != "":
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Página Siguiente >>",
                             text_color="blue", url=next_page_url, folder=True))
    return itemlist


def play(item):
    """Resolve hosted-server links from the video page."""
    logger.info()
    # Bug fixes: the original resolved item.url as if it were page data and
    # threw the result away, and assigned the channel to a throwaway local
    # ("videochannel") so resolved items never carried their channel.
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.fulltitle
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist
|
||||
15
plugin.video.alfa/channels/qwertty.json
Normal file
15
plugin.video.alfa/channels/qwertty.json
Normal file
@@ -0,0 +1,15 @@
|
||||
{
|
||||
"id": "qwertty",
|
||||
"name": "qwertty",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://qwertty.net/wp-content/uploads/2018/05/favicon.ico",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
88
plugin.video.alfa/channels/qwertty.py
Normal file
88
plugin.video.alfa/channels/qwertty.py
Normal file
@@ -0,0 +1,88 @@
|
||||
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# qwertty channel: video listings and search (videos hosted on pornhub).
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://qwertty.net'


def mainlist(item):
    """Root menu of the channel."""
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Recientes", action="peliculas", url=host))
    itemlist.append(Item(channel=item.channel, title="Mas Vistas", action="peliculas", url=host + "/?filter=most-viewed"))
    itemlist.append(Item(channel=item.channel, title="Mas popular", action="peliculas", url=host + "/?filter=popular"))
    itemlist.append(Item(channel=item.channel, title="Mejor valoradas", action="peliculas", url=host + "/?filter=random"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    """Search entry point; returns [] on any scraping error."""
    logger.info()
    item.url = host + "/?s=%s" % texto.replace(" ", "+")
    try:
        return peliculas(item)
    except Exception:
        # Narrowed from a bare except; log the error but never break the dialog.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    """List categories from the site menu."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<li><a href="([^<]+)">(.*?)</a></li>'
    for scrapedurl, scrapedtitle in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle,
                             url=host + scrapedurl, thumbnail="", plot="", folder=True))
    return itemlist


def peliculas(item):
    """List videos with duration-prefixed titles, plus pagination."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # NOTE(review): "&nbsp;" restores the HTML entity; a literal space here
    # would strip every space from the page.
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<article id="post-\d+".*?<a href="([^"]+)" title="([^"]+)">.*?<img data-src="(.*?)".*?<span class="duration"><i class="fa fa-clock-o"></i>([^<]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail, duracion in matches:
        title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot="", folder=True))
    # The site renders pagination two different ways; try both.
    next_page_url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Next</a>')
    if next_page_url == "":
        next_page_url = scrapertools.find_single_match(
            data, '<li><a class="current">.*?<li><a href=\'([^\']+)\' class="inactive">')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Página Siguiente >>",
                             text_color="blue", url=next_page_url, folder=True))
    return itemlist


def play(item):
    """Follow the embedded pornhub iframe and extract the direct video URL."""
    logger.info()
    itemlist = []
    # httptools.downloadpage replaces the deprecated scrapertools.cachePage.
    data = httptools.downloadpage(item.url).data
    pornhub = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
    data = httptools.downloadpage(pornhub).data
    for scrapedurl in re.compile('"videoUrl":"(.*?)"', re.DOTALL).findall(data):
        scrapedurl = scrapedurl.replace("\/", "/")  # un-escape JSON slashes
        itemlist.append(Item(channel=item.channel, action="play", title=item.title,
                             fulltitle=item.fulltitle, url=scrapedurl,
                             thumbnail=item.thumbnail, plot=item.plot, show=item.title,
                             server="directo", folder=False))
    return itemlist
|
||||
15
plugin.video.alfa/channels/redtube.json
Normal file
15
plugin.video.alfa/channels/redtube.json
Normal file
@@ -0,0 +1,15 @@
|
||||
{
|
||||
"id": "redtube",
|
||||
"name": "redtube",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "https://thumbs-cdn.redtube.com/www-static/cdn_files/redtube/images/pc/logo/redtube_logo.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
110
plugin.video.alfa/channels/redtube.py
Normal file
110
plugin.video.alfa/channels/redtube.py
Normal file
@@ -0,0 +1,110 @@
|
||||
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# redtube channel: video listings, pornstars, categories and search.
#------------------------------------------------------------

import re
import sys
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger

host = 'https://es.redtube.com'

# Pagination anchor shared by listing and pornstar pages.
PATRON_NEXT = '<a id="wp_navNext" class="js_pop_page" href="([^"]+)">'


def mainlist(item):
    """Root menu of the channel."""
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Nuevas", action="peliculas", url=host + "/newest"))
    itemlist.append(Item(channel=item.channel, title="Mas Vistas", action="peliculas", url=host + "/mostviewed"))
    itemlist.append(Item(channel=item.channel, title="Mejor valorada", action="peliculas", url=host + "/top"))
    itemlist.append(Item(channel=item.channel, title="Pornstars", action="catalogo", url=host + "/pornstar"))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    """Search entry point; returns [] on any scraping error."""
    logger.info()
    item.url = host + "/?search=%s" % texto.replace(" ", "+")
    try:
        return peliculas(item)
    except Exception:
        # Narrowed from a bare except; sys is now imported at module level
        # instead of inside the handler.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def catalogo(item):
    """List pornstars (name + video count), with pagination."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # NOTE(review): "&nbsp;" restores the HTML entity; a literal space here
    # would strip every space and break the "\s+... Videos" patterns below.
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a class="pornstar_link js_mpop js-pop" href="([^"]+)".*?"([^"]+)"\s+title="([^"]+)".*?<div class="ps_info_count">\s+([^"]+)\s+Videos'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, cantidad in matches:
        title = scrapedtitle + " [COLOR yellow]" + cantidad + "[/COLOR] "
        url = urlparse.urljoin(item.url, scrapedurl)
        itemlist.append(Item(channel=item.channel, action="peliculas", title=title, url=url,
                             thumbnail=scrapedthumbnail, plot="", folder=True))
    next_page_url = scrapertools.find_single_match(data, PATRON_NEXT)
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action="catalogo", title="Página Siguiente >>",
                             text_color="blue", url=next_page_url, folder=True))
    return itemlist


def categorias(item):
    """List categories (name + video count)."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="category_item_wrapper">.*?<a href="([^"]+)".*?data-thumb_url="([^"]+)".*?alt="([^"]+)".*?<span class="category_count">\s+([^"]+) Videos'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, cantidad in matches:
        title = scrapedtitle + " (" + cantidad + ")"
        url = urlparse.urljoin(item.url, scrapedurl)
        itemlist.append(Item(channel=item.channel, action="peliculas", title=title, url=url,
                             thumbnail=scrapedthumbnail, plot="", folder=True))
    return itemlist


def peliculas(item):
    """List videos with duration (and HD flag) prefixed titles, plus pagination."""
    logger.info()
    itemlist = []
    # httptools.downloadpage replaces the deprecated scrapertools.cachePage.
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<img id="img_.*?data-path="([^"]+)".*?<span class="duration">(.*?)</a>.*?<a title="([^"]+)" href="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumbnail, duration, scrapedtitle, scrapedurl in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        # HD videos carry an extra badge inside the duration span.
        scrapedhd = scrapertools.find_single_match(duration, '<span class="hd-video-text">(.*?)</span>')
        if scrapedhd == 'HD':
            duration = scrapertools.find_single_match(duration, 'HD</span>(.*?)</span>')
            title = "[COLOR yellow]" + duration + "[/COLOR] " + "[COLOR red]" + scrapedhd + "[/COLOR] " + scrapedtitle
        else:
            title = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle
        # NOTE(review): these cleanups target leftover "&nbsp;" entities, not
        # real spaces (which would wipe every space from the title).
        title = title.replace("&nbsp;</span>", "").replace("&nbsp;", "")
        scrapedthumbnail = scrapedthumbnail.replace("{index}.", "1.")
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url,
                             thumbnail=scrapedthumbnail, plot="", contentTitle=title,
                             infoLabels={'year': ""}))
    next_page_url = scrapertools.find_single_match(data, PATRON_NEXT)
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Página Siguiente >>",
                             text_color="blue", url=next_page_url, folder=True))
    return itemlist


def play(item):
    """Extract the default-quality direct video URL from the player config."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '"defaultQuality":true,"format":"",.*?"videoUrl"\:"([^"]+)"'
    for scrapedurl in re.compile(patron, re.DOTALL).findall(data):
        url = scrapedurl.replace("\/", "/")  # un-escape JSON slashes
        itemlist.append(item.clone(action="play", title=url, fulltitle=item.title, url=url))
    return itemlist
|
||||
|
||||
16
plugin.video.alfa/channels/sexgalaxy.json
Normal file
16
plugin.video.alfa/channels/sexgalaxy.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "sexgalaxy",
|
||||
"name": "sexgalaxy",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://sexgalaxy.net/wp-content/uploads/2016/11/logogalaxy_red.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
98
plugin.video.alfa/channels/sexgalaxy.py
Normal file
98
plugin.video.alfa/channels/sexgalaxy.py
Normal file
@@ -0,0 +1,98 @@
|
||||
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# sexgalaxy channel: releases, full movies, networks, categories and search.
#------------------------------------------------------------

import re
import sys
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger

host = 'http://sexgalaxy.net'


def mainlist(item):
    """Root menu of the channel."""
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Ultimos", action="peliculas", url=host + "/new-releases/"))
    itemlist.append(Item(channel=item.channel, title="Peliculas", action="peliculas", url=host + "/full-movies/"))
    itemlist.append(Item(channel=item.channel, title="Canales", action="canales", url=host))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    """Search entry point; returns [] on any scraping error."""
    logger.info()
    item.url = host + "/?s=%s" % texto.replace(" ", "+")
    try:
        return peliculas(item)
    except Exception:
        # Narrowed from a bare except; sys is imported at module level now.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def _menu_lateral(data, seccion, item):
    # Shared scraper for the sidebar menus ("Top Networks" / "More Categories").
    # The dead urlparse.urljoin(thumbnail) computation and the str(title)
    # conversion (which raises UnicodeEncodeError on non-ASCII in py2) were removed.
    itemlist = []
    data = scrapertools.get_match(data, seccion + '</a>(.*?)</ul>')
    patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
    for scrapedurl, scrapedtitle in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle,
                             url=scrapedurl, thumbnail="", plot="", folder=True))
    return itemlist


def canales(item):
    """List the site's networks (sidebar "Top Networks" menu)."""
    logger.info()
    # httptools.downloadpage replaces the deprecated scrapertools.cache_page.
    data = httptools.downloadpage(host).data
    return _menu_lateral(data, 'Top Networks', item)


def categorias(item):
    """List the site's categories (sidebar "More Categories" menu)."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    return _menu_lateral(data, 'More Categories', item)


def peliculas(item):
    """List posts, plus pagination."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="post-img small-post-img">.*?<a href="(.*?)" title="(.*?)">.*?<img src="(.*?)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, plot="", folder=True))
    next_page_url = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)"')
    if next_page_url != "":
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Next page >>",
                             text_color="blue", url=next_page_url, folder=True))
    return itemlist


def play(item):
    """Resolve hosted-server links found in the post body."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist
|
||||
|
||||
16
plugin.video.alfa/channels/sexkino.json
Normal file
16
plugin.video.alfa/channels/sexkino.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "sexkino",
|
||||
"name": "sexkino",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://sexkino.to/wp-content/uploads/2016/12/sexkino.to_.jpg",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
113
plugin.video.alfa/channels/sexkino.py
Normal file
113
plugin.video.alfa/channels/sexkino.py
Normal file
@@ -0,0 +1,113 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re
|
||||
import urlparse
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
|
||||
host = 'http://sexkino.to'
|
||||
|
||||
def mainlist(item):
|
||||
logger.info("pelisalacarta.sexkino mainlist")
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="New" , action="peliculas", url= host + "/movies/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Año" , action="anual", url= host))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url= host))
|
||||
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info("pelisalacarta.gmobi mainlist")
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/?s=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def categorias(item):
|
||||
logger.info("pelisalacarta.sexkino categorias")
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
patron = '<li class="cat-item cat-item-.*?<a href="(.*?)" >(.*?)</a> <i>(.*?)</i>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
for scrapedurl,scrapedtitle,cantidad in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
scrapedtitle = scrapedtitle + " ("+cantidad+")"
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
return itemlist
|
||||
|
||||
def anual(item):
|
||||
logger.info("pelisalacarta.sexkino anual")
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
patron = '<li><a href="([^<]+)">([^<]+)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
logger.info("pelisalacarta.sexkino peliculas")
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
#hay que hacer que coincida con el buscador
|
||||
patron = '<article.*?<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+)".*?>(\d+)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle,date in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle + " (" + date + ")"
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
next_page_url = scrapertools.find_single_match(data,'resppages.*?<a href="([^"]+)" ><span class="icon-chevron-right">')
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Collect playable links from both the mirror table rows and the
    embedded player iframes of a movie page."""
    logger.info("pelisalacarta.a0 findvideos")
    html = scrapertools.cachePage(item.url)
    results = []
    # One <tr> per download/server mirror.
    for row in re.findall('<tr id=(.*?)</tr>', html, re.DOTALL):
        row_url = scrapertools.find_single_match(row, 'href="([^"]+)" target')
        row_title = scrapertools.find_single_match(row, '<td><img src=.*?> (.*?)</td>')
        results.append(item.clone(action="play", title=row_title, url=row_url))
    # Embedded player options keyed by their tab label.
    frame_pattern = '<iframe class="metaframe rptss" src="([^"]+)".*?<li><a class="options" href="#option-\d+">\s+(.*?)\s+<'
    for frame_url, frame_title in re.findall(frame_pattern, html, re.DOTALL):
        results.append(item.clone(action="play", title=frame_title, url=frame_url))
    return results
|
||||
|
||||
|
||||
def play(item):
    """Resolve the page into playable server items, relabelled with this
    item's own metadata."""
    logger.info("pelisalacarta.sexkino play")
    page = scrapertools.cachePage(item.url)
    found = servertools.find_video_items(data=page)
    for video in found:
        video.title = item.title
        video.fulltitle = item.fulltitle
        video.thumbnail = item.thumbnail
        video.channel = item.channel
    return found
|
||||
|
||||
16
plugin.video.alfa/channels/sexofilm.json
Normal file
16
plugin.video.alfa/channels/sexofilm.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "sexofilm",
|
||||
"name": "sexofilm",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "https://i0.wp.com/sexofilm.com/xbox/wp-content/uploads/2016/06/SexoFilm-Logo-230x54-LOGO-MOBILE.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
110
plugin.video.alfa/channels/sexofilm.py
Normal file
110
plugin.video.alfa/channels/sexofilm.py
Normal file
@@ -0,0 +1,110 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
|
||||
host = 'http://sexofilm.com'
|
||||
|
||||
def mainlist(item):
    """Build the channel's root menu."""
    logger.info()
    sections = [
        ("Peliculas", "peliculas", host + "/xtreme-adult-wing/adult-dvds/"),
        ("Parody", "peliculas", host + "/xtreme-adult-wing/porn-parodies/"),
        ("Videos", "peliculas", host + "/xtreme-adult-wing/porn-clips-movie-scene/"),
        ("SexMUSIC", "peliculas", host + "/topics/sexo-music-videos/"),
        ("Xshows", "peliculas", host + "/xshows/"),
        ("Canal", "catalogo", host),
        ("Categorias", "categorias", host),
    ]
    itemlist = [Item(channel=item.channel, title=title, action=action, url=url)
                for title, action, url in sections]
    # The search entry deliberately carries no url.
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Run a site search; on any scraping failure log the traceback parts
    and return an empty list (channel convention: search must not crash)."""
    logger.info()
    query = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % query
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """List the tag-cloud categories as 'peliculas' entries."""
    logger.info()
    html = httptools.downloadpage(item.url).data
    cloud = scrapertools.get_match(html, '<div class="tagcloud">(.*?)<p>')
    tags = re.findall('<a href="(.*?)".*?>(.*?)</a>', cloud, re.DOTALL)
    scrapertools.printMatches(tags)
    return [Item(channel=item.channel, action="peliculas", title=tag_title,
                 url=tag_url, folder=True)
            for tag_url, tag_title in tags]
|
||||
|
||||
|
||||
def catalogo(item):
    """List the studios from the 'Best Porn Studios' sidebar menu."""
    logger.info()
    html = httptools.downloadpage(item.url).data
    menu = scrapertools.get_match(html, '>Best Porn Studios</a>(.*?)</ul>')
    studios = re.findall('<a href="(.*?)">(.*?)</a>', menu, re.DOTALL)
    scrapertools.printMatches(studios)
    return [Item(channel=item.channel, action="peliculas", title=studio_title,
                 url=studio_url, folder=True)
            for studio_url, studio_title in studios]
|
||||
|
||||
def anual(item):
    """List the yearly archive links as browsable 'peliculas' entries.

    BUGFIX: the loop previously appended a pagination item referencing an
    undefined ``next_page_url`` (NameError on the first match) — a copy/paste
    leftover. It now emits one item per scraped link, mirroring the other
    channels' ``anual``.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<li><a href="([^<]+)">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title=scrapedtitle, url=scrapedurl,
                             thumbnail="", plot="", folder=True))
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List movies from an index page, appending a pagination item when the
    'nextpostslink' anchor is present."""
    logger.info()
    html = httptools.downloadpage(item.url).data
    post_pattern = '<div class="post-thumbnail.*?<a href="([^"]+)" title="(.*?)".*?src="([^"]+)"'
    posts = re.findall(post_pattern, html, re.DOTALL)
    scrapertools.printMatches(posts)
    results = []
    for post_url, post_title, post_thumb in posts:
        clean_title = post_title.replace(" Porn DVD", "")
        results.append(Item(channel=item.channel, action="play",
                            title=clean_title, url=post_url,
                            thumbnail=post_thumb, plot="", folder=True))
    next_url = scrapertools.find_single_match(
        html, '<a class="nextpostslink" rel="next" href="([^"]+)">»</a>')
    if next_url != "":
        results.append(Item(channel=item.channel, action="peliculas",
                            title="Página Siguiente >>", text_color="blue",
                            url=urlparse.urljoin(item.url, next_url),
                            folder=True))
    return results
|
||||
|
||||
|
||||
def play(item):
    """Resolve the page into playable server items carrying this item's
    metadata."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    found = servertools.find_video_items(data=page)
    for video in found:
        video.title = item.title
        video.fulltitle = item.fulltitle
        video.thumbnail = item.thumbnail
        video.channel = item.channel
    return found
|
||||
16
plugin.video.alfa/channels/spankwire.json
Normal file
16
plugin.video.alfa/channels/spankwire.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "spankwire",
|
||||
"name": "spankwire",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "https://cdn1-static-spankwire.spankcdn.net/apple-touch-icon-precomposed.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
97
plugin.video.alfa/channels/spankwire.py
Normal file
97
plugin.video.alfa/channels/spankwire.py
Normal file
@@ -0,0 +1,97 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'https://www.spankwire.com'
|
||||
|
||||
def mainlist(item):
    """Root menu for the channel."""
    logger.info()
    menu = [
        ("Nuevas", host + "/recentvideos/straight"),
        ("Mas Vistas", host + "/home1/Straight/Month/Views"),
        ("Mejor valorada", host + "/home1/Straight/Month/Rating"),
        ("Longitud", host + "/home1/Straight/Month/Duration"),
    ]
    itemlist = [Item(channel=item.channel, title=title, action="peliculas", url=url)
                for title, url in menu]
    # Disabled upstream:
    #itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/Straight"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Run a site search; on any scraping failure log the traceback parts
    and return an empty list."""
    logger.info()
    query = texto.replace(" ", "+")
    item.url = host + "/search/?q=%s" % query
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """List the site's category thumbnails with their video counts."""
    logger.info()
    html = httptools.downloadpage(item.url).data
    # NOTE(review): the lone-space alternative below looks like a mangled
    # "&nbsp;" from the original source — confirm against upstream.
    html = re.sub(r"\n|\r|\t| |<br>", "", html)
    cat_pattern = '<div class="category-thumb"><a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)" />.*?<span>([^"]+)</span>'
    cats = re.findall(cat_pattern, html, re.DOTALL)
    scrapertools.printMatches(cats)
    results = []
    for cat_url, cat_thumb, cat_title, count in cats:
        results.append(Item(channel=item.channel, action="peliculas",
                            title=cat_title + " (" + count + ")",
                            url=urlparse.urljoin(item.url, cat_url) + "/Submitted/59",
                            thumbnail="http:" + cat_thumb, plot="", folder=True))
    return results
|
||||
|
||||
|
||||
def peliculas(item):
    """List videos from an index or search page, with duration-labelled
    titles, and append a pagination item when a real next page exists.

    BUGFIX: the search-page fallback previously called
    ``urlparse.urljoin(item.url, next_page_url)`` *before* checking whether
    the fallback match was empty; since ``urljoin(item.url, "") == item.url``,
    a bogus "next page" item pointing at the current page was appended
    whenever no next page existed. The join now happens only after the
    non-empty check.
    """
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    # NOTE(review): the lone-space alternative below looks like a mangled
    # "&nbsp;" from the original source — confirm against upstream.
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<div class="video_thumb_wrapper">.*?<a href="([^"]+)".*?data-original="([^"]+)".*?title="([^"]+)".*?<div class="video_thumb_wrapper__thumb_info video_thumb_wrapper__duration">(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, duracion in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
        itemlist.append(Item(channel=item.channel, action="play", title=title,
                             url=url, thumbnail=scrapedthumbnail, plot="",
                             contentTitle=title, infoLabels={'year': ""}))
    next_page_url = scrapertools.find_single_match(
        data, '<link rel="next" href="([^"]+)" />')
    # Para el buscador: search results use a different pagination widget.
    if next_page_url == "":
        next_page_url = scrapertools.find_single_match(
            data, '<div class="paginator_wrapper__buttons"><a class="" href="([^"]+)"')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="Página Siguiente >>", text_color="blue",
                             url=next_page_url, folder=True))
    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Extract the downloadable stream URLs from the share/download section;
    a '#' placeholder link falls back to the 480p CDN path from playerData."""
    logger.info()
    results = []
    page = scrapertools.cachePage(item.url)
    page = scrapertools.get_match(page, 'Copy Embed Code(.*?)For Desktop')
    link_pattern = '<div class="shareDownload_container__item__dropdown">.*?<a href="([^"]+)"'
    for link in scrapertools.find_multiple_matches(page, link_pattern):
        if link == "#":
            link = scrapertools.find_single_match(
                page, 'playerData.cdnPath480 = \'([^\']+)\'')
        results.append(item.clone(action="play", title=link,
                                  fulltitle=link, url=link))
    return results
|
||||
|
||||
16
plugin.video.alfa/channels/sunporno.json
Normal file
16
plugin.video.alfa/channels/sunporno.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "sunporno",
|
||||
"name": "sunporno",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "https://sunstatic.fuckandcdn.com/sun/sunstatic/v31/common/sunporno/img/logo_top.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
91
plugin.video.alfa/channels/sunporno.py
Normal file
91
plugin.video.alfa/channels/sunporno.py
Normal file
@@ -0,0 +1,91 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'https://www.sunporno.com'
|
||||
|
||||
def mainlist(item):
    """Root menu for the channel."""
    logger.info()
    menu = [
        ("Nuevas", "peliculas", host + "/most-recent/"),
        ("Popular", "peliculas", host + "/most-viewed/date-last-week/"),
        ("Mejor valorada", "peliculas", host + "/top-rated/date-last-week/"),
        ("Mas largas", "peliculas", host + "/long-movies/date-last-month/"),
        ("Categorias", "categorias", host + "/channels"),
    ]
    itemlist = [Item(channel=item.channel, title=title, action=action, url=url)
                for title, action, url in menu]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Run a site search; on any scraping failure log the traceback parts
    and return an empty list."""
    logger.info()
    query = texto.replace(" ", "+")
    item.url = host + "/search/%s/" % query
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """List the channel categories, each pointing at its most-recent feed."""
    logger.info()
    html = httptools.downloadpage(item.url).data
    section = scrapertools.get_match(
        html, '<div class="category-item">(.*?)<div id="goupBlock"')
    # NOTE(review): the lone-space alternative below looks like a mangled
    # "&nbsp;" from the original source — confirm against upstream.
    section = re.sub(r"\n|\r|\t| |<br>", "", section)
    links = re.findall('<a href="([^"]+)">\s*(.*?)\s*<', section, re.DOTALL)
    scrapertools.printMatches(links)
    return [Item(channel=item.channel, action="peliculas", title=cat_title,
                 url=cat_url + "/most-recent/", thumbnail="", plot="",
                 folder=True)
            for cat_url, cat_title in links]
|
||||
|
||||
|
||||
def peliculas(item):
    """List videos from the thumbs container, with duration-labelled titles
    and a pagination item when a 'Next' link is present."""
    logger.info()
    html = scrapertools.cachePage(item.url)
    # NOTE(review): the lone-space alternative below looks like a mangled
    # "&nbsp;" from the original source — confirm against upstream.
    html = re.sub(r"\n|\r|\t| |<br>", "", html)
    html = scrapertools.get_match(
        html, '<div id="mainThumbsContainer" class="thumbs-container">(.*?)<div class="clearfix">')
    thumb_pattern = '<p class="btime">([^"]+)</p>.*?href="([^"]+)".*?src="([^"]+)".*?title="([^"]+)">'
    results = []
    for duration, video_url, video_thumb, video_title in re.findall(thumb_pattern, html, re.DOTALL):
        labelled = "[COLOR yellow]" + duration + "[/COLOR] " + video_title
        results.append(Item(channel=item.channel, action="play", title=labelled,
                            url=video_url, thumbnail=video_thumb, plot="",
                            contentTitle=video_title, infoLabels={'year': ""}))
    next_url = scrapertools.find_single_match(
        html, '<li><a class="pag-next" href="(.*?)">Next ></a>')
    if next_url != "":
        results.append(Item(channel=item.channel, action="peliculas",
                            title="Página Siguiente >>", text_color="blue",
                            url=urlparse.urljoin(item.url, next_url),
                            folder=True))
    return results
|
||||
|
||||
|
||||
def play(item):
    """Emit direct-play items for every <video src> found on the page,
    downgrading https to http as the original implementation did."""
    logger.info()
    page = scrapertools.cachePage(item.url)
    results = []
    for video_url in scrapertools.find_multiple_matches(page, '<video src="([^"]+)"'):
        results.append(Item(channel=item.channel, action="play",
                            title=item.title, fulltitle=item.fulltitle,
                            url=video_url.replace("https:", "http:"),
                            thumbnail=item.thumbnail, plot=item.plot,
                            show=item.title, server="directo", folder=False))
    return results
|
||||
|
||||
16
plugin.video.alfa/channels/tabooshare.json
Normal file
16
plugin.video.alfa/channels/tabooshare.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "tabooshare",
|
||||
"name": "tabooshare",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://tabooshare.com/wp-content/uploads/2017/06/cropped-TSa-180x180.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
73
plugin.video.alfa/channels/tabooshare.py
Normal file
73
plugin.video.alfa/channels/tabooshare.py
Normal file
@@ -0,0 +1,73 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
|
||||
host = 'http://tabooshare.com'
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu for the channel (search is disabled upstream)."""
    logger.info()
    itemlist = [
        Item(channel=item.channel, title="Ultimos", action="peliculas", url=host),
        Item(channel=item.channel, title="Categorias", action="categorias", url=host),
    ]
    # Disabled upstream:
    # itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def categorias(item):
    """List the sidebar 'Categories' links."""
    logger.info()
    html = httptools.downloadpage(item.url).data
    section = scrapertools.get_match(html, '<h3>Categories</h3>(.*?)</ul>')
    cat_pattern = '<li class="cat-item cat-item-\d+"><a href="(.*?)" >(.*?)</a>'
    cats = re.findall(cat_pattern, section, re.DOTALL)
    scrapertools.printMatches(cats)
    return [Item(channel=item.channel, action="peliculas",
                 title=str(cat_title), url=cat_url,
                 thumbnail="", plot="", folder=True)
            for cat_url, cat_title in cats]
|
||||
|
||||
|
||||
def peliculas(item):
    """List posts from an index page and append a pagination item.

    BUGFIX: the single-quote-href pagination fallback was guarded by a
    comparison against the hard-coded string "http://NaughtyPorn.net/" — a
    leftover from the channel this code was copied from, never true here, so
    the fallback was dead code. It now triggers when the double-quote pattern
    finds nothing. Also joins the (possibly relative) next-page URL against
    the current page, matching the sibling channels.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="post" id="post-\d+">.*?<a href="([^"]+)" title="(.*?)"><img src="(.*?)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedtitle = scrapedtitle.replace(" – Free Porn Download", "")
        itemlist.append(Item(channel=item.channel, action="play",
                             title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot="", folder=True))
    next_page_url = scrapertools.find_single_match(
        data, '<span class="current">.*?<a href="(.*?)"')
    if next_page_url == "":
        # Some themes quote the href with single quotes.
        next_page_url = scrapertools.find_single_match(
            data, '<span class="current">.*?<a href=\'(.*?)\'')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="Página Siguiente >>", text_color="blue",
                             url=next_page_url, folder=True))
    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Resolve the page into playable server items carrying this item's
    metadata."""
    logger.info()
    page = scrapertools.cachePage(item.url)
    found = servertools.find_video_items(data=page)
    for video in found:
        video.title = item.title
        video.fulltitle = item.fulltitle
        video.thumbnail = item.thumbnail
        video.channel = item.channel
    return found
|
||||
|
||||
@@ -3,11 +3,11 @@
|
||||
"name": "ThumbZilla",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": "en",
|
||||
"language": "*",
|
||||
"fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/xthearebg.jpg",
|
||||
"thumbnail": "https://image.spreadshirtmedia.com/image-server/v1/designs/1002274824,width=178,height=178/thumbzilla-womens-white-tee-big-logo.png",
|
||||
"thumbnail": "https://ci.phncdn.com/www-static/thumbzilla/images/pc/logo.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
@@ -35,3 +35,4 @@
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ from channelselector import get_thumb
|
||||
|
||||
__channel__ = "thumbzilla"
|
||||
|
||||
host = 'https://www.thumbzilla.com/'
|
||||
host = 'https://www.thumbzilla.com'
|
||||
try:
|
||||
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
|
||||
__perfil__ = int(config.get_setting('perfil', __channel__))
|
||||
@@ -44,36 +44,28 @@ def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append(Item(channel=__channel__, action="videos", title="Más Calientes", url=host,
|
||||
viewmode="movie", thumbnail=get_thumb("channels_adult.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Nuevas", url=host + 'newest',
|
||||
viewmode="movie", thumbnail=get_thumb("/channels_adult.png")))
|
||||
itemlist.append(Item(channel=__channel__, title="Nuevas", url=host + '/newest',
|
||||
action="videos", viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail=get_thumb("channels_adult.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Tendencias", url=host + 'tending',
|
||||
itemlist.append(Item(channel=__channel__, title="Tendencias", url=host + '/trending',
|
||||
action="videos", viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail=get_thumb("channels_adult.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Mejores Videos", url=host + 'top',
|
||||
itemlist.append(Item(channel=__channel__, title="Mejores Videos", url=host + '/top',
|
||||
action="videos", viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail=get_thumb("channels_adult.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Populares", url=host + 'popular',
|
||||
itemlist.append(Item(channel=__channel__, title="Populares", url=host + '/popular',
|
||||
action="videos", viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail=get_thumb("channels_adult.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Videos en HD", url=host + 'hd',
|
||||
itemlist.append(Item(channel=__channel__, title="Videos en HD", url=host + '/hd',
|
||||
action="videos", viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail=get_thumb("channels_adult.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Caseros", url=host + 'hd',
|
||||
itemlist.append(Item(channel=__channel__, title="Caseros", url=host + '/hd',
|
||||
action="videos", viewmode="movie_with_plot", viewcontent='homemade',
|
||||
thumbnail=get_thumb("channels_adult.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Categorías", action="categorias",
|
||||
url=host + 'categories/', viewmode="movie_with_plot", viewcontent='movies',
|
||||
url=host + '/categories/', viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail=get_thumb("channels_adult.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Buscador", action="search", url=host,
|
||||
thumbnail=get_thumb("channels_adult.png"), extra="buscar"))
|
||||
return itemlist
|
||||
@@ -100,7 +92,6 @@ def search(item, texto):
|
||||
def videos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
patron = '<a class="[^"]+" href="([^"]+)">' # url
|
||||
@@ -108,20 +99,15 @@ def videos(item):
|
||||
patron += '<span class="title">([^<]+)</span>.*?' # title
|
||||
patron += '<span class="duration">([^<]+)</span>' # time
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle, time in matches:
|
||||
title = "[%s] %s" % (time, scrapedtitle)
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action='findvideos', title=title, thumbnail=scrapedthumbnail,
|
||||
itemlist.append(Item(channel=item.channel, action='play', title=title, thumbnail=scrapedthumbnail,
|
||||
url=host + scrapedurl, contentTile=scrapedtitle, fanart=scrapedthumbnail))
|
||||
|
||||
paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace('amp;', '')
|
||||
|
||||
if paginacion:
|
||||
itemlist.append(Item(channel=item.channel, action="videos",
|
||||
thumbnail=thumbnail % 'rarrow',
|
||||
title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -130,12 +116,9 @@ def categorias(item):
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
# logger.info(data)
|
||||
patron = 'class="checkHomepage"><a href="([^"]+)".*?' # url
|
||||
patron += '<span class="count">([^<]+)</span>' # title, vids
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, vids in matches:
|
||||
scrapedtitle = scrapedurl.replace('/categories/', '').replace('-', ' ').title()
|
||||
title = "%s (%s)" % (scrapedtitle, vids.title())
|
||||
@@ -144,23 +127,17 @@ def categorias(item):
|
||||
itemlist.append(Item(channel=item.channel, action="videos", fanart=thumbnail,
|
||||
title=title, url=url, thumbnail=thumbnail,
|
||||
viewmode="movie_with_plot", folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
def play(item):
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data)
|
||||
# logger.info(data)
|
||||
patron = '"quality":"([^"]+)","videoUrl":"([^"]+)"'
|
||||
patron = '<li><a class="qualityButton active" data-quality="([^"]+)">([^"]+)</a></li>'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
|
||||
for calidad, scrapedurl in matches:
|
||||
scrapedurl = scrapedurl.replace('\\', '')
|
||||
for scrapedurl,calidad in matches:
|
||||
title = "[COLOR yellow](%s)[/COLOR] %s" % (calidad, item.contentTile)
|
||||
server = servertools.get_server_from_url(scrapedurl)
|
||||
|
||||
itemlist.append(item.clone(action='play', title=title, server=server, mediatype='movie', url=scrapedurl))
|
||||
|
||||
itemlist.append(item.clone(channel=item.channel, action="play", title=item.title , url=scrapedurl , folder=True) )
|
||||
return itemlist
|
||||
|
||||
|
||||
Reference in New Issue
Block a user