Ultima hornada
This commit is contained in:
15
plugin.video.alfa/channels/camwhoresbay.json
Normal file
15
plugin.video.alfa/channels/camwhoresbay.json
Normal file
@@ -0,0 +1,15 @@
|
||||
{
|
||||
"id": "camwhoresbay",
|
||||
"name": "camwhoresbay",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "https://www.camwhoresbay.com/images/porntrex.ico",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
]
|
||||
}
|
||||
|
||||
97
plugin.video.alfa/channels/camwhoresbay.py
Normal file
97
plugin.video.alfa/channels/camwhoresbay.py
Normal file
@@ -0,0 +1,97 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
|
||||
host = 'https://www.camwhoresbay.com'
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel's root menu: listing modes, categories and search."""
    logger.info()
    # (title, action, url-suffix) for the plain listing entries.
    entries = [
        ("Nuevos", "peliculas", "/latest-updates/"),
        ("Mejor valorados", "peliculas", "/top-rated/"),
        ("Mas vistos", "peliculas", "/most-popular/"),
        ("Categorias", "categorias", "/categories/"),
    ]
    itemlist = [Item(channel=item.channel, title=title, action=action, url=host + suffix)
                for title, action, suffix in entries]
    # Search carries no URL; it is built later from the user's query.
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto* and return the matching video Items.

    Returns an empty list on any scraping failure so the UI never crashes.

    Fixes: removed the redundant local ``import sys`` (``sys`` is already
    imported at module level) and narrowed the bare ``except:`` to
    ``except Exception:`` so KeyboardInterrupt/SystemExit still propagate.
    """
    logger.info()
    # The site expects '+' as the word separator in search URLs.
    texto = texto.replace(" ", "+")
    item.url = host + "/search/%s/" % texto
    try:
        return peliculas(item)
    except Exception:
        # Best-effort: log the full exception info and degrade gracefully.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """Scrape the category index page into one folder Item per category."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = ('<a class="item" href="([^"]+)" title="([^"]+)">.*?'
              '<img class="thumb" src="([^"]+)".*?'
              '<div class="videos">([^"]+)</div>')
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    itemlist = []
    for cat_url, cat_title, cat_thumb, count in matches:
        # Append the video count to the label, e.g. "Amateur (123)".
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title=cat_title + " (" + count + ")",
                             url=cat_url, thumbnail=cat_thumb, plot="",
                             folder=True))
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List the videos on the current page, plus a pager Item when present."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = ('<div class="video-item ">.*?'
              '<a href="([^"]+)" title="([^"]+)" class="thumb">.*?'
              'data-original="([^"]+)".*?'
              '<i class="fa fa-clock-o"></i>(.*?)</div>')
    itemlist = []
    for video_url, video_title, video_thumb, duration in re.compile(patron, re.DOTALL).findall(data):
        full_url = urlparse.urljoin(item.url, video_url)
        # Prefix the clip duration (in yellow) to the title.
        label = "[COLOR yellow]" + duration + "[/COLOR] " + video_title
        itemlist.append(Item(channel=item.channel, action="play", title=label,
                             url=full_url, thumbnail=video_thumb, plot="",
                             contentTitle=label, infoLabels={'year': ""}))
    next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
    if next_page:
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="Página Siguiente >>", text_color="blue",
                             url=urlparse.urljoin(item.url, next_page)))
    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Resolve the direct video URL, preferring the highest quality source.

    The player page embeds up to four source variables; ``video_alt_url3``
    appears to be the highest quality and ``video_url`` the base one, so
    they are probed in that order and the first non-empty match wins.

    Fixes: the resolved Item now keeps the human-readable ``item.title``
    instead of showing the raw stream URL as its title (same defect this
    commit fixes in the foxtube channel), and the repetitive fallback
    chain is collapsed into a loop.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    scrapedurl = ""
    for key in ("video_alt_url3", "video_alt_url2", "video_alt_url", "video_url"):
        scrapedurl = scrapertools.find_single_match(data, key + ": \'([^\']+)\'")
        if scrapedurl:
            break
    itemlist.append(Item(channel=item.channel, action="play", title=item.title,
                         fulltitle=item.title, url=scrapedurl,
                         thumbnail=item.thumbnail, plot=item.plot,
                         show=item.title, server="directo", folder=False))
    return itemlist
|
||||
|
||||
|
||||
17
plugin.video.alfa/channels/eroticasonlinetv.json
Normal file
17
plugin.video.alfa/channels/eroticasonlinetv.json
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"id": "eroticasonlinetv",
|
||||
"name": "eroticasonlinetv",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://www.peliculaseroticasonline.tv/wp-content/themes/wpeliculaseroticasonlinetv/favicon.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
89
plugin.video.alfa/channels/eroticasonlinetv.py
Normal file
89
plugin.video.alfa/channels/eroticasonlinetv.py
Normal file
@@ -0,0 +1,89 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'http://www.peliculaseroticasonline.tv'
|
||||
|
||||
def mainlist(item):
    """Root menu for the channel: movie listing, categories and search."""
    logger.info()
    return [
        Item(channel=item.channel, title="Peliculas", action="peliculas", url=host),
        Item(channel=item.channel, title="Categorias", action="categorias", url=host),
        # Search builds its own URL from the query later on.
        Item(channel=item.channel, title="Buscar", action="search"),
    ]
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site (WordPress ``?s=`` query) and list the results.

    Returns an empty list on any scraping failure so the UI never crashes.

    Fixes: removed the redundant local ``import sys`` (already imported at
    module level) and narrowed the bare ``except:`` to ``except Exception:``.
    """
    logger.info()
    # The site expects '+' as the word separator in search URLs.
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
    except Exception:
        # Best-effort: log the full exception info and degrade gracefully.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """Scrape the WordPress category list into folder Items."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)".*?>([^"]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    # Categories carry neither thumbnail nor plot on this site.
    return [Item(channel=item.channel, action="peliculas", title=cat_title,
                 url=cat_url, thumbnail="", plot="", folder=True)
            for cat_url, cat_title in matches]
|
||||
|
||||
|
||||
def peliculas(item):
    """List the movie posters on the current page plus a "next page" Item."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    # Flatten the markup so the regex can match across original line breaks.
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<div class="movie-poster"><a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    itemlist = []
    for movie_url, movie_thumb, movie_title in matches:
        itemlist.append(Item(channel=item.channel, action="play", title=movie_title,
                             url=urlparse.urljoin(item.url, movie_url),
                             thumbnail=movie_thumb, plot="",
                             contentTitle=movie_title, infoLabels={'year': ""}))
    next_page = scrapertools.find_single_match(data, '<div class="naviright"><a href="([^"]+)">Siguiente »</a>')
    if next_page:
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="Página Siguiente >>", text_color="blue",
                             url=urlparse.urljoin(item.url, next_page)))
    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Follow the embedded iframes to the real hoster and resolve its servers."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    # First hop: the page embeds an intermediate iframe.
    url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
    url = urlparse.urljoin(item.url, url)
    # Second hop: the intermediate page wraps the hoster embed.
    data = httptools.downloadpage(url).data
    url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
    if not url:
        # Some embeds redirect via JavaScript instead of a nested iframe.
        url = scrapertools.find_single_match(data, 'window.location="([^"]+)"')
    itemlist = servertools.find_video_items(data=url)
    # Carry the original item's presentation over to the resolved entries.
    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://fxtimg.com/xlogo_.png.pagespeed.ic.doVRQMV5ub.png",
|
||||
"thumbnail": "http://fxtimg.com/xlogo_.png.pagespeed.ic.doVRQMV5ub.png|Referer=http://es.foxtube.com",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
|
||||
@@ -54,13 +54,13 @@ def peliculas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
patron = '<a class="thumb tco1" href="([^"]+)">.*?src="([^"]+)".*?alt="([^"]+)">.*?<i class="m tc2">(.*?)</i>'
|
||||
patron = '<a class="thumb tco1" href="([^"]+)">.*?src="([^"]+)".*?alt="([^"]+)".*?<i class="m tc2">(.*?)</i>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
|
||||
url = host + scrapedurl
|
||||
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
|
||||
contentTitle = title
|
||||
thumbnail = scrapedthumbnail
|
||||
thumbnail = scrapedthumbnail + "|Referer=%s" %host
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
|
||||
@@ -80,7 +80,7 @@ def play(item):
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl in matches:
|
||||
scrapedurl = scrapedurl.replace("\/", "/")
|
||||
itemlist.append(Item(channel=item.channel, action="play", title=scrapedurl, fulltitle=scrapedurl, url=scrapedurl,
|
||||
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
|
||||
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
|
||||
return itemlist
|
||||
|
||||
|
||||
15
plugin.video.alfa/channels/javwhores.json
Normal file
15
plugin.video.alfa/channels/javwhores.json
Normal file
@@ -0,0 +1,15 @@
|
||||
{
|
||||
"id": "javwhores",
|
||||
"name": "javwhores",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "https://www.javwhores.com/images/logo.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
]
|
||||
}
|
||||
|
||||
97
plugin.video.alfa/channels/javwhores.py
Normal file
97
plugin.video.alfa/channels/javwhores.py
Normal file
@@ -0,0 +1,97 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
|
||||
host = 'https://www.javwhores.com/'
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel's root menu: listing modes, categories and search.

    Fixes: ``host`` ends with a trailing slash, so the original
    concatenation produced double-slash URLs such as
    ``https://www.javwhores.com//latest-updates/``; the base is
    normalized here before joining.
    """
    logger.info()
    base = host.rstrip("/")
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Nuevos", action="peliculas", url=base + "/latest-updates/"))
    itemlist.append(Item(channel=item.channel, title="Mejor valorados", action="peliculas", url=base + "/top-rated/"))
    itemlist.append(Item(channel=item.channel, title="Mas vistos", action="peliculas", url=base + "/most-popular/"))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=base + "/categories/"))
    # Search carries no URL; it is built later from the user's query.
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto* and return the matching video Items.

    Returns an empty list on any scraping failure so the UI never crashes.

    Fixes: removed the redundant local ``import sys`` (already imported at
    module level) and narrowed the bare ``except:`` to ``except Exception:``.
    """
    logger.info()
    # The site expects '+' as the word separator in search URLs.
    texto = texto.replace(" ", "+")
    item.url = host + "/search/%s/" % texto
    try:
        return peliculas(item)
    except Exception:
        # Best-effort: log the full exception info and degrade gracefully.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """Scrape the category index page into one folder Item per category."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = ('<a class="item" href="([^"]+)" title="([^"]+)">.*?'
              '<img class="thumb" src="([^"]+)".*?'
              '<div class="videos">([^"]+)</div>')
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    itemlist = []
    for cat_url, cat_title, cat_thumb, count in matches:
        # Append the video count to the label, e.g. "Amateur (123)".
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title=cat_title + " (" + count + ")",
                             url=cat_url, thumbnail=cat_thumb, plot="",
                             folder=True))
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List the videos on the current page, plus a pager Item when present."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = ('<div class="video-item ">.*?'
              '<a href="([^"]+)" title="([^"]+)" class="thumb">.*?'
              'data-original="([^"]+)".*?'
              '<i class="fa fa-clock-o"></i>(.*?)</div>')
    itemlist = []
    for video_url, video_title, video_thumb, duration in re.compile(patron, re.DOTALL).findall(data):
        full_url = urlparse.urljoin(item.url, video_url)
        # Prefix the clip duration (in yellow) to the title.
        label = "[COLOR yellow]" + duration + "[/COLOR] " + video_title
        itemlist.append(Item(channel=item.channel, action="play", title=label,
                             url=full_url, thumbnail=video_thumb, plot="",
                             contentTitle=label, infoLabels={'year': ""}))
    next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
    if next_page:
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="Página Siguiente >>", text_color="blue",
                             url=urlparse.urljoin(item.url, next_page)))
    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Resolve the direct video URL, preferring the highest quality source.

    The player page embeds up to four source variables; ``video_alt_url3``
    appears to be the highest quality and ``video_url`` the base one, so
    they are probed in that order and the first non-empty match wins.

    Fixes: the resolved Item now keeps the human-readable ``item.title``
    instead of showing the raw stream URL as its title (same defect this
    commit fixes in the foxtube channel), and the repetitive fallback
    chain is collapsed into a loop.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    scrapedurl = ""
    for key in ("video_alt_url3", "video_alt_url2", "video_alt_url", "video_url"):
        scrapedurl = scrapertools.find_single_match(data, key + ": \'([^\']+)\'")
        if scrapedurl:
            break
    itemlist.append(Item(channel=item.channel, action="play", title=item.title,
                         fulltitle=item.title, url=scrapedurl,
                         thumbnail=item.thumbnail, plot=item.plot,
                         show=item.title, server="directo", folder=False))
    return itemlist
|
||||
|
||||
|
||||
17
plugin.video.alfa/channels/jizzbunker.json
Normal file
17
plugin.video.alfa/channels/jizzbunker.json
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"id": "jizzbunker",
|
||||
"name": "jizzbunker",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://s0.cdn3x.com/jb/i/logo-new.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
89
plugin.video.alfa/channels/jizzbunker.py
Normal file
89
plugin.video.alfa/channels/jizzbunker.py
Normal file
@@ -0,0 +1,89 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'http://jizzbunker.com/es'
|
||||
|
||||
def mainlist(item):
    """Build the channel's root menu: listing modes, categories and search."""
    logger.info()
    # (title, action, url-suffix) for the plain listing entries.
    entries = [
        ("Nuevas", "peliculas", "/newest"),
        ("Popular", "peliculas", "/popular1"),
        ("Tendencia", "peliculas", "/trending"),
        ("Longitud", "peliculas", "/longest"),
        ("Categorias", "categorias", "/channels/"),
    ]
    itemlist = [Item(channel=item.channel, title=title, action=action, url=host + suffix)
                for title, action, suffix in entries]
    # Search carries no URL; it is built later from the user's query.
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto* and return the matching video Items.

    Returns an empty list on any scraping failure so the UI never crashes.

    Fixes: removed the redundant local ``import sys`` (already imported at
    module level) and narrowed the bare ``except:`` to ``except Exception:``.
    """
    logger.info()
    # The site expects '+' as the word separator in search URLs.
    texto = texto.replace(" ", "+")
    item.url = host + "/search?query=%s/" % texto
    try:
        return peliculas(item)
    except Exception:
        # Best-effort: log the full exception info and degrade gracefully.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """List the site's channels (used as categories) with their video counts."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    # Flatten the markup so the regex can match across original line breaks.
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = ('<li><figure>.*?<a href="([^"]+)".*?'
              '<img class="lazy" data-original="([^"]+)" alt="([^"]+)".*?'
              '<span class="score">(\d+)</span>')
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    itemlist = []
    for cat_url, cat_thumb, cat_title, count in matches:
        # "channel" -> "channel30": presumably selects the 30-per-page
        # listing variant of the channel URL — TODO confirm against site.
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title=cat_title + " (" + count + ")",
                             url=cat_url.replace("channel", "channel30"),
                             thumbnail=cat_thumb, plot="", folder=True))
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List the videos on the current page, plus a pager Item when present."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    # Flatten the markup so the regex can match across original line breaks.
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = ('<li><figure>.*?<a href="([^"]+)/([^"]+).html".*?'
              '<img class="lazy" data-original="([^"]+)".*?'
              '<time datetime=".*?">([^"]+)</time>')
    itemlist = []
    for base_url, slug, video_thumb, duration in re.compile(patron, re.DOTALL).findall(data):
        video_url = base_url + "/" + slug + ".html"
        # The URL slug doubles as the display title, prefixed by duration.
        label = "[COLOR yellow]" + duration + "[/COLOR] " + slug
        itemlist.append(Item(channel=item.channel, action="play", title=label,
                             url=video_url, thumbnail=video_thumb, plot="",
                             contentTitle=label, infoLabels={'year': ""}))
    next_page_url = scrapertools.find_single_match(data, '<li><a href="([^"]+)" rel="next">→</a>')
    if next_page_url != "":
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="Página Siguiente >>", text_color="blue",
                             url=urlparse.urljoin(item.url, next_page_url),
                             folder=True))
    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Extract the direct MP4 source URLs from the page's player config."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = 'type:\'video/mp4\',src:\'([^\']+)\''
    itemlist = []
    for raw_url in scrapertools.find_multiple_matches(data, patron):
        # Force plain HTTP — presumably to avoid SSL problems in old
        # Kodi builds; TODO confirm whether HTTPS works now.
        itemlist.append(Item(channel=item.channel, action="play",
                             title=item.title, fulltitle=item.fulltitle,
                             url=raw_url.replace("https", "http"),
                             thumbnail=item.thumbnail, plot=item.plot,
                             show=item.title, server="directo", folder=False))
    return itemlist
|
||||
|
||||
@@ -15,7 +15,9 @@ host = 'http://www.perfectgirls.net'
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Top" , action="peliculas", url=host + "/top/3days/"))
|
||||
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
@@ -38,7 +40,7 @@ def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
# data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<li class="additional_list__item"><a href="([^"]+)">([^"]+)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
@@ -54,17 +56,25 @@ def peliculas(item):
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<div class="list__item_link"><a href="([^"]+)" title="([^"]+)">.*?data-original="([^"]+)".*?<time>([^"]+)</time>'
|
||||
patron = '<div class="list__item_link"><a href="([^"]+)" title="([^"]+)">.*?'
|
||||
patron += 'data-original="([^"]+)".*?'
|
||||
patron += '<time>(.*?)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,time in matches:
|
||||
plot = ""
|
||||
contentTitle = scrapedtitle
|
||||
title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle
|
||||
scrapedhd = scrapertools.find_single_match(time, '<div class="hd">([^"]+)</div>')
|
||||
if scrapedhd == 'HD':
|
||||
time = scrapertools.find_single_match(time, '([^"]+)</time>')
|
||||
title = "[COLOR yellow]" + time + "[/COLOR] " + "[COLOR red]" + scrapedhd + "[/COLOR] " + scrapedtitle
|
||||
else:
|
||||
time = scrapertools.find_single_match(time, '([^"]+)</time>')
|
||||
title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle
|
||||
scrapedthumbnail = "http:" + scrapedthumbnail
|
||||
url = urlparse.urljoin(item.url,scrapedurl)
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=scrapedthumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=scrapedthumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
|
||||
next_page = scrapertools.find_single_match(data, '<a class="btn_wrapper__btn" href="([^"]+)">Next</a></li>')
|
||||
if next_page:
|
||||
next_page = urlparse.urljoin(item.url, next_page)
|
||||
@@ -72,11 +82,11 @@ def peliculas(item):
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
def play(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<source src="([^"]+)" res="\d+" label="([^"]+)"'
|
||||
patron = '<source src="([^"]+)" res="\d+" label="([^"]+)" type="video/mp4" default/>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
itemlist.append(item.clone(action="play", title=scrapedtitle, fulltitle = item.title, url=scrapedurl))
|
||||
|
||||
15
plugin.video.alfa/channels/pornrewind.json
Normal file
15
plugin.video.alfa/channels/pornrewind.json
Normal file
@@ -0,0 +1,15 @@
|
||||
{
|
||||
"id": "pornrewind",
|
||||
"name": "pornrewind",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "https://www.pornrewind.com/static/images/logo-light-pink.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
]
|
||||
}
|
||||
|
||||
76
plugin.video.alfa/channels/pornrewind.py
Normal file
76
plugin.video.alfa/channels/pornrewind.py
Normal file
@@ -0,0 +1,76 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
|
||||
host = 'https://www.pornrewind.com'
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel's root menu: listing modes, categories and search."""
    logger.info()
    # (title, action, url-suffix) for the plain listing entries.
    entries = [
        ("Nuevos", "peliculas", "/videos/?sort_by=post_date"),
        ("Mejor valorados", "peliculas", "/videos/?sort_by=rating"),
        ("Mas vistos", "peliculas", "/videos/?sort_by=video_viewed"),
        ("Categorias", "categorias", "/categories/"),
    ]
    itemlist = [Item(channel=item.channel, title=title, action=action, url=host + suffix)
                for title, action, suffix in entries]
    # Search carries no URL; it is built later from the user's query.
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto* and return the matching video Items.

    Returns an empty list on any scraping failure so the UI never crashes.

    Fixes: removed the redundant local ``import sys`` (already imported at
    module level) and narrowed the bare ``except:`` to ``except Exception:``.
    """
    logger.info()
    # The site expects '+' as the word separator in search URLs.
    texto = texto.replace(" ", "+")
    item.url = host + "/search/%s/" % texto
    try:
        return peliculas(item)
    except Exception:
        # Best-effort: log the full exception info and degrade gracefully.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """Scrape the category thumbnails page into folder Items."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = ('<a class="thumb-categories" href="([^"]+)" title="([^"]+)">.*?'
              '<img class="lazyload" data-src="([^"]+)"')
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    return [Item(channel=item.channel, action="peliculas", title=cat_title,
                 url=cat_url, thumbnail=cat_thumb, plot="", folder=True)
            for cat_url, cat_title, cat_thumb in matches]
|
||||
|
||||
|
||||
def peliculas(item):
    """List the videos on the current page, plus a pager Item when present."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = ('<a class="thumb" href="([^"]+)" title="([^"]+)">.*?'
              '<img class="lazyload" data-src="([^"]+)".*?'
              '<span>(.*?)</span>')
    itemlist = []
    for video_url, video_title, video_thumb, duration in re.compile(patron, re.DOTALL).findall(data):
        full_url = urlparse.urljoin(item.url, video_url)
        # Prefix the clip duration (in yellow) to the title.
        label = "[COLOR yellow]" + duration + "[/COLOR] " + video_title
        # Videos route through "findvideos" on this channel (not "play").
        itemlist.append(Item(channel=item.channel, action="findvideos", title=label,
                             url=full_url, thumbnail=video_thumb, plot="",
                             contentTitle=label, infoLabels={'year': ""}))
    next_page = scrapertools.find_single_match(data, '<li class="direction"><a href="([^"]+)" data-ajax="pagination">')
    if next_page:
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="Página Siguiente >>", text_color="blue",
                             url=urlparse.urljoin(item.url, next_page)))
    return itemlist
|
||||
|
||||
136
plugin.video.alfa/channels/spankbang.py
Normal file
136
plugin.video.alfa/channels/spankbang.py
Normal file
@@ -0,0 +1,136 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
|
||||
host = 'https://spankbang.xxx'
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel's root menu: listing modes, categories and search."""
    logger.info()
    # (title, action, url-suffix) for the plain listing entries.
    entries = [
        ("Nuevos", "peliculas", "/new_videos/"),
        ("Mejor valorados", "peliculas", "/wall-note-1.html"),
        ("Mas vistos", "peliculas", "/wall-main-1.html"),
        ("Mas largos", "peliculas", "/wall-time-1.html"),
    ]
    itemlist = [Item(channel=item.channel, title=title, action=action, url=host + suffix)
                for title, action, suffix in entries]
    # Categories are scraped off the site root.
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
    # Search carries no URL; it is built later from the user's query.
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto* and return the matching video Items.

    Returns an empty list on any scraping failure so the UI never crashes.

    Fixes: removed the redundant local ``import sys`` (already imported at
    module level) and narrowed the bare ``except:`` to ``except Exception:``.
    """
    logger.info()
    # The site expects '+' as the word separator in search URLs.
    texto = texto.replace(" ", "+")
    item.url = host + "/search-%s-1.html" % texto
    try:
        return peliculas(item)
    except Exception:
        # Best-effort: log the full exception info and degrade gracefully.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """Scrape the category links off the home page into folder Items."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    # Note the leading space: it disambiguates the category anchors.
    patron = ' <a href="([^"]+)" class="link1">([^"]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    itemlist = []
    for cat_url, cat_title in matches:
        # Switch the category listing to date ordering and make it absolute.
        full_url = host + "/" + cat_url.replace(".html", "_date.html")
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title=cat_title, url=full_url, thumbnail="",
                             plot="", folder=True))
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List the videos on the current page; append a pager item when present.

    Each video block looks like:
        <div class="video-item" data-id="...">
          <a href="..."><img ... data-src="THUMB" alt="TITLE" ...>
          <span class="i-len"><i class="fa fa-clock-o"></i> MINUTES</span>
    """
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    patron = ('<div class="video-item" data-id="\d+">.*?'
              '<a href="([^"]+)".*?'
              'data-src="([^"]+)" alt="([^"]+)".*?'
              '<i class="fa fa-clock-o"></i>(.*?)</span>')
    for vid_url, vid_thumb, vid_title, vid_time in re.compile(patron, re.DOTALL).findall(data):
        title = "[COLOR yellow]" + vid_time + "[/COLOR] " + vid_title
        itemlist.append(Item(channel=item.channel, action="play", title=title,
                             url=urlparse.urljoin(item.url, vid_url),
                             thumbnail="http:" + vid_thumb, plot="",
                             contentTitle=title, infoLabels={'year': ""}))
    # <li class="next"><a href="/new_videos/2/">&raquo;</a></li>
    next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)">')
    if next_page:
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="Página Siguiente >>", text_color="blue",
                             url=urlparse.urljoin(item.url, next_page)))
    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Assemble the direct video URL from the player's inline JS variables."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = ("servervideo = '([^']+)'.*?"
              "path = '([^']+)'.*?"
              "filee = '([^']+)'.*?")
    for servervideo, path, filee in scrapertools.find_multiple_matches(data, patron):
        # NOTE(review): the hard-coded token below comes from the original
        # scraper — presumably a static site key; confirm it is still valid.
        direct_url = servervideo + path + "56ea912c4df934c216c352fa8d623af3" + filee
        itemlist.append(Item(channel=item.channel, action="play", title=item.title,
                             fulltitle=item.fulltitle, url=direct_url,
                             thumbnail=item.thumbnail, plot=item.plot,
                             show=item.title, server="directo", folder=False))
    return itemlist
|
||||
|
||||
17
plugin.video.alfa/channels/streamingporn.json
Normal file
17
plugin.video.alfa/channels/streamingporn.json
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"id": "streamingporn",
|
||||
"name": "streamingporn",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://streamingporn.xyz/wp-content/uploads/2017/06/streamingporn.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
105
plugin.video.alfa/channels/streamingporn.py
Normal file
105
plugin.video.alfa/channels/streamingporn.py
Normal file
@@ -0,0 +1,105 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'http://streamingporn.xyz'
|
||||
|
||||
def mainlist(item):
    """Root menu: film/video listings, pay-site catalogue, categories, search."""
    logger.info()
    entries = [
        ("Peliculas", "peliculas", host + "/category/movies/"),
        ("Videos", "peliculas", host + "/category/stream/"),
        ("Canal", "catalogo", host),
        ("Categorias", "categorias", host),
    ]
    itemlist = [Item(channel=item.channel, title=t, action=a, url=u)
                for t, a, u in entries]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Run a site search for *texto*; log and return [] on any scrape error."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
    except:
        # sys is already imported at module level; the local re-import was redundant.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def catalogo(item):
    """List pay-site channels taken from the site's PaySites menu block."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Narrow to the PaySites menu, then flatten whitespace markup.
    data = scrapertools.get_match(data, 'PaySites(.*?)<li id="menu-item-28040"')
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<li id="menu-item-\d+".*?<a href="([^"]+)">([^"]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for link, name in matches:
        itemlist.append(Item(channel=item.channel, action="peliculas", title=name,
                             url=link, thumbnail="", plot="", folder=True))
    return itemlist
|
||||
|
||||
|
||||
def categorias(item):
    """List categories taken from the site's Categories dropdown menu."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Narrow to the Categories submenu, then flatten whitespace markup.
    data = scrapertools.get_match(data, '<a href="#">Categories</a>(.*?)<li id="menu-item-30919')
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<li id="menu-item-\d+".*?<a href="([^"]+)">([^"]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for link, name in matches:
        itemlist.append(Item(channel=item.channel, action="peliculas", title=name,
                             url=urlparse.urljoin(item.url, link),
                             thumbnail="", plot="", folder=True))
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List post entries on the current page plus an infinite-scroll pager."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<div class="entry-featuredImg">.*?<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+)">'
    for link, thumb, name in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=item.channel, action="play", title=name,
                             url=link, thumbnail=thumb, plot="",
                             contentTitle=name, infoLabels={'year': ""}))
    next_page_url = scrapertools.find_single_match(data, '<div class="loadMoreInfinite"><a href="(.*?)" >Load More')
    if next_page_url != "":
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="Página Siguiente >>", text_color="blue",
                             url=urlparse.urljoin(item.url, next_page_url), folder=True))
    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Resolve embedded video links on the detail page via servertools."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=page)
    # Re-brand every resolved server item with this channel's metadata.
    for video in itemlist:
        video.title = item.title
        video.fulltitle = item.fulltitle
        video.thumbnail = item.thumbnail
        video.channel = item.channel
    return itemlist
|
||||
|
||||
17
plugin.video.alfa/channels/streamporno.json
Normal file
17
plugin.video.alfa/channels/streamporno.json
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"id": "streamporno",
|
||||
"name": "streamporno",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://pornstreams.eu/wp-content/uploads/2015/12/faviiconeporn.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
78
plugin.video.alfa/channels/streamporno.py
Normal file
78
plugin.video.alfa/channels/streamporno.py
Normal file
@@ -0,0 +1,78 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'http://streamporno.eu'
|
||||
|
||||
def mainlist(item):
    """Root menu. "Canal" and "Categorias" share categorias(), which
    branches on the item title to pick the right scrape pattern."""
    logger.info()
    entries = [
        ("Nuevas", "peliculas", host),
        ("Canal", "categorias", host),
        ("Categorias", "categorias", host),
    ]
    itemlist = [Item(channel=item.channel, title=t, action=a, url=u)
                for t, a, u in entries]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Run a site search for *texto*; log and return [] on any scrape error."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
    except:
        # sys is already imported at module level; the local re-import was redundant.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """List channels (menu items) or, when entered via "Categorias",
    the tag-category sidebar entries."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<li id="menu-item-.*?<a href="([^"]+)">([^"]+)</a>'
    if item.title == "Categorias":
        # Hand-made extra entry plus a different pattern for tag categories.
        itemlist.append(Item(channel=item.channel, title="Big Tits",
                             action="peliculas", url=host + "/?s=big+tits"))
        patron = '<li class="cat-item.*?<a href="([^"]+)" >([^"]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for link, name in matches:
        itemlist.append(Item(channel=item.channel, action="peliculas", title=name,
                             url=urlparse.urljoin(item.url, link),
                             thumbnail="", plot="", folder=True))
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List article entries; each resolves its links through findvideos."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<article id=.*?<a href="([^"]+)" title="([^"]+)">.*?src="([^"]+)"'
    for link, name, thumb in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=item.channel, action="findvideos", title=name,
                             url=urlparse.urljoin(item.url, link), thumbnail=thumb,
                             plot="", contentTitle=name, infoLabels={'year': ""}))
    next_page_url = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">»</a>')
    if next_page_url != "":
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="Página Siguiente >>", text_color="blue",
                             url=urlparse.urljoin(item.url, next_page_url), folder=True))
    return itemlist
|
||||
|
||||
@@ -14,3 +14,4 @@
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ def search(item, texto):
|
||||
def catalogo(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<div class="vidcountSp">(\d+)</div>.*?<a class="categoryTitle channelTitle" href="([^"]+)" title="([^"]+)">.*?data-original="([^"]+)"'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for cantidad,scrapedurl,scrapedtitle,scrapedthumbnail in matches:
|
||||
@@ -62,7 +62,7 @@ def categorias(item):
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
if item.title=="PornStars" :
|
||||
data = scrapertools.get_match(data,'</i> Hall Of Fame Pornstars</h2>(.*?)</section>')
|
||||
data = scrapertools.get_match(data,'</i> Hall Of Fame Pornstars</h1>(.*?)</section>')
|
||||
patron = '<a class="thumb" href="([^"]+)">.*?<img src="([^"]+)".*?<div class="vidcountSp">(.*?)</div>.*?<a class="categoryTitle".*?>([^"]+)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches:
|
||||
@@ -84,7 +84,7 @@ def categorias(item):
|
||||
def peliculas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a class=\'thumb no_ajax\' href=\'(.*?)\'.*?data-original=\'(.*?)\' alt="([^"]+)"><div class=\'videoDuration\'>([^<]+)</div>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
@@ -106,7 +106,7 @@ def peliculas(item):
|
||||
def play(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<meta itemprop="contentUrl" content="([^"]+)" />'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for url in matches:
|
||||
|
||||
16
plugin.video.alfa/channels/tubedupe.json
Normal file
16
plugin.video.alfa/channels/tubedupe.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "tubedupe",
|
||||
"name": "tubedupe",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "https://tubedupe.com/apple-touch-icon-180x180-precomposed.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
106
plugin.video.alfa/channels/tubedupe.py
Normal file
106
plugin.video.alfa/channels/tubedupe.py
Normal file
@@ -0,0 +1,106 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
|
||||
host = 'https://tubedupe.com'
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu: listings, models, channels, categories and search."""
    logger.info()
    entries = [
        ("Nuevos", "peliculas", host + "/latest-updates/"),
        ("Mejor valorados", "peliculas", host + "/top-rated/"),
        ("Mas vistos", "peliculas", host + "/most-popular/"),
        ("Modelos", "categorias", host + "/models/?sort_by=model_viewed"),
        ("Canal", "categorias", host + "/channels/?sort_by=cs_viewed"),
        ("Categorias", "categorias", host + "/categories/?sort_by=avg_videos_popularity"),
    ]
    itemlist = [Item(channel=item.channel, title=t, action=a, url=u)
                for t, a, u in entries]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Run a site search for *texto*; log and return [] on any scrape error."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/?q=%s" % texto
    try:
        return peliculas(item)
    except:
        # sys is already imported at module level; the local re-import was redundant.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """List categories/channels, or models, depending on the entry item.

    Bug fix: the original guard was ``item.title == "Categorias" or "Canal"``,
    which is always true (a non-empty string is truthy), so the models
    pattern in the else branch was unreachable.  Use a membership test.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if item.title in ("Categorias", "Canal"):
        patron = '<a href="([^"]+)" class="list-item" title="([^"]+)">.*?'
        patron += '<img src="([^"]+)".*?'
        patron += '<var class="duree">([^"]+) </var>'
    else:
        # "Modelos" entry: pornstar blocks carry a video count instead.
        patron = '<div class="block-pornstar">.*?<a href="([^"]+)" title="([^"]+)" >.*?src="([^"]+)".*?<div class="col-lg-4-fixed nb-videos">.*?<br>(\d+)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle, scrapedthumbnail, cantidad in matches:
        scrapedurl = urlparse.urljoin(item.url, scrapedurl)
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot="", folder=True))
    next_page = scrapertools.find_single_match(data, '<li class="active">.*?<a href="([^"]+)" title="Page')
    if next_page:
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(Item(channel=item.channel, action="categorias",
                             title="Página Siguiente >>", text_color="blue",
                             url=next_page))
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List video blocks with duration-prefixed titles; append pager if any."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = ('<div class="block-video">.*?'
              '<a href="([^"]+)" class="[^"]+" title="([^"]+)">.*?'
              '<img src="([^"]+)".*?'
              '<var class="duree">(.*?)</var>')
    for link, name, thumb, duration in re.compile(patron, re.DOTALL).findall(data):
        title = "[COLOR yellow]" + duration + "[/COLOR] " + name
        itemlist.append(Item(channel=item.channel, action="play", title=title,
                             url=urlparse.urljoin(item.url, link), thumbnail=thumb,
                             plot="", contentTitle=title, infoLabels={'year': ""}))
    next_page = scrapertools.find_single_match(data, '<li class="active">.*?<a href="([^"]+)" title="Page')
    if next_page:
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="Página Siguiente >>", text_color="blue",
                             url=urlparse.urljoin(item.url, next_page)))
    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Pick the best available stream, preferring alt_url3 > alt_url2 > alt_url > video_url."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    scrapedurl = ""
    for key in ('video_alt_url3', 'video_alt_url2', 'video_alt_url', 'video_url'):
        scrapedurl = scrapertools.find_single_match(data, key + ": '([^']+)'")
        if scrapedurl != "":
            break
    return [Item(channel=item.channel, action="play", title=scrapedurl,
                 fulltitle=item.title, url=scrapedurl, thumbnail=item.thumbnail,
                 plot=item.plot, show=item.title, server="directo", folder=False)]
|
||||
|
||||
|
||||
17
plugin.video.alfa/channels/xxxdan.json
Normal file
17
plugin.video.alfa/channels/xxxdan.json
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"id": "xxxdan",
|
||||
"name": "xxxdan",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://s0.cdn3x.com/xxxdan/i/logo.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
107
plugin.video.alfa/channels/xxxdan.py
Normal file
107
plugin.video.alfa/channels/xxxdan.py
Normal file
@@ -0,0 +1,107 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'http://xxxdan.com'
|
||||
|
||||
#NO SE REPRODUCE EL VIDEO QUE ENCUENTRA
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu: listings, categories and search."""
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Nuevas", action="peliculas", url=host + "/newest"))
    itemlist.append(Item(channel=item.channel, title="Popular", action="peliculas", url=host + "/popular30"))
    # Typo fix: was "Dururacion".
    itemlist.append(Item(channel=item.channel, title="Duracion", action="peliculas", url=host + "/longest"))
    itemlist.append(Item(channel=item.channel, title="HD", action="peliculas", url=host + "/channel30/hd"))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/channels"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Run a site search for *texto*; log and return [] on any scrape error."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search?query=%s" % texto
    try:
        return peliculas(item)
    except:
        # sys is already imported at module level; the local re-import was redundant.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def catalogo(item):
    """List the CLIPS channels from the channels page.

    Bug fix: the original referenced an undefined ``DEBUG`` name, raising
    NameError on the first match; log unconditionally through ``logger``.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Narrow to the CLIPS section, then flatten whitespace markup.
    data = scrapertools.get_match(data, '<h3>CLIPS</h3>(.*?)<h3>FILM</h3>')
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<li><a href="([^"]+)" title="">.*?<span class="videos-count">([^"]+)</span><span class="title">([^"]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, cantidad, scrapedtitle in matches:
        logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[]")
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title=scrapedtitle, url=scrapedurl,
                             thumbnail="", plot="", folder=True))
    return itemlist
|
||||
|
||||
|
||||
def categorias(item):
    """List tag categories; links are rewritten to the 30-per-page variant."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<a href="([^"]+)" rel="tag".*?title="([^"]+)".*?data-original="([^"]+)".*?<span class="score">(\d+)</span>'
    for link, name, thumb, count in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title=name + " (" + count + ")",
                             url=link.replace("channel", "channel30"),
                             thumbnail=thumb, plot="", folder=True))
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List videos; the pager link is normalised back to a relative path first."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<li><figure>\s*<a href="([^"]+)" class="img\s*" title="([^"]+)".*?data-original="([^"]+)".*?<time datetime="\w+">([^"]+)</time>'
    for link, name, thumb, duration in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=item.channel, action="play",
                             title="[COLOR yellow]" + duration + "[/COLOR] " + name,
                             url=link, thumbnail=thumb, plot="",
                             contentTitle=name, infoLabels={'year': ""}))
    next_page_url = scrapertools.find_single_match(data, '<li><a href="([^"]+)" rel="next">→</a>')
    if next_page_url != "":
        # Strip the absolute host and re-join so relative pager links also work.
        next_page_url = "/" + next_page_url.replace("http://xxxdan.com/", "")
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="Página Siguiente >>", text_color="blue",
                             url=urlparse.urljoin(item.url, next_page_url), folder=True))
    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Extract the direct media URL from the player and downgrade it to http."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    media_url = scrapertools.find_single_match(data, "src:'([^']+)'")
    media_url = media_url.replace("https", "http")
    return [Item(channel=item.channel, action="play", title=item.title,
                 fulltitle=item.fulltitle, url=media_url, thumbnail=item.thumbnail,
                 plot=item.plot, show=item.title, server="directo", folder=False)]
|
||||
|
||||
@@ -6,7 +6,6 @@
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://yuuk.net/wp-content/uploads/2018/06/yuuk_net_logo.png",
|
||||
"banner": "",
|
||||
],
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
|
||||
@@ -17,7 +17,7 @@ def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/list-genres/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar" , action="search"))
|
||||
return itemlist
|
||||
|
||||
@@ -39,11 +39,13 @@ def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title="Big Tits" , url="http://yuuk.net/?s=big+tit" , folder=True) )
|
||||
patron = 'menu-item-object-category"><a href="([^"]+)">.*?</style>([^"]+)</a>'
|
||||
itemlist.append( Item(channel=item.channel, title="Censored" , action="peliculas", url=host + "/category/censored/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Uncensored" , action="peliculas", url=host + "/category/uncensored/"))
|
||||
patron = '<li><a href="([^"]+)" title="[^"]+"><span>([^"]+)</span><span>([^"]+)</span></a></li>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
for scrapedurl,scrapedtitle,cantidad in matches:
|
||||
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
@@ -54,11 +56,12 @@ def peliculas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<div class="featured-wrap clearfix">.*?<a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
|
||||
patron = '<div class="featured-wrap clearfix">.*?<a href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?>#([^"]+) Full HD JAV</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,calidad in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = "[COLOR red]" + calidad + "[/COLOR] " + scrapedtitle
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
next_page_url = scrapertools.find_single_match(data,'<li><a rel=\'nofollow\' href=\'([^\']+)\' class=\'inactive\'>Next')
|
||||
if next_page_url!="":
|
||||
|
||||
Reference in New Issue
Block a user