cachepage fix
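
Every hunk below applies the same change across the affected channels: the legacy scrapertools.cachePage(url) download helper is replaced by httptools.downloadpage(url).data, and several entry points are renamed from peliculas to lista to match. A minimal sketch of the pattern (hypothetical channel code; it assumes the addon's core.httptools module, whose downloadpage() response carries the page body in its .data attribute, as used throughout this diff):

# Hypothetical channel method illustrating the migration; core.httptools and
# platformcode.logger are the addon-internal modules this commit relies on.
from core import httptools
from platformcode import logger

def play(item):
    logger.info()
    itemlist = []
    # Before this commit: data = scrapertools.cachePage(item.url)
    # After: downloadpage() returns a response object; .data is the page body.
    data = httptools.downloadpage(item.url).data
    return itemlist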
@@ -109,7 +109,7 @@ def lista(item):

 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"')
     video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"')
     partes = video_url.split('||')
@@ -75,7 +75,7 @@ def lista(item):

 def play(item):
     logger.info()
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data

     itemlist = servertools.find_video_items(data=data)
     for videoitem in itemlist:
@@ -56,7 +56,7 @@ def categorias(item):

 def peliculas(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     patron = '<a href="([^"]+)" class="thumb">.*?'
     patron += '<img src="([^"]+)" alt="([^"]+)".*?'
     patron += '<span class="dur">(.*?)</span>'
@@ -78,7 +78,7 @@ def peliculas(item):

 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"')
     video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"')
     partes = video_url.split('||')
@@ -56,7 +56,7 @@ def categorias(item):

 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = scrapertools.get_match(data,'<ul class="cf">(.*?)<h2>Advertisement</h2>')
     patron = '<li>.*?<a href="([^"]+)".*?'
     patron += 'src="([^"]+)" alt="([^"]+)".*?'
@@ -79,7 +79,7 @@ def lista(item):

 def play(item):
     logger.info(item)
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     video_url = scrapertools.find_single_match(data, 'var video_url="([^"]*)"')
     video_url += scrapertools.find_single_match(data, 'video_url\+="([^"]*)"')
     partes = video_url.split('||')
@@ -86,7 +86,7 @@ def categorias(item):

 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     patron = '<article class="item" data-video-id="([^"]+)">.*?src="([^"]+)" alt="([^"]+)".*?<div class="thumbnail__info__right">(.*?)</div>'
     matches = re.compile(patron,re.DOTALL).findall(data)
@@ -107,7 +107,7 @@ def lista(item):

 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     video_url = scrapertools.find_single_match(data, 'var video_url="([^"]*)"')
     video_url += scrapertools.find_single_match(data, 'video_url\+=\'([^\']+)\'')
     partes = video_url.split('||')
@@ -11,9 +11,9 @@ from platformcode import logger
 host = 'http://sexkino.to'

 def mainlist(item):
-    logger.info("pelisalacarta.sexkino mainlist")
+    logger.info()
     itemlist = []
-    itemlist.append( Item(channel=item.channel, title="New" , action="peliculas", url= host + "/movies/"))
+    itemlist.append( Item(channel=item.channel, title="New" , action="lista", url= host + "/movies/"))
     itemlist.append( Item(channel=item.channel, title="Año" , action="anual", url= host))
     itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url= host))
@@ -26,7 +26,7 @@ def search(item, texto):
     texto = texto.replace(" ", "+")
     item.url = host + "/?s=%s" % texto
     try:
-        return peliculas(item)
+        return lista(item)
     except:
         import sys
         for line in sys.exc_info():
@@ -35,9 +35,9 @@ def search(item, texto):


 def categorias(item):
-    logger.info("pelisalacarta.sexkino categorias")
+    logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     patron = '<li class="cat-item cat-item-.*?<a href="(.*?)" >(.*?)</a> <i>(.*?)</i>'
     matches = re.compile(patron,re.DOTALL).findall(data)
     scrapertools.printMatches(matches)
@@ -45,52 +45,77 @@ def categorias(item):
         scrapedplot = ""
         scrapedthumbnail = ""
         scrapedtitle = scrapedtitle + " ("+cantidad+")"
-        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
+        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
+                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
     return itemlist

 def anual(item):
-    logger.info("pelisalacarta.sexkino anual")
+    logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     patron = '<li><a href="([^<]+)">([^<]+)</a>'
     matches = re.compile(patron,re.DOTALL).findall(data)
     scrapertools.printMatches(matches)
     for scrapedurl,scrapedtitle in matches:
         scrapedplot = ""
         scrapedthumbnail = ""
-        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
+        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
+                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
     return itemlist


-def peliculas(item):
-    logger.info("pelisalacarta.sexkino peliculas")
+def lista(item):
+    logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
-    #hay que hacer que coincida con el buscador
-    patron = '<article.*?<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+)".*?>(\d+)</span>'
+    data = httptools.downloadpage(item.url).data
+    patron = '<div class="poster">.*?'
+    patron += '<img src="([^"]+)" alt="([^"]+)">.*?'
+    patron += '<span class="quality">([^"]+)</span>.*?'
+    patron += '<a href="([^"]+)">'
     matches = re.compile(patron,re.DOTALL).findall(data)
     scrapertools.printMatches(matches)
-    for scrapedurl,scrapedthumbnail,scrapedtitle,date in matches:
+    for scrapedthumbnail,scrapedtitle,calidad,scrapedurl in matches:
         scrapedplot = ""
-        scrapedtitle = scrapedtitle + " (" + date + ")"
-        itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
-    next_page_url = scrapertools.find_single_match(data,'resppages.*?<a href="([^"]+)" ><span class="icon-chevron-right">')
-    if next_page_url!="":
-        next_page_url = urlparse.urljoin(item.url,next_page_url)
-        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) )
+        scrapedtitle = scrapedtitle + " (" + calidad + ")"
+        itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
+                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
+    next_page = scrapertools.find_single_match(data,'resppages.*?<a href="([^"]+)" ><span class="icon-chevron-right">')
+    if next_page != "":
+        next_page = urlparse.urljoin(item.url,next_page)
+        itemlist.append(item.clone(action="lista", title="Next page >>", text_color="blue", url=next_page) )
     return itemlist


 def findvideos(item):
-    logger.info("pelisalacarta.a0 findvideos")
+    logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
+
+    # <th>Watch online</th><th>Quality</th><th>Language</th><th>Added</th></tr></thead>
+    # <tbody>
+    # <tr id='link-3848'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=vidzella.me'> <a href='http://sexkino.to/links/69321-5/' target='_blank'>Watch online</a></td>
+    # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr>
+    # <tr id='link-3847'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=flashx.tv'> <a href='http://sexkino.to/links/69321-4/' target='_blank'>Watch online</a></td>
+    # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr>
+    # <tr id='link-3844'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=openload.co'> <a href='http://sexkino.to/links/69321-3/' target='_blank'>Watch online</a></td>
+    # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr>
+    # <tr id='link-3843'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=vidoza.net'> <a href='http://sexkino.to/links/69321-2/' target='_blank'>Watch online</a></td>
+    # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr>
+    # <tr id='link-3842'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=rapidvideo.ws'> <a href='http://sexkino.to/links/69321/' target='_blank'>Watch online</a></td>
+    # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr>
+    # </tbody></table></div></div></div></div>
+
+    patron = '<tr id=(.*?)</tr>'
+    matches = re.compile(patron,re.DOTALL).findall(data)
+    for match in matches:
+        url = scrapertools.find_single_match(match,'href="([^"]+)" target')
+        title = scrapertools.find_single_match(match,'<td><img src=.*?> (.*?)</td>')
+        itemlist.append(item.clone(action="play", title=title, url=url))
+
+    # <a id="link" href="https://vidzella.me/play#GS7D" class="btn" style="background-color:#1e73be">Continue</a>
+
     patron = '<iframe class="metaframe rptss" src="([^"]+)".*?<li><a class="options" href="#option-\d+">\s+(.*?)\s+<'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedurl,scrapedtitle in matches:
@@ -101,8 +126,8 @@ def findvideos(item):


 def play(item):
-    logger.info("pelisalacarta.sexkino play")
-    data = scrapertools.cachePage(item.url)
+    logger.info()
+    data = httptools.downloadpage(item.url).data
     itemlist = servertools.find_video_items(data=data)
     for videoitem in itemlist:
         videoitem.title = item.title
@@ -7,18 +7,16 @@ from core import scrapertools
 from core.item import Item
 from core import servertools
 from core import httptools
-from core import tmdb
-from core import jsontools

 host = 'https://www.spankwire.com'

 def mainlist(item):
     logger.info()
     itemlist = []
-    itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/recentvideos/straight"))
-    itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/home1/Straight/Month/Views"))
-    itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/home1/Straight/Month/Rating"))
-    itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/home1/Straight/Month/Duration"))
+    itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/recentvideos/straight"))
+    itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/home1/Straight/Month/Views"))
+    itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/home1/Straight/Month/Rating"))
+    itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=host + "/home1/Straight/Month/Duration"))
     #itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/Straight"))
     itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
     return itemlist
@@ -29,7 +27,7 @@ def search(item, texto):
     texto = texto.replace(" ", "+")
     item.url = host + "/search/?q=%s" % texto
     try:
-        return peliculas(item)
+        return lista(item)
     except:
         import sys
         for line in sys.exc_info():
@@ -42,7 +40,9 @@ def categorias(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    patron = '<div class="category-thumb"><a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)" />.*?<span>([^"]+)</span>'
+    patron = '<div class="category-thumb"><a href="([^"]+)".*?'
+    patron += '<img src="([^"]+)" alt="([^"]+)" />.*?'
+    patron += '<span>([^"]+)</span>'
     matches = re.compile(patron,re.DOTALL).findall(data)
     scrapertools.printMatches(matches)
     for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
@@ -50,16 +50,20 @@ def categorias(item):
         scrapedthumbnail = "http:" + scrapedthumbnail
         scrapedtitle = scrapedtitle + " (" + cantidad +")"
         scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/Submitted/59"
-        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
+        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
+                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
     return itemlist


-def peliculas(item):
+def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    patron = '<div class="video_thumb_wrapper">.*?<a href="([^"]+)".*?data-original="([^"]+)".*?title="([^"]+)".*?<div class="video_thumb_wrapper__thumb_info video_thumb_wrapper__duration">(.*?)</div>'
+    patron = '<div class="video_thumb_wrapper">.*?'
+    patron += '<a href="([^"]+)".*?data-original="([^"]+)".*?'
+    patron += 'title="([^"]+)".*?'
+    patron += '<div class="video_thumb_wrapper__thumb_info video_thumb_wrapper__duration">(.*?)</div>'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
         url = urlparse.urljoin(item.url,scrapedurl)
@@ -67,23 +71,23 @@ def peliculas(item):
         contentTitle = title
         thumbnail = scrapedthumbnail
         plot = ""
-        year = ""
-        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
-    next_page_url = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
+        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
+                              fanart=thumbnail, plot=plot, contentTitle = contentTitle))
+    next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
     #Para el buscador
-    if next_page_url=="":
-        next_page_url = scrapertools.find_single_match(data,'<div class="paginator_wrapper__buttons"><a class="" href="([^"]+)"')
-        next_page_url = urlparse.urljoin(item.url,next_page_url)
-    if next_page_url!="":
-        next_page_url = urlparse.urljoin(item.url,next_page_url)
-        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
+    if next_page=="":
+        next_page = scrapertools.find_single_match(data,'<div class="paginator_wrapper__buttons"><a class="" href="([^"]+)"')
+        next_page = urlparse.urljoin(item.url,next_page)
+    if next_page!="":
+        next_page = urlparse.urljoin(item.url,next_page)
+        itemlist.append( Item(channel=item.channel , action="lista" , title="Página Siguiente >>" , text_color="blue", url=next_page) )
     return itemlist


 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = scrapertools.get_match(data,'Copy Embed Code(.*?)For Desktop')
     patron = '<div class="shareDownload_container__item__dropdown">.*?<a href="([^"]+)"'
     matches = scrapertools.find_multiple_matches(data, patron)
@@ -56,7 +56,7 @@ def lista(item):

 def play(item):
     logger.info()
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     itemlist = servertools.find_video_items(data=data)
     for videoitem in itemlist:
         videoitem.title = item.title
@@ -38,7 +38,7 @@ def novedades(item):
     logger.info()

     # Descarga la página
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     # <a href="http://tubehentai.com/videos/slave_market_¨c_ep1-595.html"><img class="img" width="145" src="http://tubehentai.com/media/thumbs/5/9/5/./f/595/595.flv-3.jpg" alt="Slave_Market_¨C_Ep1" id="4f4fbf26f36
     patron = '<a href="(http://tubehentai.com/videos/[^"]+)"><img.*?src="(http://tubehentai.com/media/thumbs/[^"]+)" alt="([^"]+)"'
     matches = re.compile(patron, re.DOTALL).findall(data)
@@ -80,7 +80,7 @@ def play(item):
     # s1.addParam("flashvars","overlay=http://tubehentai.com/media/thumbs/5/2/3/9/c/5239cf74632cbTHLaBlueGirlep3%20%20Segment2000855.000001355.000.mp4
     # http://tubehentai.com/media/thumbs/5/2/3/9/c/5239cf74632cbTHLaBlueGirlep3%20%20Segment2000855.000001355.000.mp4
     # http://tubehentai.com/media/videos/5/2/3/9/c/5239cf74632cbTHLaBlueGirlep3%20%20Segment2000855.000001355.000.mp4?start=0
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     url = scrapertools.get_match(data, 's1.addParam\("flashvars","bufferlength=1&autostart=true&overlay=(.*?\.mp4)')
     url = url.replace("/thumbs", "/videos")
     # url = url+"?start=0"
@@ -9,16 +9,16 @@ from core import servertools
 from core.item import Item
 from platformcode import logger

 host = 'http://www.vidz7.com'


 def mainlist(item):
     logger.info()
     itemlist = []
-    itemlist.append(Item(channel=item.channel, action="lista", title="Útimos videos", url="http://www.vidz7.com/"))
+    itemlist.append(Item(channel=item.channel, action="lista", title="Útimos videos", url=host))
     itemlist.append(
-        Item(channel=item.channel, action="categorias", title="Categorias", url="http://www.vidz7.com/category/"))
-    itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
-                         url="http://www.vidz7.com/?s="))
+        Item(channel=item.channel, action="categorias", title="Categorias", url=host + "/category/"))
+    itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url="http://www.vidz7.com"))
     return itemlist

@@ -26,7 +26,7 @@ def search(item, texto):
     logger.info()

     texto = texto.replace(" ", "+")
-    item.url = "{0}{1}".format(item.url, texto)
+    item.url = host + "/?s=%s" % texto
     try:
         return lista(item)
     # Se captura la excepción, para no interrumpir al buscador global si un canal falla
@@ -52,38 +52,15 @@ def categorias(item):

 def lista(item):
     logger.info()

-    # Descarga la página
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}", "", data)
-    # <article id='post-id-40630' class='video-index ceil'>
-    # <div class='thumb-wrapp'>
-    # <a href='http://www.vidz78.com/2019/03/22/deux-blacks-tbm-pour-julia-30ans/189/' class='thumb' style='background-image:url("https://pp.userapi.com/c855416/v855416475/ab7f/utBev5x7QuA.jpg")'>
-    # <div class='overlay'></div>
-    # <div class='vl'>
-    # <div class="hd">HD</div> <div class="duration">36:28</div> </div>
-    # </a>
-    # </div>
-    # <div class='info-card'>
-    # <h6><a class='hp' href='http://www.vidz78.com/2019/03/22/deux-blacks-tbm-pour-julia-30ans/189/'>Jacquieetmicheltv - Deux blacks TBM pour Julia, 30ans !</a></h6>
-    # <time class="video-date" datetime="2019-03-22T10:32:46+00:00">Mar 22, 2019</time>
-    # <span> / 5.1k views</span>
-    # </div>
-    # </article>
-    # Extrae las entradas de la pagina seleccionada
     patron = "<a href='.*?.' class='thumb' style='background-image:url\(\"([^\"]+)\"\).*?"
     patron += "<div class=\"hd\">(.*?)</div>.*?"
     patron += "<div class=\"duration\">(.*?)</div>.*?"
     patron += "<h6><a class='hp' href='([^']+)'>(.*?)</a></h6>"
     matches = re.compile(patron, re.DOTALL).findall(data)
     itemlist = []

     for scrapedthumbnail, scrapedhd, duration, scrapedurl, scrapedtitle in matches:
         thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
         url = urlparse.urljoin(item.url, scrapedurl)
@@ -93,13 +70,9 @@ def lista(item):
         itemlist.append(Item(channel=item.channel, action="play", title=title, thumbnail=thumbnail, fanart=thumbnail,
                              fulltitle=title, url=url,
                              viewmode="movie", folder=True))

-    paginacion = scrapertools.find_single_match(data,
-                                                '<a class="active".*?.>\d+</a><a class="inactive" href ="([^"]+)">')
-
+    paginacion = scrapertools.find_single_match(data,'<a class="active".*?.>\d+</a><a class="inactive" href ="([^"]+)">')
     if paginacion:
         itemlist.append(Item(channel=item.channel, action="lista", title=">> Página Siguiente", url=paginacion))

     return itemlist
@@ -116,5 +89,5 @@ def play(item):
         videoitem.action = "play"
         videoitem.folder = False
         videoitem.title = item.title

     return itemlist
@@ -88,12 +88,14 @@ def lista(item):
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     patron = '<div class="video">.*?'
     patron += '<a href="([^"]+)".*?'
-    patron += '<span class="time">(.*?)</span>.*?'
+    patron += '<span class="time">(.*?)</span>(.*?)</span>.*?'
     patron += '<img src="([^"]+)" alt="([^"]+)"'
     matches = re.compile(patron,re.DOTALL).findall(data)
-    for scrapedurl,time,scrapedthumbnail,scrapedtitle in matches:
+    for scrapedurl,time,calidad,scrapedthumbnail,scrapedtitle in matches:
         scrapedtitle = scrapedtitle.replace(", ", " & ").replace("(", "(").replace(")", ")")
         title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle
+        if "hd-marker is-hd" in calidad:
+            title = "[COLOR yellow]" + time + " [/COLOR]" + "[COLOR red]" + "HD" + " [/COLOR]" + scrapedtitle
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl,
@@ -64,7 +64,7 @@ def categorias(item):
 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     patron = '<li class="border-radius-5 box-shadow">.*?'
     patron += 'src="([^"]+)".*?<a href="([^"]+)" title="([^"]+)">.*?'
@@ -91,7 +91,7 @@ def lista(item):
 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     variable = scrapertools.find_single_match(data,'<script type=\'text/javascript\'> str=\'([^\']+)\'')
     resuelta = re.sub("@[A-F0-9][A-F0-9]", lambda m: m.group()[1:].decode('hex'), variable)
     url = scrapertools.find_single_match(resuelta,'<iframe src="([^"]+)"')