([^<]+)
' # plot matches = re.compile(patron, re.DOTALL).findall(data) for scrapedthumbnail, scrapedurl, scrapedtitle, plot in matches: plot = scrapertools.decodeHtmlentities(plot) itemlist.append(item.clone(channel=__channel__, action="findvideos", title=scrapedtitle.capitalize(), url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels={"plot": plot}, fanart=scrapedthumbnail,viewmode="movie_with_plot", folder=True, contentTitle=scrapedtitle)) # Extrae el paginador paginacion = scrapertools.find_single_match(data, 'Next ›|#038;", "", data) patron = '
.*?.*?' # video_url
patron += 'data-username="([^"]+)".*?' # username
patron += 'title="([^"]+)".*?' # title
patron += 'data-profile="([^"]+)" />' # img
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, video_url, username, scrapedtitle, scrapedthumbnail in matches:
scrapedtitle = scrapedtitle.replace(' Chat gratis con webcam.', '')
itemlist.append(item.clone(channel=__channel__, action="play", title=scrapedtitle,
url=video_url, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
viewmode="movie_with_plot", folder=True, contentTitle=scrapedtitle))
# Extrae el paginador
paginacion = scrapertools.find_single_match(data, '\d+ ", "", data)
patron = 'data-lazy-src="([^"]+)".*?' # img
patron += '([^<]+).*?' # title
patron += '([^<]+)' # num_vids
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, scrapedtitle, vids in matches:
title = "%s (%s)" % (scrapedtitle, vids.title())
itemlist.append(item.clone(channel=__channel__, action="peliculas", fanart=scrapedthumbnail,
title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
viewmode="movie_with_plot", folder=True))
return itemlist
def search(item, texto):
    """Global-search entry point: build the site query URL and delegate to sub_search.

    Args:
        item: channel item whose ``url`` holds the site base URL.
        texto: user-entered search text.

    Returns:
        List of result items, or ``[]`` if this channel fails.
    """
    logger.info()
    # The site expects spaces encoded as '+' in the query string.
    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
    try:
        return sub_search(item)
    # Deliberate best-effort: a failing channel must not abort the global
    # search. Catch Exception (not bare ``except:``) so KeyboardInterrupt
    # and SystemExit still propagate, and log the full traceback in one call.
    except Exception:
        import traceback
        logger.error(traceback.format_exc())
        return []
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |
", "", data) patron = 'data-lazy-src="([^"]+)".*?' # img patron += 'title="([^"]+)" />.*?' # title patron += '
", "", data) patron = 'data-lazy-src="([^"]+)".*?' # img patron += 'title="([^"]+)" />.*?' # title patron += '
([^<]+)
' # plot matches = re.compile(patron, re.DOTALL).findall(data) for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches: itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, plot=plot, fanart=scrapedthumbnail, action="findvideos", thumbnail=scrapedthumbnail)) paginacion = scrapertools.find_single_match( data, "\d+") if paginacion: itemlist.append(item.clone(channel=__channel__, action="sub_search", title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion)) return itemlist def findvideos(item): itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data) patron = '