    patron += '([^<]+)'  # plot
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle, plot in matches:
        itemlist.append(item.clone(channel=__channel__, action="findvideos", title=scrapedtitle.capitalize(),
                                   url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels={"plot": plot},
                                   fanart=scrapedthumbnail, viewmode="movie_with_plot", folder=True,
                                   contentTitle=scrapedtitle))

    # Extract the paginator
    paginacion = scrapertools.find_single_match(data, 'href="([^"]+)">Next &rsaquo;')
    if paginacion:
        itemlist.append(item.clone(channel=__channel__, action="peliculas",
                                   title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))

    return itemlist


def categorias(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(data)

    patron = 'data-lazy-src="([^"]+)".*?'  # img
    patron += 'href="([^"]+)">([^<]+).*?'  # url, title
    patron += '([^<]+)'  # num_vids
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle, vids in matches:
        title = "%s (%s)" % (scrapedtitle, vids.title())
        itemlist.append(item.clone(channel=__channel__, action="peliculas", fanart=scrapedthumbnail,
                                   title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
                                   viewmode="movie_with_plot", folder=True))

    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))

    try:
        return sub_search(item)
    # Catch the exception so the global search is not interrupted when one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
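

# The functions in this module assume the usual channel-module imports at the
# top of the file (a sketch inferred from the names used above, following the
# pelisalacarta-style core layout), plus the module-level __channel__ constant:
#
#   import re
#   import urlparse
#
#   from core import httptools
#   from core import scrapertools
#   from core import servertools
#   from platformcode import logger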
", "", data) patron = 'data-lazy-src="([^"]+)".*?' # img patron += 'title="([^"]+)" />.*?' # title patron += '
([^<]+)
' # plot matches = re.compile(patron, re.DOTALL).findall(data) for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches: itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, plot=plot, fanart=scrapedthumbnail, action="findvideos", thumbnail=scrapedthumbnail)) paginacion = scrapertools.find_single_match( data, "\d+") if paginacion: itemlist.append(item.clone(channel=__channel__, action="sub_search", title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion)) return itemlist def findvideos(item): itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data) patron = '' matches = scrapertools.find_multiple_matches(data, patron) for url in matches: server = servertools.get_server_from_url(url) title = "Ver en: [COLOR yellow](%s)[/COLOR]" % server itemlist.append(item.clone(action='play', title=title, server=server, url=url)) return itemlist
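

# Usage sketch (hypothetical URL; assumes the framework's Item class, whose
# keyword arguments become attributes on the item):
#
#   from core.item import Item
#
#   item = Item(channel=__channel__, action="findvideos",
#               url="http://example.com/pelicula/ejemplo/")
#   for video in findvideos(item):
#       print video.title, video.server, video.url
#
# Each returned item carries action='play', so selecting it in the UI hands
# its url/server pair to the matching server connector for resolution.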