(.*?)
.*?' patron += "(.*?)
(\d{4}) /.*?.*?'(\d+)'" matches = re.compile(patron, re.DOTALL).findall(data) for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedplot, year, video_id in matches: title = '%s [%s]' % (scrapedtitle, year) contentTitle = scrapedtitle thumbnail = scrapedthumbnail url = scrapedurl itemlist.append(item.clone(action='findvideos', title=title, url=url, thumbnail=thumbnail, contentTitle=contentTitle, video_id=video_id, infoLabels={'year':year})) elif item.type == 'series': patron = '(.*?)
(.*?)
# ---------------------------------------------------------------------------
# NOTE(review): this region of the file is corrupted. The HTML markup inside
# the `patron` regex string literals has been stripped out, splitting those
# literals across physical lines and fusing several function definitions onto
# one line. The statements below are the readable residue of, in order:
#   * the tail of list_all()'s 'series' branch: the scrape loop cloning
#     'seasons' items, tmdb.set_infoLabels(), and the "Siguiente >>"
#     pagination item (whose find_single_match pattern was also stripped
#     to an empty string, so pagination can never trigger as written);
#   * seasons(), whose body is truncated mid-`patron` and collides into
#     what appears to be the remains of findvideos(): a re.sub cleanup tail
#     (`'|\s{2,}', "", data)`), jsunpack unpacking of a packed eval blob,
#     server/id extraction, and a powvideo fallback;
#   * search(), which is intact (quotes the query, delegates to
#     search_results(), returns [] for an empty query);
#   * the head of search_results(), cut off at its opening `patron = '`.
# Recover the original pattern strings from version control / the upstream
# addon repository before attempting any functional edit here.
#
# BUG (noted, not fixable while the literals are lost): the result of
# servertools.get_servers_itemlist(...) is assigned to the misspelled name
# `itmelist` and therefore discarded — the '%s' placeholder prepended to each
# play-item title is never substituted with the capitalized server name.
# Should read `itemlist = servertools.get_servers_itemlist(...)`.
#
# NOTE(review): `video_info['idioma']` / `video_info['calidad']` reference a
# name with no visible binding — presumably a `for video_info in matches:`
# loop was lost in the corruption; confirm against the original source.
# ---------------------------------------------------------------------------
(\d{4}) /' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, year in matches: title = scrapedtitle contentSerieName = scrapedtitle thumbnail = scrapedthumbnail url = scrapedurl itemlist.append(item.clone(action='seasons', title=title, url=url, thumbnail=thumbnail, plot=scrapedplot, contentSerieName=contentSerieName, infoLabels={'year':year})) tmdb.set_infoLabels(itemlist, seekTmdb=True) # Paginación url_next_page = scrapertools.find_single_match(data,"") if url_next_page: itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all')) return itemlist def seasons(item): logger.info() itemlist=[] data=get_source(item.url) patron='|\s{2,}', "", data) packed = scrapertools.find_single_match(data, '(eval\(.*?);var') unpacked = jsunpack.unpack(packed) logger.debug('unpacked %s' % unpacked) server = scrapertools.find_single_match(unpacked, "src:.'(http://\D+)/") id = scrapertools.find_single_match(unpacked, "src:.'http://\D+/.*?description:.'(.*?).'") if server == '': if 'powvideo' in unpacked: id = scrapertools.find_single_match(unpacked ,",description:.'(.*?).'") server= 'https://powvideo.net' url = '%s/%s' % (server, id) if server != '' and id != '': language = IDIOMAS[video_info['idioma']] quality = CALIDADES[video_info['calidad']] title = ' [%s] [%s]' % (language, quality) itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=language, quality=quality)) itmelist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) return sorted(itemlist, key=lambda i: i.language) def search(item, texto): logger.info() texto = texto.replace(" ", "+") item.url = item.url + texto item.type = 'peliculas' if texto != '': return search_results(item) else: return [] def search_results(item): logger.info() itemlist=[] data=get_source(item.url) logger.debug(data) patron = '
(.*?)
(.*?)
# ---------------------------------------------------------------------------
# NOTE(review): continuation of the corrupted region — this line opens with
# the tail (`(\d{4})<'`) of search_results()'s shredded `patron` literal,
# followed by its readable body: the regex scrape loop that builds either
# movie items (action='findvideos', contentTitle set) or series items
# (content_type == 'Serie' -> action='seasons', contentSerieName set),
# then a tmdb.set_infoLabels_itemlist() lookup and the item list return.
#
# newest(categoria): builds the "latest" listing for a category. Maps
# 'peliculas' / 'infantiles' / 'terror' to host-relative URLs, delegates to
# list_all(), and pops a trailing "Siguiente >>" pagination item if present.
# NOTE(review): `itemlist[-1]` raises IndexError on an empty result, which
# the bare `except:` below then converts into a logged error and `return []`
# — apparently a deliberate best-effort pattern in this addon, though
# `except Exception:` would avoid also swallowing SystemExit /
# KeyboardInterrupt. Do not change while the rest of the file is corrupted.
# ---------------------------------------------------------------------------
(\d{4})<' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, content_type ,scrapedthumb, scrapedtitle, scrapedplot, year in matches: title = scrapedtitle url = scrapedurl thumbnail = scrapedthumb plot = scrapedplot if content_type != 'Serie': action = 'findvideos' else: action = 'seasons' new_item=Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, plot=plot, action=action, type=content_type, infoLabels={'year':year}) if new_item.action == 'findvideos': new_item.contentTitle = new_item.title else: new_item.contentSerieName = new_item.title itemlist.append(new_item) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) return itemlist def newest(categoria): logger.info() itemlist = [] item = Item() try: if categoria in ['peliculas']: item.url = host + 'peliculas' elif categoria == 'infantiles': item.url = host + 'peliculas/generos/animación' elif categoria == 'terror': item.url = host + 'peliculas/generos/terror' item.type='peliculas' itemlist = list_all(item) if itemlist[-1].title == 'Siguiente >>': itemlist.pop() except: import sys for line in sys.exc_info(): logger.error("{0}".format(line)) return [] return itemlist