", start)
# scrapedplot = html[start:end]
# scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
# scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
scrapedplot = ""
scrapedtitle = scrapedtitle.replace("streaming", "")
scrapedtitle = scrapedtitle.replace("_", " ")
scrapedtitle = scrapedtitle.replace("-", " ")
scrapedtitle = scrapedtitle.title()
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
viewmode="movie_with_plot",
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True))
# Paginazione
patronvideos = ''
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
scrapedurl = scrapedurl.replace("&", "&")
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def findvideos(item):
# Entry point for the channel's "findvideos" action: download the detail
# page at item.url and scrape host links, grouped under <strong> section
# headings, into `links` as [title, url, server] triples.
# NOTE(review): this function body is corrupted — indentation has been
# stripped to column 0 and several string/regex literals containing
# angle-bracket sequences were eaten by an HTML-tag stripper (see the
# truncated find()/findall()/finditer() patterns flagged below). Restore
# the original source from the upstream channel file before running; the
# comments here only describe what the surviving tokens show.
logger.info("kod.documentaristreamingda findvideos")
# Raw HTML of the episode/detail page.
data = httptools.downloadpage(item.url).data
# Accumulator for scraped [title, url, server] entries (server is
# hard-coded 'unknown' below).
links = []
# NOTE(review): corrupted line — the data.find() argument lost its
# closing quote, and the stray trailing "]" suggests this line was fused
# with a list comprehension (compare the finditer() on the last line).
# TODO: recover the original search string from upstream.
begin = data.find(' (.*?)<\/strong><\/p>', mdiv)]
if items:
_title = ''
# `items` appears to be [match_start, heading_text] pairs (see the
# finditer() comprehension below); each iteration slices one section.
for idx, val in enumerate(items):
# Slice from this heading's offset up to the next heading's offset,
# or to the end of mdiv for the final section.
if idx == len(items) - 1:
_data = mdiv[val[0]:-1]
else:
_data = mdiv[val[0]:items[idx + 1][0]]
# NOTE(review): pattern damaged — the anchor-tag groups (href/text)
# were truncated by tag stripping, so `link` no longer unpacks as the
# (url, text) tuple the loop body indexes with link[0]/link[1].
# TODO: restore the original regex.
for link in re.findall('(?:]+>)*(?:)*([^<]+)', _data):
# Deduplicate by URL: skip if link[0] is already stored as the
# url field (l[1]) of a previous entry — presumably links held
# [title, url, server]; verify once the regex is restored.
if not link[0].strip() in [l[1] for l in links]:
# Carry the section title forward unless the anchor text is
# just a fragment of the URL itself.
if not link[1].strip() in link[0]: _title = link[1].strip()
links.append([_title, link[0].strip(), 'unknown'])
# NOTE(review): truncated line — the finditer() pattern string is
# unterminated (the regex literal was lost) and the statement is cut
# off; it also appears to belong BEFORE the `if items:` block above,
# since it defines `items`. TODO: confirm against upstream source.
items = [[m.start(), m.group(1)] for m in re.finditer('