(.*?)'
+
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
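+    # Build one Item per match; movies go straight to findvideos, doramas to their episode list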
+    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtype in matches:
+        scrapedtype = scrapedtype.lower()
+        new_item = Item(channel=item.channel, title=scrapedtitle, url=scrapedurl,
+                        thumbnail=scrapedthumbnail, type=scrapedtype)
+        if scrapedtype != 'dorama':
+            new_item.action = 'findvideos'
+            new_item.contentTitle = scrapedtitle
+        else:
+            new_item.contentSerieName = scrapedtitle
+            new_item.action = 'episodes'
+        itemlist.append(new_item)
+
+
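+    # Look up extra metadata on TMDb for the collected items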
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+    # Pagination
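+    # Movies keep the type[]=pelicula filter in the paging URL; doramas page the plain catalogue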
+    if itemlist:
+        if item.type != 'dorama':
+            page_base = host + 'catalogue?type[]=pelicula'
+        else:
+            page_base = host + 'catalogue'
+        next_page = scrapertools.find_single_match(data, '')
+        if next_page != '':
+            itemlist.append(Item(channel=item.channel, action='list_all', title='Siguiente >>>',
+                                 url=page_base + next_page,
+                                 thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png',
+                                 type=item.type))
+    return itemlist
+
+
+def latest_episodes(item):
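+    # Most recently published episodes on the site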
+    logger.info()
+    itemlist = []
+    data = get_source(item.url)
+
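+    # Each match yields url, thumbnail, series title and episode label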
+    patron = '(.*?) .*?episode>(.*?)'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
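+    # One playable item per episode; the title combines series name and episode label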
+    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedep in matches:
+        title = '%s %s' % (scrapedtitle, scrapedep)
+        itemlist.append(Item(channel=item.channel, action='findvideos', url=scrapedurl,
+                             thumbnail=scrapedthumbnail, title=title, contentSerieName=scrapedtitle,
+                             type='episode'))
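+    # Resolve TMDb metadata for the episode items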
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+    return itemlist
+
+
+def episodes(item):
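+    # Episode list for the selected dorama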
+    logger.info()
+    itemlist = []
+    data = get_source(item.url)
+    patron = '