.*?data-src="([^"]+)"'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
+ url = scrapedurl
+
+ contentSerieName = scrapedtitle
+ action = 'seasons'
+
+ thumbnail = scrapedthumbnail
+ new_item = Item(channel=item.channel, title=scrapedtitle, url=url, thumbnail=thumbnail,
+ contentSerieName=contentSerieName, action=action)
+
+ itemlist.append(new_item)
+
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+ # Paginacion
+ next_page = scrapertools.find_single_match(data, 'Página siguiente')
+ if next_page != '':
+ itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
+ url=next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
+ type=item.type))
+ return itemlist
+
+
+# NOTE(review): this hunk is corrupted — HTML tags appear to have been stripped
+# from the regex string literals (unterminated string at `patron = '`, empty ''
+# pattern in the Alfabetico branch), and the `else:` branch below bleeds into
+# the tail of what looks like a separate episode-listing function: it
+# references an undefined `season` and builds 1xNN episode titles. Code left
+# byte-identical; recover the original patch before applying.
+def section(item):
+
+ itemlist = []
+
+ full_data = get_source(host)
+
+ # Select the sub-section of the home page to scrape based on the menu title.
+ # TODO(review): the three patterns below are visibly truncated and will not
+ # match as written — restore them from the unmangled diff.
+ if item.title == 'Generos':
+ data = scrapertools.find_single_match(full_data, '>Géneros(.*?)')
+ elif item.title == 'Alfabetico':
+ data = scrapertools.find_single_match(full_data, '')
+ else:
+ data = scrapertools.find_single_match(full_data, 'Temporada %s.*?(.*?)' % season)
+ patron = '
.*?;.?([^<]+)'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+ # Episode loop (presumably from an `episodios(item)` function whose header
+ # was lost): numbers episodes sequentially and reuses the item's infoLabels.
+ infoLabels = item.infoLabels
+ epi = 1
+ for scrapedurl, scrapedtitle in matches:
+ url = scrapedurl
+ contentEpisodeNumber = str(epi)
+ title = '%sx%s - %s ' % (season, contentEpisodeNumber, scrapedtitle)
+ infoLabels['episode'] = contentEpisodeNumber
+ itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
+ contentSerieName=item.contentSerieName, contentEpisodeNumber=contentEpisodeNumber,
+ infoLabels=infoLabels))
+ epi += 1
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+ return itemlist
+
def search(item, text):
    """Search entry point: append *text* to the search URL and list results.

    Fixes vs. original:
      * item.url was mutated even when text == '', and the function then
        implicitly returned None; now the url is only modified when there is
        an actual query, and an empty list is returned otherwise.
      * scraper failures are caught and logged so the GUI does not crash
        (standard convention for channel search() functions).
    """
    logger.info()

    try:
        if text != '':
            item.url = item.url + text
            return list_all(item)
    except Exception:
        import traceback
        logger.error(traceback.format_exc())
    # Empty query or scraper error: return an empty, iterable result.
    return []
+
def findvideos(item):
    """Build the list of playable Items for *item*'s page.

    Each embedded iframe link carries an obfuscated id ('id='/'ud='); the
    real video URL is recovered by reversing the id and reading the
    Location header of a non-followed redirect.

    Fixes vs. original:
      * a link matching neither 'id=' nor 'ud=' previously left
        id_type/ir_type unbound and raised NameError — now skipped.
      * a response without a redirect 'location' header previously raised
        KeyError — now skipped via .get().
    """
    itemlist = []
    data = get_source(item.url)
    patron = 'iframe src="([^&]+)&'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for link in matches:
        if 'id=' in link:
            id_type = 'id'
            ir_type = 'ir'
        elif 'ud=' in link:
            id_type = 'ud'
            ir_type = 'ur'
        else:
            # Unknown link format: skip instead of crashing with NameError.
            continue
        link_id = scrapertools.find_single_match(link, '%s=(.*)' % id_type)
        base_link = scrapertools.find_single_match(link, '(.*?)%s=' % id_type)

        # The server expects the id reversed, both in the query string and
        # in the Referer used to authorize the redirect.
        ir = link_id[::-1]
        referer = base_link + '%s=%s&/' % (id_type, ir)
        video_data = httptools.downloadpage('%s%s=%s' % (base_link, ir_type, ir),
                                            headers={'Referer': referer},
                                            follow_redirects=False)
        # Guard: no redirect means no playable URL for this link.
        url = video_data.headers.get('location', '')
        if not url:
            continue
        # '%s' is filled in later with the identified server name.
        title = '%s'

        itemlist.append(Item(channel=item.channel, title=title, url=url, action='play',
                             language='', infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
+
+
+
+
+
diff --git a/plugin.video.alfa/channels/tupelicula.py b/plugin.video.alfa/channels/tupelicula.py
index 592d4710..f17a1978 100644
--- a/plugin.video.alfa/channels/tupelicula.py
+++ b/plugin.video.alfa/channels/tupelicula.py
@@ -141,8 +141,15 @@ def findvideos(item):
hidden_url = get_source('%splayer/rep/%s' % (host, scraped_id), player)
url = scrapertools.find_single_match(hidden_url, 'iframe src=.?"([^"]+)"').replace('\\','')
lang = get_language(lang_data)
- itemlist.append(Item(channel=item.channel, title='%s', url=url, action='play', language=lang,
- infoLabels=item.infoLabels))
+
+ if not config.get_setting('unify'):
+ title = ' %s' % lang
+ else:
+ title = ''
+
+ if url != '':
+ itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=lang,
+ infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())