    # NOTE: the HTML tags inside this pattern were lost during extraction; the markup below is a
    # plausible reconstruction keeping the two capture groups (season url, season number).
    matches = re.compile('<a href="([^"]+)"[^>]*>(?:<b>|)\s*Temporada (\d+)', re.DOTALL).findall(data)
for url, numtempo in matches:
        # contentSeason as int so the season filter in episodesxseason() compares correctly
        itemlist.append(item.clone( action='episodesxseason', title='Temporada %s' % numtempo, url=url,
                                    contentType='season', contentSeason=int(numtempo) ))
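    # Fallback: item.url may already point to a single season page ("...-season-N-....html").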
m = re.match('.*?-season-(\d+)-[a-z0-9A-Z]+-[a-z0-9A-Z]+\.html$', item.url)
if m:
        itemlist.append(item.clone( action='episodesxseason', title='Temporada %s' % m.group(1), url=item.url,
                                    contentType='season', contentSeason=int(m.group(1)) ))
tmdb.set_infoLabels(itemlist)
# if len(itemlist) == 1:
# itemlist = seasons_episodes(itemlist[0])
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
# return sorted(itemlist, key=lambda it: it.title)
return itemlist
# ~ # If a single url returns the episodes of every season, define a tracking_all_episodes routine to speed up the scrape in trackingtools.
# ~ def tracking_all_episodes(item):
# ~ return episodios(item)
def episodios(item):
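    # Aggregate every episode of the series by walking the season list returned by temporadas().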
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
# def episodios(item):
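    # Scrape the episodes of one season (filtered by item.contentSeason when it is set).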
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# ~ logger.debug(data)
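    # The title page only links to a "watch" page; the episode list is scraped from that second page.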
url = scrapertools.find_single_match(data, 'href="([^"]+)" action="watch"')
data = httptools.downloadpage(url).data
# ~ logger.debug(data)
    # NOTE: the HTML markup inside this pattern was lost during extraction; the tags below are a
    # plausible reconstruction keeping the group order expected by the loop:
    # (season, episode, url, thumb, title).
    patron = '<li[^>]*data-season="(\d+)"[^>]*data-episode="(\d+)"[^>]*>\s*<a href="([^"]+)"[^>]*>'
    patron += '\s*<img src="([^"]+)"[^>]*>\s*([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for season, episode, url, thumb, title in matches:
if item.contentSeason and item.contentSeason != int(season):
continue
titulo = '%sx%s %s' % (season, episode, title)
        itemlist.append(item.clone( action='findvideos', url=url, title=titulo, thumbnail=thumb,
                                    contentType='episode', contentSeason=int(season), contentEpisodeNumber=int(episode) ))
tmdb.set_infoLabels(itemlist)
return itemlist
def detectar_server(servidor):
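    # Normalise the server label shown on the page to the server id used by the resolvers.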
servidor = servidor.lower()
if 'server ' in servidor: return 'directo'
elif servidor == 'fast': return 'fembed'
    # ~ elif 'server 1' in servidor: return 'fastproxycdn' # nonexistent
# ~ elif 'server 4' in servidor: return '404' # error 404 !?
return servidor
def findvideos(item):
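    # List the playback options of the current title; each one is handed to play() for resolving.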
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# ~ logger.debug(data)
    # NOTE: the original token pattern and the loop that builds the server list were lost during
    # extraction; the block below is a hedged reconstruction based on detectar_server() and on the
    # Items that play() expects. The exact markup, and how the csrf token was used, are unknown.
    token = scrapertools.find_single_match(data, 'name="csrf-token" content="([^"]+)"')

    # Each playback option exposes a label ('Server 1', 'Fast', ...) and an intermediate url.
    matches = re.compile('href="([^"]+)"[^>]*>\s*([^<]+)</a>', re.DOTALL).findall(data)
    for url, servidor in matches:
        server = detectar_server(servidor.strip())
        itemlist.append(item.clone( action='play', title=servidor.strip(), url=url, server=server ))

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def play(item):
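    # Resolve the final video url from the intermediate option page.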
logger.info()
itemlist = []
data = httptools.downloadpage(item.url, ignore_response_code=True).data
# ~ logger.debug(data)
url = scrapertools.find_single_match(data, '