]+src="([^"]+)"[\s\S]+?(.+?)'
+ data = scrapertools.find_single_match(data, '')
+ patronvideos = '(?s)[^<]+([^<]+).*?src="([^"]+)"[\s\S]+?(.+?)'
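+ # Each episode match yields (url, title, thumbnail, plot)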
matches = re.compile(patronvideos, re.DOTALL).findall(data)
itemlist = []
-
for url, title, thumbnail, plot in matches:
scrapedtitle = scrapertools.entityunescape(title)
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = thumbnail
scrapedplot = plot
- logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-
- episodio = scrapertools.get_match(scrapedtitle, '\s+#(.*?)$')
+ episodio = scrapertools.find_single_match(scrapedtitle, '\s+#(.*?)$')
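+ # Titles end in "#<episode>"; remove that marker to get the plain series title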
contentTitle = scrapedtitle.replace('#' + episodio, '')
-
itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, contentSeason=1, contentTitle=contentTitle))
-
return itemlist
def generos(item):
logger.info()
-
- # Download the page
- data = httptools.downloadpage(item.url).data
- data = scrapertools.get_match(data, '(.*?)')
- patronvideos = ' ([^<]+) '
- matches = re.compile(patronvideos, re.DOTALL).findall(data)
itemlist = []
-
+ data = httptools.downloadpage(item.url).data
+ data = scrapertools.find_single_match(data, '(.*?)')
+ patronvideos = '(?s)([^<]+) '
+ matches = re.compile(patronvideos, re.DOTALL).findall(data)
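+ # Each genre link yields (url, title)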
for url, title in matches:
scrapedtitle = title
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = ""
scrapedplot = ""
- logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-
itemlist.append(
Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
plot=scrapedplot, show=title, viewmode="movie_with_plot"))
-
return itemlist
def letras(item):
logger.info()
-
- # Download the page
data = httptools.downloadpage(item.url).data
- data = scrapertools.get_match(data, '')
+ data = scrapertools.find_single_match(data, '')
patronvideos = ' ([^<]+) '
matches = re.compile(patronvideos, re.DOTALL).findall(data)
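# Each entry in the A-Z index yields (url, title)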
itemlist = []
-
for url, title in matches:
scrapedtitle = title
scrapedurl = urlparse.urljoin(item.url, url)
- scrapedthumbnail = ""
- scrapedplot = ""
- logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-
itemlist.append(
- Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
- plot=scrapedplot, show=title, viewmode="movie_with_plot"))
+ Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl,
+ show=title, viewmode="movie_with_plot"))
return itemlist
def series(item):
logger.info()
-
- # Download the page
- data = httptools.downloadpage(item.url).data
- logger.debug("datito %s" % data)
-
- '''
- (sample of the listing markup; the plot text looks like:)
- El Reino de Segua ha ido perdiendo la guerra contra el Imperio de Ninterdo pero la situación ha cambiado
- con la aparición de un chico llamado Gear. Todos los personajes son parodias de protas de videojuegos de
- Nintendo y Sega respectivamente, como lo son Sonic the Hedgehog, Super Mario Bros., The Legend of Zelda,
- etc.
- '''
- patron = '([^<]+)[^<]+(.*?)<'
- matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
-
+ data = httptools.downloadpage(item.url).data
+ patron = '(?s)([^<]+)(.*?)<'
+ matches = scrapertools.find_multiple_matches(data, patron)
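+ # Each series match yields (url, title, thumbnail, plot)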
for url, title, thumbnail, plot in matches:
scrapedtitle = title
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = thumbnail
scrapedplot = plot
- logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, show=scrapedtitle,
viewmode="movie_with_plot"))
-
itemlist = sorted(itemlist, key=lambda it: it.title)
-
try:
- page_url = scrapertools.get_match(data, '> ')
+ page_url = scrapertools.find_single_match(data, '> ')
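# The pagination link is missing on the last page; the except below then skips the item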
itemlist.append(Item(channel=item.channel, action="series", title=">> Página siguiente",
url=urlparse.urljoin(item.url, page_url), viewmode="movie_with_plot", thumbnail="",
plot=""))
except:
pass
-
return itemlist
def episodios(item, final=True):
logger.info()
-
- # Download the page
- body = httptools.downloadpage(item.url).data
-
- try:
- scrapedplot = scrapertools.get_match(body, ' (.*?)')
- patron = '(.*?) '
- matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
-
- for url, title in matches:
- scrapedtitle = scrapertools.htmlclean(title)
-
- try:
- episodio = scrapertools.get_match(scrapedtitle, "Capítulo\s+(\d+)")
- titulo_limpio = re.compile("Capítulo\s+(\d+)\s+", re.DOTALL).sub("", scrapedtitle)
- if len(episodio) == 1:
- scrapedtitle = "1x0" + episodio + " - " + titulo_limpio
- else:
- scrapedtitle = "1x" + episodio + " - " + titulo_limpio
- except:
- pass
-
- scrapedurl = urlparse.urljoin(item.url, url)
- # scrapedthumbnail = ""
- # scrapedplot = ""
- logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-
- itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
- thumbnail=scrapedthumbnail, plot=scrapedplot, show=item.show))
-
- try:
- next_page = scrapertools.get_match(body, '\>\; ')
- next_page = urlparse.urljoin(item.url, next_page)
- item2 = Item(channel=item.channel, action="episodios", title=item.title, url=next_page,
- thumbnail=item.thumbnail, plot=item.plot, show=item.show, viewmode="movie_with_plot")
- itemlist.extend(episodios(item2, final=False))
- except:
- import traceback
- logger.error(traceback.format_exc())
-
- if final and config.get_videolibrary_support():
+ data = httptools.downloadpage(item.url).data
+ data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)')
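+ # The series page exposes a data-id that keys the paginated AJAX episode list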
+ CHANNEL_HEADERS = [
+ ["Host", "m.animeid.tv"],
+ ["X-Requested-With", "XMLHttpRequest"]
+ ]
+ page = 0
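+ # Walk the AJAX pages (ord=DESC) until the endpoint returns an empty list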
+ while True:
+ page += 1
+ u = "https://m.animeid.tv/ajax/caps?id=%s&ord=DESC&pag=%s" %(data_id, page)
+ data = httptools.downloadpage(u, headers=CHANNEL_HEADERS).data
+ # When there is no more data the endpoint returns: "list":[]
+ if '"list":[]' in data:
+ break
+ dict_data = jsontools.load(data)
+ episodes = dict_data['list']
+ for episode in episodes:
+ itemlist.append(Item(action="findvideos",
+ channel=item.channel,
+ title="1x" + episode["numero"] + " - " + episode["date"],
+ url=CHANNEL_HOST + episode['href'],
+ thumbnail=item.thumbnail,
+ show=item.show,
+ viewmode="movie_with_plot"
+ ))
+ if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show))
itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url,
action="download_all_episodes", extra="episodios", show=item.show))
-
return itemlist
def findvideos(item):
logger.info()
-
- data = httptools.downloadpage(item.url).data
itemlist = []
-
+ data = httptools.downloadpage(item.url).data
url_anterior = scrapertools.find_single_match(data, '« Capítulo anterior')
url_siguiente = scrapertools.find_single_match(data, 'Siguiente capítulo »')
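# Previous/next chapter links, turned into navigation items at the end of the list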
-
- data = scrapertools.find_single_match(data, '')
- data = data.replace("\\/", "/")
- data = data.replace("%3A", ":")
- data = data.replace("%2F", "/")
- logger.info("data=" + data)
-
- # http%3A%2F%2Fwww.animeid.moe%2Fstream%2F41TLmCj7_3q4BQLnfsban7%2F1440956023.mp4
- # http://www.animeid.moe/stream/41TLmCj7_3q4BQLnfsban7/1440956023.mp4
- # http://www.animeid.tv/stream/oiW0uG7yqBrg5TVM5Cm34n/1385370686.mp4
- patron = '(http://www.animeid.tv/stream/[^/]+/\d+.[a-z0-9]+)'
- matches = re.compile(patron, re.DOTALL).findall(data)
+ data = scrapertools.find_single_match(data, '').decode("unicode-escape")
+ data = data.replace("\\/", "/").replace("%3A", ":").replace("%2F", "/")
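+ # The stream URLs arrive JSON-escaped and percent-encoded; normalize them before matching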
+ patron = '(https://www.animeid.tv/stream/[^/]+/\d+\.[a-z0-9]+)'
+ matches = scrapertools.find_multiple_matches(data, patron)
encontrados = set()
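# Remember which direct URLs were already added so duplicates are skipped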
for url in matches:
if url not in encontrados:
itemlist.append(
Item(channel=item.channel, action="play", title="[directo]", server="directo", url=url, thumbnail="",
plot="", show=item.show, folder=False))
encontrados.add(url)
-
- from core import servertools
itemlist.extend(servertools.find_video_items(data=data))
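# Mark every detected server item as playable from this channel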
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.action = "play"
videoitem.folder = False
videoitem.title = "[" + videoitem.server + "]"
-
if url_anterior:
title_anterior = url_anterior.strip("/v/").replace('-', ' ').strip('.html')
itemlist.append(Item(channel=item.channel, action="findvideos", title="Anterior: " + title_anterior,