Fixed AnitoonsTV
@@ -32,7 +32,7 @@ def mainlist(item):
    itemlist = list()

    itemlist.append(Item(channel=item.channel, action="lista", title="Anime", url=host,
    itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host+"/lista-de-anime.php",
                         thumbnail=thumb_series))
    itemlist.append(Item(channel=item.channel, action="lista", title="Series Animadas", url=host,
                         thumbnail=thumb_series))
@@ -52,45 +52,38 @@ def lista(item):
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    if 'Novedades' in item.title:
        patron_cat = '<div class="activos"><h3>(.+?)<\/h2><\/a><\/div>'
        patron = '<a href="(.+?)"><h2><span>(.+?)<\/span>'
    else:
        patron_cat = '<li><a href=.+?>'
        patron_cat += str(item.title)
        patron_cat += '<\/a><div>(.+?)<\/div><\/li>'
        patron = "<a href='(.+?)'>(.+?)<\/a>"
    data = scrapertools.find_single_match(data, patron_cat)
    #logger.info("Pagina para regex "+data)
    patron = '<div class="serie">' #Regex header
    patron +="<a href='(.+?)'>" #scrapedurl
    patron +="<img src='(.+?)'.+?" #scrapedthumbnail
    patron +="<p class='.+?'>(.+?)<\/p>" #scrapedtitle
    patron +=".+?<span .+?>(.+?)<\/span>" #scrapedplot

    matches = scrapertools.find_multiple_matches(data, patron)
    for link, name in matches:
        if "Novedades" in item.title:
            url = link
            title = name.capitalize()
        else:
            url = host + link
            title = name
        if ":" in title:
            cad = title.split(":")
    for scrapedurl, scrapedthumbnail,scrapedtitle,scrapedplot in matches:
        if ":" in scrapedtitle:
            cad = scrapedtitle.split(":")
            show = cad[0]
        else:
            if "(" in title:
                cad = title.split("(")
                if "Super" in title:
            if "(" in scrapedtitle:
                cad = scrapedtitle.split("(")
                if "Super" in scrapedtitle:
                    show = cad[1]
                    show = show.replace(")", "")
                else:
                    show = cad[0]
            else:
                show = title
                show = scrapedtitle
        if "&" in show:
            cad = title.split("xy")
            cad = scrapedtitle.split("xy")
            show = cad[0]
        context1=[renumbertools.context(item), autoplay.context]
        itemlist.append(
            item.clone(title=title, url=url, plot=show, action="episodios", show=show,
                       context=context1))
    tmdb.set_infoLabels(itemlist)
        context = renumbertools.context(item)
        context2 = autoplay.context
        context.extend(context2)
        scrapedurl=host+scrapedurl
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, plot=scrapedplot,
                                   thumbnail=scrapedthumbnail, action="episodios", show=show, context=context))
    #tmdb.set_infoLabels(itemlist)
    return itemlist
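Note: a minimal, self-contained sketch of what the new composed patron in lista() captures, using plain re.findall (scrapertools.find_multiple_matches behaves like a findall here). The HTML fragment and the names in it are invented for illustration, not copied from the site:

    import re

    # Hypothetical listing fragment shaped like the markup the pattern expects (assumption).
    data = ("<div class=\"serie\"><a href='/ver/dragon-ball-super'>"
            "<img src='/img/dbs.jpg' alt=''>"
            "<p class='titulo'>Dragon Ball Super</p><br>"
            "<span class='sinopsis'>Goku defends the Earth.</span></div>")

    patron = '<div class="serie">'             # regex header
    patron += "<a href='(.+?)'>"               # scrapedurl
    patron += "<img src='(.+?)'.+?"            # scrapedthumbnail
    patron += "<p class='.+?'>(.+?)<\/p>"      # scrapedtitle
    patron += ".+?<span .+?>(.+?)<\/span>"     # scrapedplot

    # Prints: /ver/dragon-ball-super /img/dbs.jpg Dragon Ball Super Goku defends the Earth.
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in re.findall(patron, data):
        print(scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot)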
@@ -102,7 +95,7 @@ def episodios(item):
    patron = '<div class="pagina">(.+?)<\/div><div id="fade".+?>'
    data = scrapertools.find_single_match(data, patron)
    patron_caps = "<a href='(.+?)'>Capitulo: (.+?) - (.+?)<\/a>"
    patron_caps = "<li><a href='(.+?)'>Capitulo: (.+?) - (.+?)<\/a>"
    matches = scrapertools.find_multiple_matches(data, patron_caps)
    show = scrapertools.find_single_match(data, '<span>Titulo.+?<\/span>(.+?)<br><span>')
    scrapedthumbnail = scrapertools.find_single_match(data, "<img src='(.+?)'.+?>")
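Note: the episode pattern now anchors on the <li> tag. A short sketch of what the new patron_caps yields, run with re.findall over an invented episode-list fragment (the markup below is an assumption):

    import re

    # Hypothetical episode list; the real page markup is assumed, not copied.
    data = ("<li><a href='/ver/serie-capitulo-1'>Capitulo: 1 - The beginning</a></li>"
            "<li><a href='/ver/serie-capitulo-2'>Capitulo: 2 - The battle</a></li>")

    patron_caps = "<li><a href='(.+?)'>Capitulo: (.+?) - (.+?)<\/a>"

    # Prints one (url, episode number, episode title) tuple per <li> entry.
    for scrapedurl, episode, scrapedtitle in re.findall(patron_caps, data):
        print(scrapedurl, episode, scrapedtitle)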
@@ -150,29 +143,28 @@ def findvideos(item):
    data = httptools.downloadpage(item.url).data
    data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    data_vid = scrapertools.find_single_match(data1, '<div class="videos">(.+?)<\/div><div .+?>')

    data_vid = scrapertools.find_single_match(data1, 'var q = \[ \[(.+?)\] \]')
    logger.info("h12ghcvh1 "+data_vid)
    # name = scrapertools.find_single_match(data,'<span>Titulo.+?<\/span>([^<]+)<br>')
    scrapedplot = scrapertools.find_single_match(data, '<br><span>Descrip.+?<\/span>([^<]+)<br>')
    scrapedthumbnail = scrapertools.find_single_match(data, '<div class="caracteristicas"><img src="([^<]+)">')
    itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
    for server, quality, url in itemla:
        if "HQ" in quality:
            quality = "HD"
        if "Calidad Alta" in quality:
            quality = "HQ"
        if " Calidad media - Carga mas rapido" in quality:
            quality = "360p"
        server = server.lower().strip()
        if "ok" in server:
            server = 'okru'
        if "rapid" in server:
            server = 'rapidvideo'
        if "netu" in server:
            server = 'netutv'
    itemla = scrapertools.find_multiple_matches(data_vid, '"(.+?)"')
    for url in itemla:
        url=url.replace('\/', '/')
        server1=url.split('/')
        server=server1[2]
        if "." in server:
            server1=server.split('.')
            if len(server1)==3:
                server=server1[1]
            else:
                server=server1[0]
        if "goo" in url:
            url = googl(url)
        itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
                                   thumbnail=scrapedthumbnail, plot=scrapedplot,
        server='netutv'
        quality="360p"
        itemlist.append(item.clone(url=url, action="play",
                                   thumbnail=scrapedthumbnail, server=server, plot=scrapedplot,
                                   title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))

    autoplay.start(itemlist, item)
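Note: a standalone sketch of the host-splitting heuristic the reworked findvideos() uses to guess the server name from each embed URL. The helper name and sample URLs are invented for illustration:

    def server_from_url(url):
        # The extracted 'var q' data appears to escape slashes as \/, hence the replace in the diff.
        url = url.replace('\\/', '/')
        # Element [2] of the split is the host: ['https:', '', 'www.example.com', ...].
        host = url.split('/')[2]
        parts = host.split('.')
        # Keep the middle label of a three-part host (www.rapidvideo.com -> rapidvideo),
        # otherwise the first one (ok.ru -> ok).
        return parts[1] if len(parts) == 3 else parts[0]

    print(server_from_url('https:\\/\\/www.rapidvideo.com\\/e\\/abc123'))  # rapidvideo
    print(server_from_url('https://ok.ru/videoembed/42'))                  # ok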