Updated

anitoonstv: search added; rexpelis: structure reworked; bitertv: test_video_exists updated; fembed: pattern updated
@@ -9,5 +9,15 @@
     "categories": [
         "tvshow",
         "anime"
     ],
+    "settings": [
+        {
+            "id": "include_in_global_search",
+            "type": "bool",
+            "label": "Incluir en busqueda global",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        }
+    ]
 }
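The new settings block is what opts the channel into the global search toggle. As a quick sanity check, the flag can be read straight from the channel JSON; a minimal sketch, assuming the file sits in the working directory (the path and print are illustrative, inside Kodi the value comes from the settings machinery):

# Hedged sketch: inspect the new flag directly from the channel JSON.
import json

with open("anitoonstv.json") as f:
    channel = json.load(f)

defaults = {s["id"]: s.get("default") for s in channel.get("settings", [])}
print(defaults.get("include_in_global_search"))  # True once this change lands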
@@ -22,7 +22,7 @@ list_servers = ['openload',
 list_quality = ['default']
 
 
-host = "http://www.anitoonstv.com"
+host = "https://www.anitoonstv.com"
 
 
 def mainlist(item):
@@ -38,17 +38,45 @@ def mainlist(item):
                          thumbnail=thumb_series, range=[0,19] ))
     itemlist.append(Item(channel=item.channel, action="lista", title="Especiales", url=host+"/catalogo.php?g=&t=especiales&o=0",
                          thumbnail=thumb_series, range=[0,19]))
+    itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
+                         thumbnail=thumb_series, range=[0,19]))
 
     itemlist = renumbertools.show_option(item.channel, itemlist)
     autoplay.show_option(item.channel, itemlist)
     return itemlist
 
 
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ", "+")
+    item.url = host + "/php/buscar.php"
+    item.texto = texto
+    if texto != '':
+        return sub_search(item)
+    else:
+        return []
+
+
+def sub_search(item):
+    logger.info()
+    itemlist = []
+    post = "b=" + item.texto
+    headers = {"X-Requested-With": "XMLHttpRequest"}
+    data = httptools.downloadpage(item.url, post=post, headers=headers).data
+    patron = "href='([^']+).*?"
+    patron += ">([^<]+)"
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedtitle in matches:
+        itemlist.append(item.clone(action = "episodios",
+                                   title = scrapedtitle,
+                                   url = scrapedurl
+                                   ))
+    return itemlist
 
 
 def lista(item):
     logger.info()
 
     itemlist = []
 
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
     #logger.info("Pagina para regex "+data)
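The added search flow boils down to one AJAX POST against /php/buscar.php with an X-Requested-With header, then a link/title regex over the returned fragment. A standalone sketch of the same request using requests instead of httptools (the endpoint and the b= form field come from the diff; everything else is illustrative):

# Standalone sketch of the new anitoonstv search request.
import re
import requests

host = "https://www.anitoonstv.com"

def buscar(texto):
    # Raw body, as in the channel: spaces become literal "+" characters.
    post = "b=" + texto.replace(" ", "+")
    headers = {"X-Requested-With": "XMLHttpRequest",
               "Content-Type": "application/x-www-form-urlencoded"}
    data = requests.post(host + "/php/buscar.php", data=post, headers=headers).text
    # Same pattern as sub_search: anchor href followed by its visible text.
    return re.findall(r"href='([^']+).*?>([^<]+)", data)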
@@ -98,7 +126,7 @@ def episodios(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-    patron = '<div class="pagina">(.*?)</ul>'
+    patron = '<div class="pagina">(.*?)cajaSocial'
     data = scrapertools.find_single_match(data, patron)
     patron_caps = "<li><a href='(.+?)'>Cap(?:i|í)tulo: (.+?) - (.+?)<\/a>"
     matches = scrapertools.find_multiple_matches(data, patron_caps)
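The patron change only moves the right-hand boundary of the episode block (the page no longer closes the list before the cajaSocial element); patron_caps itself is untouched. For reference, how it parses one entry (the <li> sample is made up):

# Illustrative parse of one episode entry; the HTML sample is hypothetical.
import re

patron_caps = r"<li><a href='(.+?)'>Cap(?:i|í)tulo: (.+?) - (.+?)</a>"
sample = "<li><a href='/ver/serie-1'>Capítulo: 01 - El comienzo</a>"
print(re.findall(patron_caps, sample))
# [('/ver/serie-1', '01', 'El comienzo')]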
@@ -173,6 +201,8 @@ def findvideos(item):
         if "goo" in url:
             url = googl(url)
             server='netutv'
+        if "hqq" in url:
+            server='netutv'
         if "ok" in url:
             url = "https:"+url
             server='okru'
@@ -59,29 +59,6 @@ def configuracion(item):
     return ret
 
 
-def estrenos(item):
-    logger.info()
-    itemlist = []
-    data = httptools.downloadpage(item.url).data
-    patron = 'item-pelicula.*?href="([^"]+).*?'
-    patron += 'src="([^"]+).*?'
-    patron += '<p>([^<]+).*?'
-    patron += '<span>([^<]+)'
-    matches = scrapertools.find_multiple_matches(data, patron)
-    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
-        scrapedtitle = scrapedtitle.replace("Película ","")
-        itemlist.append(Item(channel = item.channel,
-                             action = "findvideos",
-                             contentTitle = scrapedtitle,
-                             infoLabels = {'year':scrapedyear},
-                             thumbnail = scrapedthumbnail,
-                             title = scrapedtitle + " (%s)" %scrapedyear,
-                             url = scrapedurl
-                             ))
-    tmdb.set_infoLabels(itemlist)
-    return itemlist
-
-
 def search(item, texto):
     logger.info()
     item.url = host + "/suggest?que=" + texto
@@ -104,9 +81,7 @@ def sub_search(item):
     data = httptools.downloadpage(item.url).data
     token = scrapertools.find_single_match(data, 'csrf-token" content="([^"]+)')
     data = httptools.downloadpage(item.url + "&_token=" + token, headers=headers).data
-    #logger.info("Intel33 %s" %data)
    data_js = jsontools.load(data)["data"]["m"]
-    #logger.info("Intel44 %s" %data_js)
     for js in data_js:
         itemlist.append(Item(channel = item.channel,
                              action = "findvideos",
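sub_search keeps the two-step CSRF dance: fetch the suggest page, lift the csrf-token meta value, then replay the same query with &_token= appended. The handshake sketched with requests (the host below is a placeholder, not the real site):

# Hedged sketch of the csrf-token handshake; host is a placeholder.
import re
import requests

host = "https://rexpelis.example"

def suggest(texto):
    url = host + "/suggest?que=" + texto
    page = requests.get(url).text
    token = re.search(r'csrf-token" content="([^"]+)', page).group(1)
    # Replay the query with the extracted token and parse the JSON payload.
    return requests.get(url + "&_token=" + token).json()["data"]["m"]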
@@ -128,6 +103,51 @@ def sub_search(item):
     return itemlist
 
 
+def peliculas_gen(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    patron = 'item-pelicula.*?href="([^"]+)".*?'
+    patron += 'src="([^"]+)".*?'
+    patron += 'text-center">([^<]+).*?'
+    patron += '<p>([^<]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedthumbnail, scrapedyear, scrapedtitle in matches:
+        itemlist.append(Item(channel = item.channel,
+                             action = "findvideos",
+                             contentTitle = scrapedtitle,
+                             infoLabels = {'year':scrapedyear},
+                             thumbnail = scrapedthumbnail,
+                             title = scrapedtitle + " (%s)" %scrapedyear,
+                             url = scrapedurl
+                             ))
+    tmdb.set_infoLabels(itemlist)
+    return itemlist
+
+
+def estrenos(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    patron = 'item-pelicula.*?href="([^"]+).*?'
+    patron += 'src="([^"]+).*?'
+    patron += 'text-center">([^<]+).*?'
+    patron += '<p>([^<]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedthumbnail, scrapedyear, scrapedtitle in matches:
+        scrapedtitle = scrapedtitle.replace("Película ","")
+        itemlist.append(Item(channel = item.channel,
+                             action = "findvideos",
+                             contentTitle = scrapedtitle,
+                             infoLabels = {'year':scrapedyear},
+                             thumbnail = scrapedthumbnail,
+                             title = scrapedtitle + " (%s)" %scrapedyear,
+                             url = scrapedurl
+                             ))
+    tmdb.set_infoLabels(itemlist)
+    return itemlist
 
 
 def peliculas(item):
     logger.info()
     itemlist = []
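Both relocated functions encode the structure change on rexpelis: the movie card now carries the year in a text-center element before the <p> title, so the capture order flips to (url, thumbnail, year, title). A quick check against a made-up card:

# Illustrative test of the reworked card pattern (HTML sample is made up).
import re

patron = ('item-pelicula.*?href="([^"]+)".*?'
          'src="([^"]+)".*?'
          'text-center">([^<]+).*?'
          '<p>([^<]+)')
sample = ('<div class="item-pelicula"><a href="/pelicula/demo">'
          '<img src="/img/demo.jpg"><span class="text-center">2018</span>'
          '<p>Demo Title</p></a></div>')
url, thumb, year, title = re.findall(patron, sample)[0]
print(title, year)  # Demo Title 2018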
@@ -139,9 +159,7 @@ def peliculas(item):
     post = "page=%s&type=%s&_token=%s" %(item.page, item.type, token)
     if item.slug:
         post += "&slug=%s" %item.slug
-    #logger.info("Intel11 %s" %post)
     data = httptools.downloadpage(host + "/pagination", post=post, headers=headers).data
-    #logger.info("Intel11 %s" %data)
     patron = '(?s)href="([^"]+)".*?'
     patron += 'src="([^"]+)".*?'
     patron += 'text-center">([^<]+).*?'
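peliculas drives the same token through the site's /pagination endpoint; the body fields are exactly the ones built into the post string above. Sketched with requests (host is a placeholder; dict encoding stands in for the raw post string):

# Hedged sketch of the /pagination request; field names come from the diff.
import requests

def pagination(host, page, type_, token, slug=None):
    post = {"page": page, "type": type_, "_token": token}
    if slug:
        post["slug"] = slug
    return requests.post(host + "/pagination", data=post).text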
@@ -215,28 +233,6 @@ def generos(item):
     return itemlist
 
 
-def peliculas_gen(item):
-    logger.info()
-    itemlist = []
-    data = httptools.downloadpage(item.url).data
-    patron = 'item-pelicula.*?href="([^"]+)".*?'
-    patron += 'src="([^"]+)".*?'
-    patron += '<p>([^<]+).*?'
-    patron += '<span>([^<]+)'
-    matches = scrapertools.find_multiple_matches(data, patron)
-    for scrapedurl, scrapedthumbnail, scrapedtitle , scrapedyear in matches:
-        itemlist.append(Item(channel = item.channel,
-                             action = "findvideos",
-                             contentTitle = scrapedtitle,
-                             infoLabels = {'year':scrapedyear},
-                             thumbnail = scrapedthumbnail,
-                             title = scrapedtitle + " (%s)" %scrapedyear,
-                             url = scrapedurl
-                             ))
-    tmdb.set_infoLabels(itemlist)
-    return itemlist
-
-
 def annos(item):
     logger.info()
     itemlist = []
@@ -260,18 +256,18 @@ def annos(item):
 def findvideos(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    patron = '(?i)<iframe.*?src="([^"]+).*?'
-    patron += ''
+    patron = "video\[(\d)+\] = '([^']+)"
     matches = scrapertools.find_multiple_matches(data, patron)
-    for scrapedurl in matches:
-        titulo = "Ver en: %s"
+    for scrapedoption, scrapedurl in matches:
+        tit = scrapertools.find_single_match(data, 'option%s">([^<]+)' %scrapedoption)
+        if "VIP" in tit: tit = "fembed"
+        titulo = "Ver en %s" %tit.capitalize()
         itemlist.append(
             item.clone(channel = item.channel,
                        action = "play",
                        title = titulo,
-                       url = scrapedurl
+                       url = host + "/embed/%s/" %scrapedurl
                        ))
-    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
     tmdb.set_infoLabels(itemlist, __modo_grafico__)
     # Requerido para FilterTools
     itemlist = filtertools.get_links(itemlist, item, list_language)
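findvideos now reads the player's video[n] = '...' assignments instead of scraping iframes, then resolves each option's visible label from the matching optionN element, normalizing the VIP label to fembed. A dry run over made-up embed markup (note that (\d)+ still captures only one digit per match, which suffices for these pages):

# Illustrative run over hypothetical embed markup.
import re

data = ("video[1] = 'abc123'; video[2] = 'def456';"
        '<span class="option1">Fembed VIP</span>'
        '<span class="option2">Okru</span>')

for opcion, vid in re.findall(r"video\[(\d)+\] = '([^']+)", data):
    tit = re.search(r'option%s">([^<]+)' % opcion, data).group(1)
    if "VIP" in tit:
        tit = "fembed"
    print("Ver en %s" % tit.capitalize(), "/embed/%s/" % vid)
# Ver en Fembed /embed/abc123/
# Ver en Okru /embed/def456/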
@@ -296,5 +292,12 @@ def findvideos(item):
 
 def play(item):
     logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    url = scrapertools.find_single_match(data, '<iframe src="([^"]+)')
+    headers = {"Referer":item.url}
+    item.url = httptools.downloadpage(url, follow_redirects=False, only_headers=True, headers=headers).headers.get("location", "")
+    itemlist.append(item.clone())
+    itemlist = servertools.get_servers_itemlist(itemlist)
     item.thumbnail = item.contentThumbnail
-    return [item]
+    return itemlist
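play resolves the final stream by requesting the embedded iframe URL with redirects disabled and reading the Location header. The equivalent probe with requests (function and parameter names are illustrative):

# Equivalent of the only_headers/follow_redirects=False probe in play().
import requests

def resolve_location(embed_url, referer):
    r = requests.head(embed_url, headers={"Referer": referer},
                      allow_redirects=False)
    return r.headers.get("Location", "")  # empty string if no redirect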
@@ -9,7 +9,7 @@ headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/
 
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
     data = httptools.downloadpage(page_url).data
-    if "Archive no Encontrado" in data:
+    if "Archive no Encontrado" in data or "File has been removed" in data:
         return False, "[bitertv] El fichero ha sido borrado"
     return True, ""
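The bitertv check now recognizes two removal messages. If more markers turn up later, the test generalizes naturally; a small sketch (any marker beyond the two in the diff would be hypothetical):

# Generalized removal check; the tuple holds the two markers from the diff.
REMOVED_MARKERS = ("Archive no Encontrado", "File has been removed")

def looks_removed(data):
    return any(marker in data for marker in REMOVED_MARKERS)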
@@ -4,8 +4,8 @@
     "ignore_urls": [],
     "patterns": [
       {
-        "pattern": "(https://www.fembed.com/v/[A-z0-9]+)",
-        "url": "\\1"
+        "pattern": "((?:fembed|divload).com/v/[A-z0-9]+)",
+        "url": "https://www.\\1"
       }
     ]
   },
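The widened connector pattern drops the hard-coded scheme and host so that both fembed.com and divload.com links resolve, rebuilding the canonical URL in the url template. A quick check (sample links are illustrative; the unescaped dot and the [A-z0-9] class are the connector's own):

# Quick check of the widened pattern against illustrative links.
import re

pattern = r"((?:fembed|divload).com/v/[A-z0-9]+)"
for link in ("https://www.fembed.com/v/abc123", "http://divload.com/v/XYZ789"):
    m = re.search(pattern, link)
    if m:
        print("https://www." + m.group(1))
# https://www.fembed.com/v/abc123
# https://www.divload.com/v/XYZ789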