@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.29" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.30" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,13 +19,9 @@
</assets>
<news>[B]These are the changes in this version:[/B]
[COLOR green][B]Fixes[/B][/COLOR]
¤ pack +18 ¤ cinehindi ¤ anonfile
¤ fembed ¤ doomtv ¤ vk
¤ vshare ¤ CineCalidad ¤ seriesblanco
¤ dospelis
¤ newpct1 ¤ inkapelis ¤ mp4upload

[COLOR green][B]New features[/B][/COLOR]
¤ cineonline ¤ pelix
¤ Thanks to @mac12m99 for contributing to this version

</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

@@ -25,14 +25,14 @@ def mainlist(item):
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas",
url=urlparse.urljoin(host, "p/peliculas.html"), type='pl', first=0))
itemlist.append(Item(channel=item.channel, action="lista", title="Series",
url=urlparse.urljoin(host, "p/series.html"), type='sr', first=0))
url=urlparse.urljoin(host, "/category/pelicula"), type='pl', pag=1))
#itemlist.append(Item(channel=item.channel, action="lista", title="Series",
# url=urlparse.urljoin(host, "/category/serie"), type='sr', pag=1))
itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre'))
itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality'))
itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host, cat='abc'))
itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host, cat='year'))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/search?q="))
#itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/search?q="))
autoplay.show_option(item.channel, itemlist)
return itemlist

@@ -43,18 +43,18 @@ def category(item):
data = httptools.downloadpage(host).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
if item.cat == 'abc':
data = scrapertools.find_single_match(data, '<span>Orden Alfabético</span>.*?</ul>')
data = scrapertools.find_single_match(data, '<div class="Body Container">(.+?)<main>')
elif item.cat == 'genre':
data = scrapertools.find_single_match(data, '<span>Géneros</span>.*?</ul>')
data = scrapertools.find_single_match(data, '<a>Géneros<\/a><ul class="sub.menu">(.+?)<a>Año<\/a>')
elif item.cat == 'year':
data = scrapertools.find_single_match(data, '<span>Año</span>.*?</ul>')
data = scrapertools.find_single_match(data, '<a>Año<\/a><ul class="sub.menu">(.+?)<a>Idioma<\/a>')
elif item.cat == 'quality':
data = scrapertools.find_single_match(data, '<span>Calidad</span>.*?</ul>')
patron = "<li.*?>([^<]+)<a href='([^']+)'>"
data = scrapertools.find_single_match(data, '<a>Calidad<\/a><ul class="sub-menu">(.+?)<a>Géneros<\/a>')
patron = '<li.*?><a href="(.*?)">(.*?)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
for scrapedurl,scrapedtitle in matches:
if scrapedtitle != 'Próximas Películas':
itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl, type='cat', first=0))
itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl, type='cat', pag=0))
return itemlist

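The hunk above flips the capture order: the old patron captured (title, url) while the new one captures (url, title), which is why the for-loop unpacking changes as well. A minimal, self-contained sketch of the new pattern against a made-up menu fragment (the real markup comes from the site):

import re

# Hypothetical <li> fragment shaped like the site's category menu
data = '<li class="x"><a href="/category/accion">Acción</a></li><li class="x"><a href="/category/drama">Drama</a></li>'

patron = '<li.*?><a href="(.*?)">(.*?)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, scrapedtitle in matches:
    # group 1 is the href, group 2 the visible title
    print(scrapedurl, scrapedtitle)
# -> /category/accion Acción
#    /category/drama Drama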
@@ -63,6 +63,7 @@ def search_results(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
logger.info(data)
patron = '<span class=.post-labels.>([^<]+)</span>.*?class="poster-bg" src="([^"]+)"/>.*?<h4>.*?'
patron +=">(\d{4})</a>.*?<h6>([^<]+)<a href='([^']+)"
matches = scrapertools.find_multiple_matches(data, patron)
@@ -90,28 +91,6 @@ def search(item, texto):
if texto != '':
return search_results(item)


def episodios(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(item.url).data
data = data.replace('"ep0','"epp"')
patron = '(?is)<div id="ep(\d+)".*?'
patron += 'src="([^"]+)".*?'
patron += '(href.*?)fa fa-download'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedepi, scrapedthumbnail, scrapedurls in matches:
title="1x%s - %s" % (scrapedepi, item.contentSerieName)
urls = scrapertools.find_multiple_matches(scrapedurls, 'href="([^"]+)')
itemlist.append(item.clone(action='findvideos', title=title, url=item.url, thumbnail=scrapedthumbnail, type=item.type,
urls = urls, infoLabels=item.infoLabels))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
url=item.url, action="add_serie_to_library", extra="episodios",
contentSerieName=item.contentSerieName))
return itemlist


def lista(item):
logger.info()
next = True
@@ -119,64 +98,37 @@ def lista(item):

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)

css_data = scrapertools.find_single_match(data, "<style id='page-skin-1' type='text/css'>(.*?)</style>")

data = scrapertools.find_single_match(data, "itemprop='headline'>.*?</h2>.*?</ul>")

patron = '<span class="([^"]+)">.*?<figure class="poster-bg">(.*?)<img src="([^"]+)" />'
patron += '(.*?)</figure><h6>([^<]+)</h6><a href="([^"]+)"></a>'

patron = '<article .*?">'
patron += '<a href="([^"]+)"><.*?><figure.*?>' #scrapedurl
patron += '<img.*?src="([^"]+)".*?>.*?' #scrapedthumbnail
patron += '<h3 class=".*?">([^"]+)<\/h3>' #scrapedtitle
patron += '<span.*?>([^"]+)<\/span>.+?' #scrapedyear
patron += '<a.+?>([^"]+)<\/a>' #scrapedtype
matches = scrapertools.find_multiple_matches(data, patron)

first = int(item.first)
last = first + 19
if last > len(matches):
last = len(matches)
next = False
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedtype in matches:
title="%s - %s" % (scrapedtitle,scrapedyear)

for scrapedtype, scrapedyear, scrapedthumbnail, scrapedquality, scrapedtitle, scrapedurl in matches[first:last]:
year = scrapertools.find_single_match(scrapedyear, '<span>(\d{4})</span>')
new_item = Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
type=scrapedtype, infoLabels={'year':scrapedyear})

if not year:
class_year = scrapertools.find_single_match(scrapedyear, 'class="([^\"]+)"')
year = scrapertools.find_single_match(css_data, "\." + class_year + ":after {content:'(\d{4})';}")
if not year:
year = scrapertools.find_single_match(data, "headline'>(\d{4})</h2>")

qual = ""
if scrapedquality:
patron_qualities='<i class="([^"]+)"></i>'
qualities = scrapertools.find_multiple_matches(scrapedquality, patron_qualities)

for quality in qualities:
patron_desc = "\." + quality + ":after {content:'([^\']+)';}"
quality_desc = scrapertools.find_single_match(css_data, patron_desc)

qual = qual+ "[" + quality_desc + "] "

title="%s [%s] %s" % (scrapedtitle,year,qual)

new_item = Item(channel=item.channel, title=title, url=host+scrapedurl, thumbnail=scrapedthumbnail,
type=scrapedtype, infoLabels={'year':year})

if scrapedtype.strip() == 'sr':
if scrapedtype == 'sr':
new_item.contentSerieName = scrapedtitle
new_item.action = 'episodios'
else:
new_item.contentTitle = scrapedtitle
new_item.action = 'findvideos'

if scrapedtype == item.type or item.type == 'cat':
itemlist.append(new_item)

itemlist.append(new_item)

tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

#pagination
url_next_page = item.url
first = last
if next:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', first=first))

pag = item.pag + 1
url_next_page = item.url+"/page/"+str(pag)+"/"
if len(itemlist)>19:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', pag=pag))
return itemlist

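The pagination change is the heart of this hunk: the old code fetched everything once and sliced the match list 19 items at a time via first/last, while the new code walks /page/N/ URLs via pag. A minimal, self-contained sketch of the index-based scheme, using a hypothetical paginate helper and a dummy list standing in for matches:

def paginate(matches, first, page_size=19):
    # Slice [first:last] and report whether a "Siguiente >>" item is still needed
    last = first + page_size
    has_next = True
    if last > len(matches):
        last = len(matches)
        has_next = False
    return matches[first:last], last, has_next

items = list(range(45))             # dummy scraped results
page, next_first, more = paginate(items, 0)
print(len(page), next_first, more)  # 19 19 True
page, next_first, more = paginate(items, 38)
print(len(page), next_first, more)  # 7 45 False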
@@ -1315,7 +1315,7 @@ def findvideos(item):
#Rename the channel to the chosen clone's name. URLs updated
host = scrapertools.find_single_match(item.url, '(http.?\:\/\/(?:www.)?\w+\.\w+\/)')
item.channel_host = host
item.category = host.capitalize()
item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()

verify_fo = True #Check whether the clone to be used is active
item, data = generictools.fail_over_newpct1(item, verify_fo)
@@ -1446,8 +1446,9 @@ def findvideos(item):
if scrapertools.find_single_match(data, patron):
patron = patron_alt
url_torr = scrapertools.find_single_match(data, patron)
if not url_torr.startswith("http"): #If the scheme is missing, prepend it
url_torr = scrapertools.find_single_match(host, '(\w+:)//') + url_torr
if not url_torr.startswith("http"): #If the scheme is missing, prepend it
url_torr = scrapertools.find_single_match(item.channel_host, '(\w+:)//') + url_torr

#Check that a page was loaded and that it has the expected structure
if not data or not scrapertools.find_single_match(data, patron) or not videolibrarytools.verify_url_torrent(url_torr): # If there is no data or url, raise an error
item = generictools.web_intervenida(item, data) #Check that the site has not been shut down
@@ -1509,6 +1510,9 @@ def findvideos(item):
patron = 'class="btn-torrent">.*?window.location.href = "(.*?)";' #Pattern for .torrent
if not scrapertools.find_single_match(data, patron):
patron = '<a href="([^"]+)"\s?title="[^"]+"\s?class="btn-torrent"' #Pattern for .torrent (planetatorrent)
url_torr = scrapertools.find_single_match(data, patron)
if not url_torr.startswith("http"): #If the scheme is missing, prepend it
url_torr = scrapertools.find_single_match(item.channel_host, '(\w+:)//') + url_torr

#look up the size of the .torrent
size = scrapertools.find_single_match(data, '<div class="entry-left".*?><a href=".*?span class=.*?>Size:<\/strong>?\s(\d+?\.?\d*?\s\w[b|B])<\/span>')
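The scheme-completion line repeated in these hunks is easy to test in isolation: when the scraped .torrent link is protocol-relative, the scheme is borrowed from the channel host. A small sketch under assumed inputs (complete_scheme is a hypothetical wrapper and example.org a stand-in host):

import re

def complete_scheme(url_torr, channel_host):
    # If the scraped link lacks a scheme, reuse the channel host's
    if not url_torr.startswith("http"):
        url_torr = re.search(r'(\w+:)//', channel_host).group(1) + url_torr
    return url_torr

print(complete_scheme("//cdn.example.org/file.torrent", "https://www.example.org/"))
# https://cdn.example.org/file.torrent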
@@ -1884,11 +1888,11 @@ def episodios(item):
season_display = item.from_num_season_colapse

# Fetch up-to-date info for the series. TMDB is essential for the video library
if not item.infoLabels['tmdb_id']:
try:
tmdb.set_infoLabels(item, True) #TMDB for each season
except:
pass
#if not item.infoLabels['tmdb_id']:
try:
tmdb.set_infoLabels(item, True) #TMDB for each season
except:
pass

modo_ultima_temp_alt = modo_ultima_temp
if item.ow_force == "1": #If the channel or url was handed over, refresh everything
@@ -1972,6 +1976,7 @@ def episodios(item):
num_temporadas_flag = True
else:
num_temporadas_flag = False

for page in list_pages: #Walk the list of pages
if not list_pages:
break
@@ -2075,7 +2080,7 @@ def episodios(item):
itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web. Reportar el error con el log'))
break #if there is no more data something is wrong; render what we have

#If no values are found, but put in the basics
#If no values are found, the basics are filled in
if match['season'] is None or match['season'] == "0" or not match['season']: match['season'] = season
if match['episode'] is None: match['episode'] = "0"
try:
@@ -2085,6 +2090,7 @@ def episodios(item):
if match['season'] > max_temp:
logger.error("ERROR 07: EPISODIOS: Error en número de Temporada o Episodio: " + " / TEMPORADA/EPISODIO: " + str(match['season']) + " / " + str(match['episode']) + " / NUM_TEMPORADA: " + str(max_temp) + " / " + str(season) + " / MATCHES: " + str(matches))
match['season'] = scrapertools.find_single_match(item_local.url, '\/[t|T]emp\w+-*(\d+)\/')
num_temporadas_flag = False
if not match['season']:
match['season'] = season_alt
else:

@@ -226,10 +226,40 @@ def findvideos(item):
for id in buttons:
new_url = golink(int(id), _sa, sl)
data_new = httptools.downloadpage(new_url).data
_x0x = scrapertools.find_single_match(data_new, 'var x0x = ([^;]+);')
logger.info(data_new)
valor = scrapertools.find_single_match(data_new, '\+ x92\((.*?)\)\+ ')
valores = valor.split("atob")
valor2 = valores[1].replace('(','').replace(')','')
valor1 = valores[0].split('+')
datos = []
logger.info("f4d5as6f")
logger.info(valor1)
stringTodo = ''
for val in valor1:
if '()' in val:
funcion = val.split('(')[0]
scrapedvalue = scrapertools.find_single_match(data_new, funcion+'.+?return (.+?);')
datos.append(scrapedvalue)
elif '.charAt' in val:
funcion = val.split('.charAt(')
stringTodo = funcion[0]
position = funcion[1].split(')')[0]
posiciones = []
logger.info(datos)
if datos:
for dato in datos:
logger.info(dato)
try:
posiciones.append(int(dato))
except Exception as e:
scrapedvalue = scrapertools.find_single_match(data_new, 'var %s = (.+?);' % (dato))
logger.info("scrapedvalue")
logger.info(scrapedvalue)
posiciones.append(int(scrapedvalue))
logger.info("posiciones: " + str(posiciones))
try:
x0x = eval(_x0x)
url = base64.b64decode(gktools.transforma_gsv(x0x[4], base64.b64decode(x0x[1])))
logger.info(base64.b64decode(data1, data2))
url = x92(data1, data2)
if 'download' in url:
url = url.replace('download', 'preview')
title = '%s'
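The block above re-implements fragments of the site's obfuscated JavaScript in Python: atob corresponds to base64 decoding and str.charAt(n) to plain indexing, so both have direct equivalents. A minimal sketch of those two building blocks with illustrative values (not taken from the site):

import base64

# JS: atob("aG9sYQ==")  ->  Python base64 decode
print(base64.b64decode("aG9sYQ==").decode("utf-8"))  # hola

# JS: "serieslan".charAt(3)  ->  Python indexing
string_todo = "serieslan"
position = 3
print(string_todo[int(position)])  # i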
@@ -250,11 +280,65 @@ def findvideos(item):
def golink (num, sa, sl):
import urllib
b = [3, 10, 5, 22, 31]
d = ''
for i in range(len(b)):
d += sl[2][b[i]+num:b[i]+num+1]
#d = ''
#for i in range(len(b)):
# d += sl[2][b[i]+num:b[i]+num+1]

SVR = "https://viteca.stream" if sa == 'true' else "http://serieslan.com"
TT = "/" + urllib.quote_plus(sl[3].replace("/", "><")) if num == 0 else ""
url_end = link(num,sl)
#return SVR + "/el/" + sl[0] + "/" + sl[1] + "/" + str(num) + "/" + sl[2] + d + TT
return SVR + "/el/" + sl[0] + "/" + sl[1] + "/" + str(num) + "/" + sl[2] + url_end + TT

return SVR + "/el/" + sl[0] + "/" + sl[1] + "/" + str(num) + "/" + sl[2] + d + TT
def link(ida,sl):
a=ida
b=[3,10,5,22,31]
c=1
d=""
e=sl[2]
for i in range(len(b)):
d=d+substr(e,b[i]+a,c)
return d

def substr(st,a,b):
return st[a:a+b]
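link() rebuilds a 5-character token hidden inside sl[2]: for each base offset in b it takes the single character at offset+ida, so each button id selects a shifted set of positions. A worked example using the link/substr helpers above with a dummy sl (real values come from the page's script):

sl = [None, None, "abcdefghijklmnopqrstuvwxyz0123456789"]
print(link(0, sl))  # dkfw5 -> characters at offsets 3, 10, 5, 22, 31
print(link(1, sl))  # elgx6 -> the same offsets shifted by one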

def x92(data1, data2):
data3 = []
data4 = 0
data5 = ""
data6 = ""
for i in range(256): #RC4 key scheduling (KSA): fill the state table
data3.append(i)
for i in range(256): #then swap-mix it with the key bytes
data4 = (data4 + data3[i] + ord(data1[i % len(data1)])) % 256
data5 = data3[i]
data3[i] = data3[data4]
data3[data4] = data5
i = 0
data4 = 0
for j in range(len(data2)): #RC4 stream phase (PRGA): xor each byte with the keystream
i = (i + 1) % 256
data4 = (data4 + data3[i]) % 256
data5 = data3[i]
data3[i] = data3[data4]
data3[data4] = data5
data6 += chr(ord(data2[j]) ^ data3[(data3[i] + data3[data4]) % 256])
return data6
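x92 is a transcription of RC4: a key-scheduling pass over a 256-entry state table followed by a stream pass that xors each byte of data2 with the keystream derived from data1. Because an xor stream cipher is its own inverse, applying it twice with the same key round-trips, which gives a quick self-check (assuming the body above, with illustrative inputs):

ciphertext = x92("clave", "mensaje secreto")
print(x92("clave", ciphertext))  # mensaje secreto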

def _ieshlgagkP(umZFJ):
return umZFJ
def _RyHChsfwdd(ZBKux):
return ZBKux
def _eladjkKtjf(czuwk):
return czuwk
def _slSekoKrHb():
return ''
def _VySdeBApGO():
return 'Z'

def _nEgqhkiRub():
return 28

def _lTjZxWGNnE():
return 57
@@ -1494,8 +1494,9 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
fail_over = settings['default'] #Load the list of clones
break
fail_over_list = ast.literal_eval(fail_over)
#logger.debug(str(fail_over_list))

if item.from_channel and item.from_channel != 'videolibrary': #Coming from search, the channel name may be wrong
if item.from_channel and item.from_channel != 'videolibrary': #Coming from search, the channel name may be wrong
item.channel = item.from_channel
#Walk the array to identify the failing channel
for active, channel, channel_host, contentType, action_excluded in fail_over_list:
@@ -1508,10 +1509,11 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
channel_failed = channel #save the name of the channel or category
channel_host_failed = channel_host #save the host name
channel_url_failed = item.url #save the url
#logger.debug(channel_failed + ' / ' + channel_host_failed)

if patron == True and active == '1': #we were only asked to verify the clone
return (item, data) #leave with the same clone, if it is active
if (item.action == 'episodios' or item.action == 'findvideos') and item.contentType not in contentType: #does it support fail-over for this content?
if (item.action == 'episodios' or item.action == "update_tvshow" or item.action == "get_seasons" or item.action == 'findvideos') and item.contentType not in contentType: #does it support fail-over for this content?
logger.error("ERROR 99: " + item.action.upper() + ": Acción no soportada para Fail-Over en canal: " + item.url)
return (item, data) #fail-over is not supported for this content, there is nothing we can do
break
@@ -1526,7 +1528,7 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
data_alt = ''
if channel == channel_failed or active == '0' or item.action in action_excluded or item.extra2 in action_excluded: #is the new channel valid?
continue
if (item.action == 'episodios' or item.action == 'findvideos') and item.contentType not in contentType: #does it support the content?
if (item.action == 'episodios' or item.action == "update_tvshow" or item.action == "get_seasons" or item.action == 'findvideos') and item.contentType not in contentType: #does it support the content?
continue

#Swap the channel name and url, keeping the old ones as ALT
@@ -1536,12 +1538,16 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
item.category = channel.capitalize()
item.url_alt = channel_url_failed
item.url = channel_url_failed
item.url = item.url.replace(channel_host_failed, channel_host)
channel_host_bis = re.sub(r'(?i)http.*://', '', channel_host)
channel_host_failed_bis = re.sub(r'(?i)http.*://', '', channel_host_failed)
item.url = item.url.replace(channel_host_failed_bis, channel_host_bis)

url_alt += [item.url] #save the url for the loop
item.channel_host = channel_host
#logger.debug(str(url_alt))

#strip the series code, since it can vary between sites
if item.action == "episodios" or item.action == "get_seasons":
if item.action == "episodios" or item.action == "get_seasons" or item.action == "update_tvshow":
item.url = re.sub(r'\/\d+\/?$', '', item.url) #it seems the title alone usually finds the series...
url_alt = [item.url] #save the url for the loop, but for now ignore the initial one with the series code

@@ -1602,9 +1608,11 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
#Special function to find a valid .torrent on another clone
if verify_torrent == 'torrent:check:status':
from core import videolibrarytools
if not data_alt.startswith("http"): #If the scheme is missing, prepend it
data_alt = scrapertools.find_single_match(item.channel_host, '(\w+:)//') + data_alt
if videolibrarytools.verify_url_torrent(data_alt): #check whether the .torrent exists
item.url = url #keep the url that works
break #leave with the new, verified .torrent url
break #leave with the new, verified .torrent url
data = ''
continue #the .torrent is no good, keep going
item.url = url #keep the url that works, unverified

@@ -34,7 +34,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')

try:
code = scrapertools.find_single_match(data, '<p style="" id="[^"]+">(.*?)</p>' )
code = scrapertools.find_single_match(data, '<p id="[^"]+" style="">(.*?)</p>' )
_0x59ce16 = eval(scrapertools.find_single_match(data, '_0x59ce16=([^;]+)').replace('parseInt', 'int'))
_1x4bfb36 = eval(scrapertools.find_single_match(data, '_1x4bfb36=([^;]+)').replace('parseInt', 'int'))
parseInt = eval(scrapertools.find_single_match(data, '_0x30725e,(\(parseInt.*?)\),').replace('parseInt', 'int'))

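The trick in this last hunk is to lift small arithmetic expressions out of the page's JavaScript and evaluate them in Python after mapping parseInt onto int. A toy illustration with a made-up expression (the real ones are scraped from the page, and eval on scraped text carries the same risk here as in the resolver):

# Hypothetical expression as it might appear in the page's script
js_expr = "(parseInt('101', 8) + 3) * 2"

# JS parseInt(str, base) lines up with Python int(str, base)
value = eval(js_expr.replace('parseInt', 'int'))
print(value)  # (65 + 3) * 2 = 136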