Fixes and new features

Fixes:

- DoomTv: fixed after a change in the site structure
- FanPelis: fixed after a change in the site structure
- Goovie: fixed after a change in the site structure
- InkaPelis: fixed the "Novedades" (new releases) section
- PeliculonHD: fixed after a domain change
- PelisFox: fixed the search
- PelisGratis: code improvements
- RetroseriesTV: fixed after a change in the site structure

New:

- Legalmente Gratis (clasicos): new channel
Author: Alfa-beto
Date: 2019-02-13 12:01:48 -03:00 (committed via GitHub)
Parent: 4d50c631e6
Commit: a17dfbf732
10 changed files with 323 additions and 70 deletions
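
Several of the "structure change" fixes below share one pattern: the channel's get_source helper stops stripping double quotes out of the downloaded HTML (the old re.sub removed `"` together with the whitespace noise) and gains an optional referer parameter, while the scraping regexes are rewritten to anchor on the quoted attribute values instead of the bare ones. A minimal sketch of that regex migration — the URL and HTML fragment here are invented for illustration, not taken from any of the sites:

    import re

    # Hypothetical fragment of the kind of markup these channels scrape.
    html = '<a href="https://example.com/movie-1" class="Title">Movie 1</a>'

    # Old approach: strip the quotes along with the whitespace noise,
    # then match the bare attribute values.
    stripped = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", html)
    old_patron = 'href=(.*?) class=Title>(.*?)</a>'
    print(re.findall(old_patron, stripped))  # [('https://example.com/movie-1', 'Movie 1')]

    # New approach: keep the quotes and anchor the pattern on them, which
    # survives attribute reordering and extra markup better.
    new_patron = 'href="([^"]+)" class="Title">([^<]+)</a>'
    print(re.findall(new_patron, html))      # [('https://example.com/movie-1', 'Movie 1')]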

File: doomtv channel

@@ -68,16 +68,25 @@ def mainlist(item):
     return itemlist
+def get_source(url, referer=None):
+    logger.info()
+    if referer is None:
+        data = httptools.downloadpage(url).data
+    else:
+        data = httptools.downloadpage(url, headers={'Referer':referer}).data
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    logger.debug(data)
+    return data
 def lista(item):
     logger.info()
     itemlist = []
     next = False
-    data = httptools.downloadpage(item.url).data
-    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-    patron = 'movie-id=.*?href=(.*?) data-url.*?quality>(.*?)'
-    patron += '<img data-original=(.*?) class.*?<h2>(.*?)<\/h2>.*?<p>(.*?)<\/p>'
+    data = get_source(item.url)
+    patron = 'movie-id=.*?href="([^"]+)" data-url.*?quality">([^<]+)<.*?img data-original="([^"]+)" class.*?'
+    patron += '<h2>([^<]+)<\/h2>.*?<p>([^<]+)<\/p>'
     matches = re.compile(patron, re.DOTALL).findall(data)
@@ -89,7 +98,7 @@ def lista(item):
     for scrapedurl, quality, scrapedthumbnail, scrapedtitle, plot in matches[first:last]:
-        url = scrapedurl
+        url = 'http:'+scrapedurl
         thumbnail = scrapedthumbnail
         filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w185", "")
         filtro_list = {"poster_path": filtro_thumb.strip()}
@@ -114,7 +123,7 @@ def lista(item):
         url_next_page = item.url
         first = last
     else:
-        url_next_page = scrapertools.find_single_match(data, "<a href=([^ ]+) class=page-link aria-label=Next>")
+        url_next_page = scrapertools.find_single_match(data, "<li class='active'>.*?class='page larger' href='([^']+)'")
         first = 0
     if url_next_page:
@@ -128,14 +137,14 @@ def seccion(item):
     itemlist = []
     duplicado = []
-    data = httptools.downloadpage(item.url).data
-    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-    patron = 'menu-item-object-category menu-item-\d+><a href=(.*?)>(.*?)<\/a><\/li>'
+    data = get_source(item.url)
+    patron = 'menu-item-object-category menu-item-\d+"><a href="([^"]+)">([^<]+)<\/a><\/li>'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for scrapedurl, scrapedtitle in matches:
-        url = scrapedurl
+        url = 'http:'+ scrapedurl
         title = scrapedtitle
         thumbnail = ''
         if url not in duplicado:
@@ -163,7 +172,6 @@ def newest(categoria):
     logger.info()
     itemlist = []
     item = Item()
-    # categoria='peliculas'
     try:
         if categoria in ['peliculas', 'latino']:
             item.url = host +'peliculas/page/1'
@@ -186,14 +194,15 @@ def findvideos(item):
 def findvideos(item):
     logger.info()
     itemlist = []
-    data = httptools.downloadpage(item.url).data
-    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-    patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|frameborder|><\/script>)'
+    data = get_source(item.url)
+    patron = 'id="(tab\d+)"><div class="movieplay">.*?src="([^"]+)"'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for option, urls in matches:
+        if 'http' not in urls:
+            urls = 'https:'+urls
         new_item = Item(
             channel=item.channel,
             url=urls,

File: fanpelis channel

@@ -16,7 +16,7 @@ from core.item import Item
 from platformcode import config, logger
 from core import tmdb
-host = "http://fanpelis.com/"
+host = "https://fanpelis.com/"
 def mainlist(item):
     logger.info()

File: goovie channel

@@ -44,6 +44,8 @@ def mainlist(item):
                          thumbnail= get_thumb('movies', auto=True)))
     itemlist.append(Item(channel=item.channel, title='Series', action='sub_menu', type='series',
                          thumbnail= get_thumb('tvshows', auto=True)))
+    itemlist.append(Item(channel=item.channel, title='Colecciones', action='list_collections',
+                         url= host+'listas=populares', thumbnail=get_thumb('colections', auto=True)))
     itemlist.append(
         item.clone(title="Buscar", action="search", url=host + 'search?go=', thumbnail=get_thumb("search", auto=True),
                    extra='movie'))
@@ -66,10 +68,13 @@ def sub_menu(item):
     return itemlist
-def get_source(url):
+def get_source(url, referer=None):
     logger.info()
-    data = httptools.downloadpage(url).data
-    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    if referer is None:
+        data = httptools.downloadpage(url).data
+    else:
+        data = httptools.downloadpage(url, headers={'Referer':referer}).data
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
     return data
@@ -93,7 +98,7 @@ def section(item):
         data = scrapertools.find_single_match(data, 'Generos.*?</ul>')
     elif 'Año' in item.title:
         data = scrapertools.find_single_match(data, 'Años.*?</ul>')
-    patron = "<li onclick=filter\(this, '([^']+)', \d+\);>"
+    patron = '<li onclick="filter\(this, \'([^\']+)\', \d+\);">'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for scrapedtitle in matches:
@@ -112,8 +117,8 @@ def list_all(item):
     itemlist = []
     data = get_source(item.url)
-    patron = '<article class=Item><a href=([^>]+)><div class=Poster>'
-    patron += '<img src=(.+?)(?:>|alt).*?<h2>([^>]+)</h2>.*?</article>'
+    patron = '<article class="Item"><a href="([^>]+)"><div class="Poster"><img src="([^"]+)".*?'
+    patron += '<h2>([^>]+)</h2>.*?</article>'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
@@ -128,10 +133,9 @@ def list_all(item):
                         title=title,
                         url=url,
                         thumbnail=thumbnail,
-                        plot=thumbnail,
                         infoLabels={'filtro':filter_list})
-        if item.type == 'peliculas':
+        if item.type == 'peliculas' or 'peliculas' in url:
             new_item.action = 'findvideos'
             new_item.contentTitle = scrapedtitle
         else:
@@ -143,19 +147,38 @@ def list_all(item):
     tmdb.set_infoLabels(itemlist, seekTmdb=True)
     # Paginación
-    url_next_page = scrapertools.find_single_match(data,"<link rel=next href=(.*?) />")
+    url_next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)"')
     if url_next_page:
         itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
     return itemlist
+def list_collections(item):
+    logger.info()
+    itemlist = []
+    data = get_source(item.url)
+    patron = '<li><a href="([^"]+)">.*?"first-lIMG"><img src="([^"]+)">.*?<h2>([^<]+)</h2>.*?Fichas:?\s(\d+)'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for url, thumb, title, cant in matches:
+        plot = 'Contiene %s elementos' % cant
+        itemlist.append(Item(channel=item.channel, action='list_all', title=title, url=url, thumbnail=thumb, plot=plot))
+    url_next_page = scrapertools.find_single_match(data, 'class="PageActiva">\d+</a><a href="([^"]+)"')
+    if url_next_page:
+        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_collections'))
+    return itemlist
 def seasons(item):
     logger.info()
     itemlist=[]
     data=get_source(item.url)
-    patron='<div class=season temporada-(\d+)>'
+    patron='<div class="season temporada-(\d+)">'
     matches = re.compile(patron, re.DOTALL).findall(data)
     infoLabels = item.infoLabels
@@ -189,7 +212,7 @@ def episodesxseasons(item):
     itemlist = []
     data=get_source(item.url)
-    patron= "<li><a href=([^>]+)><b>%s - (\d+)</b><h2 class=eTitle>([^>]+)</h2>" % item.infoLabels['season']
+    patron= '<li><a href="([^"]+)"><b>%s - (\d+)</b><h2 class="eTitle">([^>]+)</h2>' % item.infoLabels['season']
     matches = re.compile(patron, re.DOTALL).findall(data)
     infoLabels = item.infoLabels
@@ -207,31 +230,52 @@ def episodesxseasons(item):
     return itemlist
 def findvideos(item):
     from lib.generictools import privatedecrypt
     logger.info()
     itemlist = []
     data = get_source(item.url)
-    patron = "onclick=clickLink\(this, '([^']+)', '([^']+)', '([^']+)'\);>"
+    data = data.replace('"', "'")
+    patron = "onclick='clickLink\(this, '([^']+)', '([^']+)', '([^']+)'\);'>.*?<b>([a-zA-Z]+)"
     matches = re.compile(patron, re.DOTALL).findall(data)
     headers = {'referer': item.url}
-    for url, quality, language in matches:
+    for url, quality, language, server in matches:
         url = privatedecrypt(url, headers)
         if url != '':
             language = IDIOMAS[language]
             if quality.lower() == 'premium':
                 quality = '720p'
             quality = CALIDADES[quality]
             title = ' [%s] [%s]' % (language, quality)
+            if 'visor/vdz' in url:
+                server = 'powvideo'
             itemlist.append(Item(channel=item.channel, title='%s' + title, url=url, action='play', language=language,
-                                 quality=quality, infoLabels=item.infoLabels))
+                                 quality=quality, server=server, headers=headers, infoLabels=item.infoLabels))
     itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
     return sorted(itemlist, key=lambda i: i.language)
+def play(item):
+    from lib.generictools import privatedecrypt
+    logger.info()
+    itemlist = []
+    url = ''
+    item.server = ''
+    data = httptools.downloadpage(item.url, headers=item.headers, follow_redirects=False)
+    if 'visor/vdz' in item.url:
+        url = scrapertools.find_single_match(data.data, 'IFRAME SRC="([^"]+)"')
+    elif 'visor/if' in item.url:
+        url = data.headers['location']
+    itemlist.append(Item(channel=item.channel, url=url, action='play', server=item.server,
+                         infoLabels=item.infoLabels))
+    itemlist = servertools.get_servers_itemlist(itemlist)
+    return itemlist
 def search(item, texto):
     logger.info()

File: inkapelis channel

@@ -292,7 +292,8 @@ def entradas(item):
     else:
         # Extrae las entradas
         if item.extra == "Novedades":
-            data2 = data.split("<h3>Últimas Películas Agregadas</h3>", 1)[1]
+            data2 = data.split("<h3>Últimas Películas Agregadas y Actualizadas</h3>", 1)[1]
             entradas = scrapertools.find_multiple_matches(data2, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')
         else:
             entradas = scrapertools.find_multiple_matches(data, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')

New file: legalmentegratis channel definition (JSON)

@@ -0,0 +1,37 @@
+{
+    "id": "legalmentegratis",
+    "name": "Legalmente Gratis (clasicos)",
+    "active": true,
+    "adult": false,
+    "language": ["cast"],
+    "thumbnail": "https://i.postimg.cc/NFGv0pN3/legalgratis.png",
+    "banner": "",
+    "version": 1,
+    "categories": [
+        "movie",
+        "vos"
+    ],
+    "settings": [
+        {
+            "id": "include_in_global_search",
+            "type": "bool",
+            "label": "Incluir en busqueda global",
+            "default": false,
+            "enabled": false,
+            "visible": false
+        },
+        {
+            "id": "filter_languages",
+            "type": "list",
+            "label": "Mostrar enlaces en idioma...",
+            "default": 0,
+            "enabled": true,
+            "visible": true,
+            "lvalues": [
+                "No filtrar",
+                "CAST",
+                "VOSE"
+            ]
+        }
+    ]
+}

New file: legalmentegratis channel

@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+# -*- Channel Legalmente Gratis -*-
+# -*- Created for Alfa-addon -*-
+# -*- By the Alfa Develop Group -*-
+
+import re
+import urllib
+
+from channelselector import get_thumb
+from core import httptools
+from core import scrapertools
+from core import servertools
+from core import tmdb
+from core.item import Item
+from platformcode import config, logger
+from channels import autoplay
+from channels import filtertools
+
+host = 'http://legalmentegratis.com/'
+
+IDIOMAS = {'español':'CAST', 'VOSE': 'VOSE'}
+list_language = IDIOMAS.values()
+list_quality = []
+list_servers = ['youtube']
+
+
+def mainlist(item):
+    logger.info()
+    autoplay.init(item.channel, list_servers, list_quality)
+    itemlist = list()
+    itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host,
+                         thumbnail=get_thumb('all', auto=True)))
+    itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
+                         thumbnail=get_thumb('genres', auto=True)))
+    autoplay.show_option(item.channel, itemlist)
+    return itemlist
+
+
+def get_source(url, referer=None):
+    logger.info()
+    if referer is None:
+        data = httptools.downloadpage(url).data
+    else:
+        data = httptools.downloadpage(url, headers={'Referer':referer}).data
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    return data
+
+
+def list_all(item):
+    logger.info()
+    itemlist = []
+    data = get_source(item.url)
+    patron = '<article id="post-\d+".*?href="([^"]+)".*?src="([^"]+)".*?<p>(.*?) (\(?\d{4}\)?)([^<]+)</p>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedplot in matches:
+        url = scrapedurl
+        contentTitle = scrapedtitle
+        year = re.sub(r'\(|\)','', year)
+        title = '%s [%s]' % (contentTitle, year)
+        thumbnail = 'http:' + scrapedthumbnail
+        itemlist.append(Item(channel=item.channel, action='findvideos',
+                             title=title,
+                             url=url,
+                             thumbnail=thumbnail,
+                             contentTitle=contentTitle,
+                             infoLabels={'year': year}
+                             ))
+    tmdb.set_infoLabels_itemlist(itemlist, True)
+    itemlist = sorted(itemlist, key=lambda it: it.contentTitle)
+    # Paginación
+    url_next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
+    if url_next_page:
+        itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
+                             section=item.section))
+    return itemlist
+
+
+def section(item):
+    logger.info()
+    itemlist = []
+    data = get_source(host)
+    action = 'list_all'
+    if item.section == 'genre':
+        data = scrapertools.find_single_match(data, '>Género(.*?)</ul>')
+    patron = 'href="([^"]+)".*?>([^<]+)</a>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for url, title in matches:
+        new_item = Item(channel=item.channel, title=title, url=url, action=action, section=item.section)
+        itemlist.append(new_item)
+    return itemlist
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+    data = get_source(item.url)
+    lang_data = scrapertools.find_single_match(data, '<p><strong(.*?)</strong></p>')
+    if 'español' in lang_data:
+        language = 'español'
+    else:
+        language = 'VOSE'
+    url = scrapertools.find_single_match (data, '<iframe.*?src="([^"]+)"')
+    if 'gloria.tv' in url:
+        new_data = get_source(url)
+        url = 'https://gloria.tv'+ scrapertools.find_single_match(new_data, '<source type=".*?" src="([^"]+)">')
+    itemlist.append(Item(channel=item.channel, title='%s', action='play', url=url,
+                         language=IDIOMAS[language], infoLabels=item.infoLabels))
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s' % i.server.capitalize())
+    # Requerido para FilterTools
+    itemlist = filtertools.get_links(itemlist, item, list_language)
+    # Requerido para AutoPlay
+    autoplay.start(itemlist, item)
+    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
+        itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
+                             url=item.url, action="add_pelicula_to_library", extra="findvideos",
+                             contentTitle=item.contentTitle))
+    return itemlist

File: peliculonhd channel

@@ -39,7 +39,7 @@ list_servers = [
 __comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'peliculonhd')
 __comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'peliculonhd')
-host = 'https://peliculonhd.com/'
+host = 'https://peliculonhd.tv/'
 def mainlist(item):
     logger.info()
@@ -50,7 +50,7 @@ def mainlist(item):
     itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
                          thumbnail= get_thumb('movies', auto=True)))
-    itemlist.append(Item(channel=item.channel, title='Series', url=host+'serie', action='list_all', type='tv',
+    itemlist.append(Item(channel=item.channel, title='Series', url=host+'ver-serie', action='list_all', type='tv',
                          thumbnail= get_thumb('tvshows', auto=True)))
     itemlist.append(
         item.clone(title="Buscar", action="search", url=host + '?s=', thumbnail=get_thumb("search", auto=True),
@@ -65,7 +65,7 @@ def menu_movies(item):
     itemlist=[]
-    itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'ver', action='list_all',
+    itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'ver-pelicula', action='list_all',
                          thumbnail=get_thumb('all', auto=True), type='movie'))
     itemlist.append(Item(channel=item.channel, title='Genero', action='section',
                          thumbnail=get_thumb('genres', auto=True), type='movie'))
@@ -145,8 +145,8 @@ def list_all(item):
                          title=title,
                          url=url,
                          thumbnail=thumbnail,
-                         contentTitle=contentTitle,
                          quality=quality,
+                         contentTitle= contentTitle,
                          type=item.type,
                          infoLabels={'year':year}))
@@ -221,7 +221,8 @@ def episodesxseasons(item):
     itemlist = []
     data=get_source(item.url)
-    patron='class="numerando">%s - (\d+)</div><div class="episodiotitle">.?<a href="([^"]+)">([^<]+)<' % item.infoLabels['season']
+    data = data.replace('"','\'')
+    patron="class='numerando'>%s - (\d+)</div><div class='episodiotitle'>.?<a href='([^']+)'>([^<]+)<" % item.infoLabels['season']
     matches = re.compile(patron, re.DOTALL).findall(data)
     infoLabels = item.infoLabels
@@ -262,7 +263,7 @@ def findvideos(item):
     post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type':type}
     post = urllib.urlencode(post)
-    test_url = '%swp-admin/admin-ajax.php' % 'https://peliculonhd.com/'
+    test_url = '%swp-admin/admin-ajax.php' % host
     new_data = httptools.downloadpage(test_url, post=post, headers={'Referer':item.url}).data
     test_url = scrapertools.find_single_match(new_data, "src='([^']+)'")
     if 'xyz' in test_url:

File: pelisfox channel

@@ -60,7 +60,7 @@ def mainlist(item):
     itemlist.append(item.clone(title="Buscar",
                                action="search",
-                               url=host + '/api/suggest?query=',
+                               url=host + '/api/suggest/?query=',
                                thumbnail=get_thumb('search', auto=True)
                                ))
@@ -185,31 +185,31 @@ def seccion(item):
 def busqueda(item):
     logger.info()
     itemlist = []
-    headers = {'referer':host, 'X-Requested-With': 'XMLHttpRequest'}
+    headers = {'referer':host, 'X-Requested-With': 'XMLHttpRequest',
+               'Accept': 'application/json, text/javascript, */*; q=0.01'}
     data = httptools.downloadpage(item.url, headers=headers).data
     dict_data = jsontools.load(data)
-    resultados = dict_data['suggest']['result'][0]['options']
+    resultados = dict_data['data']['m']
     for resultado in resultados:
-        if 'title' in resultado['_source']:
-            title = resultado['_source']['title']
-            thumbnail = 'https://static.pelisfox.tv/static/movie' + '/' + resultado['_source']['cover']
-            plot = resultado['_source']['sinopsis']
-            url = host + resultado['_source']['url'] + '/'
-            itemlist.append(item.clone(title=title,
-                                       thumbnail=thumbnail,
-                                       plot=plot,
-                                       url=url,
-                                       action='findvideos',
-                                       contentTitle=title
-                                       ))
+        title = resultado['title']
+        thumbnail = 'https://static.pelisfox.tv/' + '/' + resultado['cover']
+        plot = resultado['synopsis']
+        url = host + resultado['slug'] + '/'
+        itemlist.append(item.clone(title=title,
+                                   thumbnail=thumbnail,
+                                   plot=plot,
+                                   url=url,
+                                   action='findvideos',
+                                   contentTitle=title
+                                   ))
     return itemlist
 def search(item, texto):
     logger.info()
-    texto = texto.replace(" ", "+")
+    texto = texto.replace(" ", "%20")
     item.url = item.url + texto
     if texto != '':
@@ -224,6 +224,7 @@ def findvideos(item):
     data = httptools.downloadpage(item.url).data
     data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
     links = scrapertools.find_single_match(data, '<script>var.*?_SOURCE.?=.?(.*?);')
+    links = links.replace('null', '"null"')
     links = links.replace('false', '"false"').replace('true', '"true"')
     links = eval(links)
     for link in links:

File: pelisgratis channel

@@ -101,10 +101,13 @@ def mainlist(item):
     return itemlist
-def get_source(url):
+def get_source(url, referer=None):
     logger.info()
-    data = httptools.downloadpage(url, add_referer=True).data
-    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    if referer is None:
+        data = httptools.downloadpage(url).data
+    else:
+        data = httptools.downloadpage(url, headers={'Referer':referer}).data
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
     return data
@@ -112,18 +115,19 @@ def lista(item):
     logger.info()
     itemlist = []
     data = get_source(item.url)
-    patron = 'class=(?:MvTbImg|TPostMv).*?href=(.*?)\/(?:>| class).*?src=(.*?) '
-    patron += 'class=Title>(.*?)<.*?(?:<td|class=Year)>(.*?)<.*?(?:<td|class=Description)>(.*?)<(?:\/td|\/p)>'
+    patron = 'article id=post-\d+.*?href=([^>]+)>.*?src=(.*?)\s.*?"Title">([^<]+)<(.*?)</a>.*?<p>([^<]+)</p>'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedplot in matches:
+    for scrapedurl, scrapedthumbnail, scrapedtitle, year_data, scrapedplot in matches:
+        year = scrapertools.find_single_match(year_data, 'Year>(\d{4})<')
         url = scrapedurl
         thumbnail = scrapedthumbnail
         plot = scrapedplot
         quality = ''
         contentTitle = scrapedtitle
         title = contentTitle
-        year = scrapedyear
+        year = year
         itemlist.append(item.clone(action='findvideos',
                                    title=title,
@@ -139,7 +143,7 @@ def lista(item):
     if itemlist != []:
         actual_page_url = item.url
-        next_page = scrapertools.find_single_match(data, '<a class=nextpostslink rel=next href=(.*?)>')
+        next_page = scrapertools.find_single_match(data, 'href=([^>]+)>Siguiente &raquo;</a>')
         if next_page != '':
             itemlist.append(item.clone(action="lista",
                                        title='Siguiente >>>',
@@ -154,16 +158,15 @@ def seccion(item):
     itemlist = []
     data = get_source(item.url)
     if item.extra == 'generos':
-        patron = '<li class=cat-item cat-item-.*?><a href=(.*?)>(.*?)</a><\/li>'
+        patron = 'menu-item-object-category.*?<a href=([^<]+)>([^<]+)</a>'
     elif item.extra == 'a-z':
-        patron = '<li><a href=(.*?)>(\w|#)<\/a><\/li>'
+        patron = '<li><a href=([^<]+)>(\w|#)<\/a><\/li>'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for scrapedurl, scrapedtitle in matches:
         url = scrapedurl
         thumbnail = ''
         if item.extra == 'generos':
-            #cantidad = re.findall(r'.*?<\/a> \((\d+)\)', scrapedtitle)
             title = scrapedtitle
         else:
             title = scrapedtitle
@@ -192,7 +195,6 @@ def findvideos(item):
     data = data.replace("&lt;","<").replace("&quot;",'"').replace("&gt;",">").replace("&amp;","&").replace('\"',"")
     patron = '<div class=TPlayerTb.*?id=(.*?)>.*?src=(.*?) frameborder'
     matches = scrapertools.find_multiple_matches(data, patron)
-    headers = {'referer':item.url}
     for opt, urls_page in matches:
         language = scrapertools.find_single_match (data,'TPlayerNv>.*?tplayernv=%s><span>Opción.*?<span>(.*?)</span>' % opt)
         if 'trembed' in urls_page:
@@ -201,6 +203,8 @@ def findvideos(item):
             urls_page = scrapertools.find_single_match(sub_data, 'src="([^"]+)" ')
         if "repro.live" in urls_page:
             server_repro(urls_page)
+        if "repros.live" in urls_page:
+            server_repros(urls_page)
         if "itatroniks.com" in urls_page:
             server_itatroniks(urls_page)
         for url in new_data:
@@ -233,6 +237,20 @@ def server_itatroniks(urls_page):
     new_data.append(urls_page)
+def server_repros(urls_page):
+    logger.info()
+    headers = {"Referer":host}
+    headers1 = {"X-Requested-With":"XMLHttpRequest"}
+    sub_data = httptools.downloadpage(urls_page, headers = headers).data
+    urls_page1 = scrapertools.find_multiple_matches(sub_data, 'data-embed="([^"]+)"')
+    for idurl in urls_page1:
+        #post = {"codigo":idurl}
+        #post = urllib.urlencode(post)
+        dd1 = httptools.downloadpage("https://repros.live/player/ajaxdata", post = urllib.urlencode({"codigo":idurl}), headers = headers1).data
+        data_json = jsontools.load(dd1)
+        new_data.append(data_json["url"])
 def server_repro(urls_page):
     logger.info()
     headers = {"Referer":urls_page}

File: retroseriestv channel

@@ -46,9 +46,12 @@ def mainlist(item):
     return itemlist
-def get_source(url):
+def get_source(url, referer=None):
     logger.info()
-    data = httptools.downloadpage(url).data
+    if referer is None:
+        data = httptools.downloadpage(url, ignore_response_code=True).data
+    else:
+        data = httptools.downloadpage(url, headers={'Referer':referer}).data
     data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
     return data