Merge pull request #555 from Alfa-beto/fixes

Correcciones y Novedades
This commit is contained in:
Alfa
2019-02-13 14:39:49 -05:00
committed by GitHub
12 changed files with 655 additions and 347 deletions

View File

@@ -68,16 +68,25 @@ def mainlist(item):
return itemlist
def get_source(url, referer=None):
    """Download *url* and return its HTML flattened to a single line.

    When *referer* is given it is sent as the Referer header, since some
    hosts reject requests that lack one.
    """
    logger.info()
    if referer is None:
        response = httptools.downloadpage(url)
    else:
        response = httptools.downloadpage(url, headers={'Referer': referer})
    cleaned = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", response.data)
    logger.debug(cleaned)
    return cleaned
def lista(item):
logger.info()
itemlist = []
next = False
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'movie-id=.*?href=(.*?) data-url.*?quality>(.*?)'
patron += '<img data-original=(.*?) class.*?<h2>(.*?)<\/h2>.*?<p>(.*?)<\/p>'
data = get_source(item.url)
patron = 'movie-id=.*?href="([^"]+)" data-url.*?quality">([^<]+)<.*?img data-original="([^"]+)" class.*?'
patron += '<h2>([^<]+)<\/h2>.*?<p>([^<]+)<\/p>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -89,7 +98,7 @@ def lista(item):
for scrapedurl, quality, scrapedthumbnail, scrapedtitle, plot in matches[first:last]:
url = scrapedurl
url = 'http:'+scrapedurl
thumbnail = scrapedthumbnail
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w185", "")
filtro_list = {"poster_path": filtro_thumb.strip()}
@@ -114,7 +123,7 @@ def lista(item):
url_next_page = item.url
first = last
else:
url_next_page = scrapertools.find_single_match(data, "<a href=([^ ]+) class=page-link aria-label=Next>")
url_next_page = scrapertools.find_single_match(data, "<li class='active'>.*?class='page larger' href='([^']+)'")
first = 0
if url_next_page:
@@ -128,14 +137,14 @@ def seccion(item):
itemlist = []
duplicado = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'menu-item-object-category menu-item-\d+><a href=(.*?)>(.*?)<\/a><\/li>'
data = get_source(item.url)
patron = 'menu-item-object-category menu-item-\d+"><a href="([^"]+)">([^<]+)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = scrapedurl
url = 'http:'+ scrapedurl
title = scrapedtitle
thumbnail = ''
if url not in duplicado:
@@ -163,7 +172,6 @@ def newest(categoria):
logger.info()
itemlist = []
item = Item()
# categoria='peliculas'
try:
if categoria in ['peliculas', 'latino']:
item.url = host +'peliculas/page/1'
@@ -186,14 +194,15 @@ def newest(categoria):
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|frameborder|><\/script>)'
data = get_source(item.url)
patron = 'id="(tab\d+)"><div class="movieplay">.*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, urls in matches:
if 'http' not in urls:
urls = 'https:'+urls
new_item = Item(
channel=item.channel,
url=urls,

View File

@@ -16,7 +16,7 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb
host = "http://fanpelis.com/"
host = "https://fanpelis.com/"
def mainlist(item):
logger.info()

View File

@@ -44,6 +44,8 @@ def mainlist(item):
thumbnail= get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title='Series', action='sub_menu', type='series',
thumbnail= get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, title='Colecciones', action='list_collections',
url= host+'listas=populares', thumbnail=get_thumb('colections', auto=True)))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + 'search?go=', thumbnail=get_thumb("search", auto=True),
extra='movie'))
@@ -66,10 +68,13 @@ def sub_menu(item):
return itemlist
def get_source(url, referer=None):
    """Download *url* and return its HTML with whitespace noise stripped.

    When *referer* is given it is sent as the Referer header (some hosts
    refuse requests without it).
    """
    # The diff rendering interleaved the old and new versions of this
    # function (duplicate def line and duplicate download call); only the
    # merged implementation is kept here.
    logger.info()
    if referer is None:
        data = httptools.downloadpage(url).data
    else:
        data = httptools.downloadpage(url, headers={'Referer': referer}).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data
@@ -93,7 +98,7 @@ def section(item):
data = scrapertools.find_single_match(data, 'Generos.*?</ul>')
elif 'Año' in item.title:
data = scrapertools.find_single_match(data, 'Años.*?</ul>')
patron = "<li onclick=filter\(this, '([^']+)', \d+\);>"
patron = '<li onclick="filter\(this, \'([^\']+)\', \d+\);">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle in matches:
@@ -112,8 +117,8 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
patron = '<article class=Item><a href=([^>]+)><div class=Poster>'
patron += '<img src=(.+?)(?:>|alt).*?<h2>([^>]+)</h2>.*?</article>'
patron = '<article class="Item"><a href="([^>]+)"><div class="Poster"><img src="([^"]+)".*?'
patron += '<h2>([^>]+)</h2>.*?</article>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
@@ -128,10 +133,9 @@ def list_all(item):
title=title,
url=url,
thumbnail=thumbnail,
plot=thumbnail,
infoLabels={'filtro':filter_list})
if item.type == 'peliculas':
if item.type == 'peliculas' or 'peliculas' in url:
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
else:
@@ -143,19 +147,38 @@ def list_all(item):
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Paginación
url_next_page = scrapertools.find_single_match(data,"<link rel=next href=(.*?) />")
url_next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)"')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def list_collections(item):
    """Build the menu of curated collections, with pagination."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = ('<li><a href="([^"]+)">.*?"first-lIMG"><img src="([^"]+)">'
              '.*?<h2>([^<]+)</h2>.*?Fichas:?\s(\d+)')
    for col_url, col_thumb, col_title, col_count in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=item.channel, action='list_all', title=col_title,
                             url=col_url, thumbnail=col_thumb,
                             plot='Contiene %s elementos' % col_count))
    next_url = scrapertools.find_single_match(data, 'class="PageActiva">\d+</a><a href="([^"]+)"')
    if next_url:
        itemlist.append(item.clone(title="Siguiente >>", url=next_url, action='list_collections'))
    return itemlist
def seasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron='<div class=season temporada-(\d+)>'
patron='<div class="season temporada-(\d+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -189,7 +212,7 @@ def episodesxseasons(item):
itemlist = []
data=get_source(item.url)
patron= "<li><a href=([^>]+)><b>%s - (\d+)</b><h2 class=eTitle>([^>]+)</h2>" % item.infoLabels['season']
patron= '<li><a href="([^"]+)"><b>%s - (\d+)</b><h2 class="eTitle">([^>]+)</h2>' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -207,31 +230,52 @@ def episodesxseasons(item):
return itemlist
def findvideos(item):
    """List the playable links for a title, decrypting each one first.

    The page stores links behind an inline clickLink() handler; every
    match yields (encrypted url, quality, language, server name).
    """
    # The diff rendering interleaved the old and new versions of this
    # function; only the merged (4-group pattern) implementation is kept.
    from lib.generictools import privatedecrypt
    logger.info()
    itemlist = []
    data = get_source(item.url)
    # Normalize quotes so a single pattern matches both markup variants.
    data = data.replace('"', "'")
    patron = "onclick='clickLink\(this, '([^']+)', '([^']+)', '([^']+)'\);'>.*?<b>([a-zA-Z]+)"
    matches = re.compile(patron, re.DOTALL).findall(data)
    headers = {'referer': item.url}
    for url, quality, language, server in matches:
        url = privatedecrypt(url, headers)
        if url != '':
            language = IDIOMAS[language]
            # 'premium' is the site's alias for its 720p hosting tier.
            if quality.lower() == 'premium':
                quality = '720p'
            quality = CALIDADES[quality]
            title = ' [%s] [%s]' % (language, quality)
            # visor/vdz links always resolve to powvideo regardless of label.
            if 'visor/vdz' in url:
                server = 'powvideo'
            itemlist.append(Item(channel=item.channel, title='%s' + title, url=url, action='play',
                                 language=language, quality=quality, server=server,
                                 headers=headers, infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    return sorted(itemlist, key=lambda i: i.language)
def play(item):
    """Resolve the final stream URL for the selected link.

    'visor/vdz' pages embed the real player in an IFRAME tag, while
    'visor/if' links answer with a redirect whose Location header is the
    target — hence follow_redirects=False so the header stays visible.
    """
    # Fix: the unused `privatedecrypt` import was removed (decryption
    # already happened in findvideos).
    logger.info()
    itemlist = []
    url = ''
    item.server = ''
    data = httptools.downloadpage(item.url, headers=item.headers, follow_redirects=False)
    if 'visor/vdz' in item.url:
        url = scrapertools.find_single_match(data.data, 'IFRAME SRC="([^"]+)"')
    elif 'visor/if' in item.url:
        url = data.headers['location']
    itemlist.append(Item(channel=item.channel, url=url, action='play', server=item.server,
                         infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist)
    return itemlist
def search(item, texto):
logger.info()

View File

@@ -292,7 +292,8 @@ def entradas(item):
else:
# Extrae las entradas
if item.extra == "Novedades":
data2 = data.split("<h3>Últimas Películas Agregadas</h3>", 1)[1]
data2 = data.split("<h3>Últimas Películas Agregadas y Actualizadas</h3>", 1)[1]
entradas = scrapertools.find_multiple_matches(data2, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')
else:
entradas = scrapertools.find_multiple_matches(data, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')

View File

@@ -0,0 +1,37 @@
{
"id": "legalmentegratis",
"name": "Legalmente Gratis (clasicos)",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "https://i.postimg.cc/NFGv0pN3/legalgratis.png",
"banner": "",
"version": 1,
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"VOSE"
]
}
]
}

View File

@@ -0,0 +1,139 @@
# -*- coding: utf-8 -*-
# -*- Channel Legalmente Gratis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
host = 'http://legalmentegratis.com/'
IDIOMAS = {'español':'CAST', 'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['youtube']
def mainlist(item):
    """Root menu of the channel: full catalogue plus a genre browser."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = [
        Item(channel=item.channel, title="Todas", action="list_all", url=host,
             thumbnail=get_thumb('all', auto=True)),
        Item(channel=item.channel, title="Generos", action="section", section='genre',
             thumbnail=get_thumb('genres', auto=True)),
    ]
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def get_source(url, referer=None):
    """Fetch *url* (optionally with a Referer header) and strip layout noise."""
    logger.info()
    if referer is None:
        raw = httptools.downloadpage(url).data
    else:
        raw = httptools.downloadpage(url, headers={'Referer': referer}).data
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def list_all(item):
    """List every movie on the current page, sorted by title, with paging."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<article id="post-\d+".*?href="([^"]+)".*?src="([^"]+)".*?<p>(.*?) (\(?\d{4}\)?)([^<]+)</p>'
    for movie_url, movie_thumb, movie_title, raw_year, _plot in re.compile(patron, re.DOTALL).findall(data):
        # The year may arrive wrapped in parentheses; strip them.
        year = raw_year.replace('(', '').replace(')', '')
        itemlist.append(Item(channel=item.channel, action='findvideos',
                             title='%s [%s]' % (movie_title, year),
                             url=movie_url,
                             thumbnail='http:' + movie_thumb,
                             contentTitle=movie_title,
                             infoLabels={'year': year}))
    tmdb.set_infoLabels_itemlist(itemlist, True)
    itemlist = sorted(itemlist, key=lambda it: it.contentTitle)
    # Pagination
    url_next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
    if url_next_page:
        itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page,
                             action='list_all', section=item.section))
    return itemlist
def section(item):
    """List the genre links found on the home page side menu."""
    logger.info()
    itemlist = []
    data = get_source(host)
    action = 'list_all'
    if item.section == 'genre':
        # Narrow the HTML to the genre <ul> so the link pattern stays simple.
        data = scrapertools.find_single_match(data, '>Género(.*?)</ul>')
    for sec_url, sec_title in re.compile('href="([^"]+)".*?>([^<]+)</a>', re.DOTALL).findall(data):
        itemlist.append(Item(channel=item.channel, title=sec_title, url=sec_url,
                             action=action, section=item.section))
    return itemlist
def findvideos(item):
    """Collect the playable link for a movie and tag its language."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    lang_data = scrapertools.find_single_match(data, '<p><strong(.*?)</strong></p>')
    language = 'español' if 'español' in lang_data else 'VOSE'
    url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"')
    if 'gloria.tv' in url:
        # gloria.tv embeds point at a player page; pull the real <source> out of it.
        player_page = get_source(url)
        url = 'https://gloria.tv' + scrapertools.find_single_match(
            player_page, '<source type=".*?" src="([^"]+)">')
    itemlist.append(Item(channel=item.channel, title='%s', action='play', url=url,
                         language=IDIOMAS[language], infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s' % i.server.capitalize())
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             contentTitle=item.contentTitle))
    return itemlist

View File

@@ -39,7 +39,7 @@ list_servers = [
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'peliculonhd')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'peliculonhd')
host = 'https://peliculonhd.com/'
host = 'https://peliculonhd.tv/'
def mainlist(item):
logger.info()
@@ -50,7 +50,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
thumbnail= get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title='Series', url=host+'serie', action='list_all', type='tv',
itemlist.append(Item(channel=item.channel, title='Series', url=host+'ver-serie', action='list_all', type='tv',
thumbnail= get_thumb('tvshows', auto=True)))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + '?s=', thumbnail=get_thumb("search", auto=True),
@@ -65,7 +65,7 @@ def menu_movies(item):
itemlist=[]
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'ver', action='list_all',
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'ver-pelicula', action='list_all',
thumbnail=get_thumb('all', auto=True), type='movie'))
itemlist.append(Item(channel=item.channel, title='Genero', action='section',
thumbnail=get_thumb('genres', auto=True), type='movie'))
@@ -145,8 +145,8 @@ def list_all(item):
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
quality=quality,
contentTitle= contentTitle,
type=item.type,
infoLabels={'year':year}))
@@ -221,7 +221,8 @@ def episodesxseasons(item):
itemlist = []
data=get_source(item.url)
patron='class="numerando">%s - (\d+)</div><div class="episodiotitle">.?<a href="([^"]+)">([^<]+)<' % item.infoLabels['season']
data = data.replace('"','\'')
patron="class='numerando'>%s - (\d+)</div><div class='episodiotitle'>.?<a href='([^']+)'>([^<]+)<" % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -262,7 +263,7 @@ def findvideos(item):
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type':type}
post = urllib.urlencode(post)
test_url = '%swp-admin/admin-ajax.php' % 'https://peliculonhd.com/'
test_url = '%swp-admin/admin-ajax.php' % host
new_data = httptools.downloadpage(test_url, post=post, headers={'Referer':item.url}).data
test_url = scrapertools.find_single_match(new_data, "src='([^']+)'")
if 'xyz' in test_url:

View File

@@ -60,7 +60,7 @@ def mainlist(item):
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + '/api/suggest?query=',
url=host + '/api/suggest/?query=',
thumbnail=get_thumb('search', auto=True)
))
@@ -185,31 +185,31 @@ def seccion(item):
def busqueda(item):
    """Run the site's JSON search API and list the matching movies.

    The API answers JSON; matches live under data['m'] with keys
    title / cover / synopsis / slug.
    """
    # The diff rendering interleaved the old (ElasticSearch '_source')
    # and new ('data.m') implementations and duplicated the append block;
    # only the merged new implementation is kept.
    logger.info()
    itemlist = []
    headers = {'referer': host, 'X-Requested-With': 'XMLHttpRequest',
               'Accept': 'application/json, text/javascript, */*; q=0.01'}
    data = httptools.downloadpage(item.url, headers=headers).data
    dict_data = jsontools.load(data)
    resultados = dict_data['data']['m']
    for resultado in resultados:
        title = resultado['title']
        thumbnail = 'https://static.pelisfox.tv/' + '/' + resultado['cover']
        plot = resultado['synopsis']
        url = host + resultado['slug'] + '/'
        itemlist.append(item.clone(title=title,
                                   thumbnail=thumbnail,
                                   plot=plot,
                                   url=url,
                                   action='findvideos',
                                   contentTitle=title
                                   ))
    return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
texto = texto.replace(" ", "%20")
item.url = item.url + texto
if texto != '':
@@ -224,6 +224,7 @@ def findvideos(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
links = scrapertools.find_single_match(data, '<script>var.*?_SOURCE.?=.?(.*?);')
links = links.replace('null', '"null"')
links = links.replace('false', '"false"').replace('true', '"true"')
links = eval(links)
for link in links:

View File

@@ -1,289 +1,292 @@
# -*- coding: utf-8 -*-
import re
import urllib
import base64
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core import jsontools
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
thumbletras = {'#': 'https://s32.postimg.cc/drojt686d/image.png',
'a': 'https://s32.postimg.cc/llp5ekfz9/image.png',
'b': 'https://s32.postimg.cc/y1qgm1yp1/image.png',
'c': 'https://s32.postimg.cc/vlon87gmd/image.png',
'd': 'https://s32.postimg.cc/3zlvnix9h/image.png',
'e': 'https://s32.postimg.cc/bgv32qmsl/image.png',
'f': 'https://s32.postimg.cc/y6u7vq605/image.png',
'g': 'https://s32.postimg.cc/9237ib6jp/image.png',
'h': 'https://s32.postimg.cc/812yt6pk5/image.png',
'i': 'https://s32.postimg.cc/6nbbxvqat/image.png',
'j': 'https://s32.postimg.cc/axpztgvdx/image.png',
'k': 'https://s32.postimg.cc/976yrzdut/image.png',
'l': 'https://s32.postimg.cc/fmal2e9yd/image.png',
'm': 'https://s32.postimg.cc/m19lz2go5/image.png',
'n': 'https://s32.postimg.cc/b2ycgvs2t/image.png',
'o': 'https://s32.postimg.cc/c6igsucpx/image.png',
'p': 'https://s32.postimg.cc/jnro82291/image.png',
'q': 'https://s32.postimg.cc/ve5lpfv1h/image.png',
'r': 'https://s32.postimg.cc/nmovqvqw5/image.png',
's': 'https://s32.postimg.cc/zd2t89jol/image.png',
't': 'https://s32.postimg.cc/wk9lo8jc5/image.png',
'u': 'https://s32.postimg.cc/w8s5bh2w5/image.png',
'v': 'https://s32.postimg.cc/e7dlrey91/image.png',
'w': 'https://s32.postimg.cc/fnp49k15x/image.png',
'x': 'https://s32.postimg.cc/dkep1w1d1/image.png',
'y': 'https://s32.postimg.cc/um7j3zg85/image.png',
'z': 'https://s32.postimg.cc/jb4vfm9d1/image.png'
}
audio = {'Latino': '[COLOR limegreen]LATINO[/COLOR]', 'Español': '[COLOR yellow]ESPAÑOL[/COLOR]',
'Sub Español': '[COLOR red]SUB ESPAÑOL[/COLOR]'}
host = 'http://pelisgratis.me/'
def mainlist(item):
    """Top-level menu: listings, genre/alphabet sections and search."""
    logger.info()
    # One entry per menu item; clone() receives each dict as keyword args.
    menu = [
        {'title': 'Estrenos', 'action': 'lista',
         'thumbnail': get_thumb('premieres', auto=True), 'url': host + 'estrenos'},
        {'title': 'Todas', 'action': 'lista',
         'thumbnail': get_thumb('all', auto=True), 'url': host},
        {'title': 'Generos', 'action': 'seccion', 'url': host,
         'thumbnail': get_thumb('genres', auto=True), 'extra': 'generos'},
        {'title': 'Alfabetico', 'action': 'seccion', 'url': host,
         'thumbnail': get_thumb('alphabet', auto=True), 'extra': 'a-z'},
        {'title': 'Mas Vistas', 'action': 'lista',
         'thumbnail': get_thumb('more watched', auto=True), 'url': host + 'peliculas-mas-vistas'},
        {'title': 'Mas Votadas', 'action': 'lista',
         'thumbnail': get_thumb('more voted', auto=True), 'url': host + 'peliculas-mas-votadas'},
        {'title': 'Buscar', 'action': 'search', 'url': host + '?s=',
         'thumbnail': get_thumb('search', auto=True)},
    ]
    return [item.clone(**entry) for entry in menu]
def get_source(url):
    """Download *url* (sending the host as referer) and flatten the markup."""
    logger.info()
    raw = httptools.downloadpage(url, add_referer=True).data
    return re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def lista(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'class=(?:MvTbImg|TPostMv).*?href=(.*?)\/(?:>| class).*?src=(.*?) '
patron += 'class=Title>(.*?)<.*?(?:<td|class=Year)>(.*?)<.*?(?:<td|class=Description)>(.*?)<(?:\/td|\/p)>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedplot in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
plot = scrapedplot
quality = ''
contentTitle = scrapedtitle
title = contentTitle
year = scrapedyear
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
contentTitle=contentTitle,
quality=quality,
infoLabels={'year': year}
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion
if itemlist != []:
actual_page_url = item.url
next_page = scrapertools.find_single_match(data, 'rel=next href=(.*?) /')
if next_page != '':
itemlist.append(item.clone(action="lista",
title='Siguiente >>>',
url=next_page,
thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'
))
return itemlist
def seccion(item):
    """List either genre links or the A-Z index, depending on item.extra.

    Letter entries get a thumbnail from the letter-image table; genre
    entries have no thumbnail.
    """
    # Fix: both branches assigned title = scrapedtitle (duplicate code)
    # and dead commented-out code was removed; the thumbnail lookup still
    # applies only to the A-Z section, as before.
    logger.info()
    itemlist = []
    data = get_source(item.url)
    if item.extra == 'generos':
        patron = '<li class=cat-item cat-item-.*?><a href=(.*?)>(.*?)</a><\/li>'
    elif item.extra == 'a-z':
        patron = '<li><a href=(.*?)>(\w|#)<\/a><\/li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle
        thumbnail = ''
        if item.extra != 'generos' and title.lower() in thumbletras:
            thumbnail = thumbletras[title.lower()]
        itemlist.append(item.clone(action='lista', title=title, url=scrapedurl, thumbnail=thumbnail))
    return itemlist
def search(item, texto):
    """Global-search entry point: append the query to the search URL."""
    logger.info()
    query = texto.replace(" ", "+")
    item.url = item.url + query
    # An empty query returns None so the caller shows no results.
    if query != '':
        return lista(item)
def findvideos(item):
logger.info()
itemlist = []
global new_data
new_data = []
data = get_source(item.url)
data = data.replace("&lt;","<").replace("&quot;",'"').replace("&gt;",">").replace("&amp;","&").replace('\"',"")
patron = '<div class=TPlayerTb.*?id=(.*?)>.*?src=(.*?) frameborder'
matches = scrapertools.find_multiple_matches(data, patron)
for opt, urls_page in matches:
language = scrapertools.find_single_match (data,'TPlayerNv>.*?tplayernv=%s><span>Opción.*?<span>(.*?)</span>' % opt)
if 'trembed' in urls_page:
urls_page = scrapertools.decodeHtmlentities(urls_page)
sub_data = httptools.downloadpage(urls_page).data
urls_page = scrapertools.find_single_match(sub_data, 'src="([^"]+)" ')
if "repro.live" in urls_page:
server_repro(urls_page)
if "repros.live" in urls_page:
server_repros(urls_page)
if "itatroniks.com" in urls_page:
server_itatroniks(urls_page)
for url in new_data:
itemlist.append(item.clone(title='[%s][%s]',
url=url,
action='play',
language=language,
))
new_data = []
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
return itemlist
def server_itatroniks(urls_page):
logger.info()
headers = {"Referer":urls_page}
id = scrapertools.find_single_match(urls_page, 'embed/(\w+)')
sub_data = httptools.downloadpage(urls_page, headers = headers).data
matches = scrapertools.find_multiple_matches(sub_data, 'button id="([^"]+)')
headers1 = ({"X-Requested-With":"XMLHttpRequest"})
for serv in matches:
data1 = httptools.downloadpage("https://itatroniks.com/get/%s/%s" %(id, serv), headers = headers1).data
data_json = jsontools.load(data1)
urls_page = ""
try:
if "finished" == data_json["status"]: urls_page = "https://%s/embed/%s" %(data_json["server"], data_json["extid"])
if "propio" == data_json["status"]: urls_page = "https://%s/e/%s" %(data_json["server"], data_json["extid"])
except:
continue
new_data.append(urls_page)
def server_repros(urls_page):
logger.info()
headers = {"Referer":host}
headers1 = {"X-Requested-With":"XMLHttpRequest"}
sub_data = httptools.downloadpage(urls_page, headers = headers).data
urls_page1 = scrapertools.find_multiple_matches(sub_data, 'data-embed="([^"]+)"')
for idurl in urls_page1:
#post = {"codigo":idurl}
#post = urllib.urlencode(post)
dd1 = httptools.downloadpage("https://repros.live/player/ajaxdata", post = urllib.urlencode({"codigo":idurl}), headers = headers1).data
data_json = jsontools.load(dd1)
new_data.append(data_json["url"])
def server_repro(urls_page):
logger.info()
headers = {"Referer":urls_page}
sub_data = httptools.downloadpage(urls_page, headers = headers).data
urls_page1 = scrapertools.find_multiple_matches(sub_data, 'data-embed="([^"]+)"')
for urls_page in urls_page1:
urls_page += "==" # base64.decode no decodifica si no tiene al final "=="
urls_page = base64.b64decode(urls_page)
if "repro.live" in urls_page:
data1 = httptools.downloadpage(urls_page, headers = headers).data
urls_page1 = scrapertools.find_multiple_matches(data1, 'source src="([^"]+)')
for urls_page in urls_page1:
new_data.append(urls_page)
else:
new_data.append(urls_page)
def newest(categoria):
    """Return the newest items for a category ('peliculas', 'infantiles', 'terror').

    Any scraping failure is logged and an empty list returned, so the
    global "novedades" screen never breaks on one bad channel.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        section_urls = {'peliculas': host + 'estrenos',
                        'infantiles': host + 'animacion',
                        'terror': host + 'terror'}
        if categoria in section_urls:
            item.url = section_urls[categoria]
        itemlist = lista(item)
        # Drop the trailing pagination entry — newest() wants items only.
        if itemlist[-1].title == 'Siguiente >>>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
# -*- coding: utf-8 -*-
import re
import urllib
import base64
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core import jsontools
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
thumbletras = {'#': 'https://s32.postimg.cc/drojt686d/image.png',
'a': 'https://s32.postimg.cc/llp5ekfz9/image.png',
'b': 'https://s32.postimg.cc/y1qgm1yp1/image.png',
'c': 'https://s32.postimg.cc/vlon87gmd/image.png',
'd': 'https://s32.postimg.cc/3zlvnix9h/image.png',
'e': 'https://s32.postimg.cc/bgv32qmsl/image.png',
'f': 'https://s32.postimg.cc/y6u7vq605/image.png',
'g': 'https://s32.postimg.cc/9237ib6jp/image.png',
'h': 'https://s32.postimg.cc/812yt6pk5/image.png',
'i': 'https://s32.postimg.cc/6nbbxvqat/image.png',
'j': 'https://s32.postimg.cc/axpztgvdx/image.png',
'k': 'https://s32.postimg.cc/976yrzdut/image.png',
'l': 'https://s32.postimg.cc/fmal2e9yd/image.png',
'm': 'https://s32.postimg.cc/m19lz2go5/image.png',
'n': 'https://s32.postimg.cc/b2ycgvs2t/image.png',
'o': 'https://s32.postimg.cc/c6igsucpx/image.png',
'p': 'https://s32.postimg.cc/jnro82291/image.png',
'q': 'https://s32.postimg.cc/ve5lpfv1h/image.png',
'r': 'https://s32.postimg.cc/nmovqvqw5/image.png',
's': 'https://s32.postimg.cc/zd2t89jol/image.png',
't': 'https://s32.postimg.cc/wk9lo8jc5/image.png',
'u': 'https://s32.postimg.cc/w8s5bh2w5/image.png',
'v': 'https://s32.postimg.cc/e7dlrey91/image.png',
'w': 'https://s32.postimg.cc/fnp49k15x/image.png',
'x': 'https://s32.postimg.cc/dkep1w1d1/image.png',
'y': 'https://s32.postimg.cc/um7j3zg85/image.png',
'z': 'https://s32.postimg.cc/jb4vfm9d1/image.png'
}
audio = {'Latino': '[COLOR limegreen]LATINO[/COLOR]', 'Español': '[COLOR yellow]ESPAÑOL[/COLOR]',
'Sub Español': '[COLOR red]SUB ESPAÑOL[/COLOR]'}
host = 'http://pelisgratis.me/'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Estrenos",
action="lista",
thumbnail=get_thumb('premieres', auto=True),
url=host + 'estrenos'
))
itemlist.append(item.clone(title="Todas",
action="lista",
thumbnail=get_thumb('all', auto=True),
url=host
))
itemlist.append(item.clone(title="Generos",
action="seccion",
url=host,
thumbnail=get_thumb('genres', auto=True),
extra='generos'
))
itemlist.append(item.clone(title="Alfabetico",
action="seccion",
url=host,
thumbnail=get_thumb('alphabet', auto=True),
extra='a-z'
))
itemlist.append(item.clone(title="Mas Vistas",
action="lista",
thumbnail=get_thumb('more watched', auto=True),
url=host + 'peliculas-mas-vistas'
))
itemlist.append(item.clone(title="Mas Votadas",
action="lista",
thumbnail=get_thumb('more voted', auto=True),
url=host + 'peliculas-mas-votadas'
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + '?s=',
thumbnail=get_thumb('search', auto=True)
))
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def lista(item):
    """Scrape one results page of movies and queue the next page.

    Each <article> yields url, thumbnail, title, a nested year block and
    a plot paragraph; the year is extracted from the nested block.
    """
    # Fix: removed the no-op `year = year` assignment and the unused
    # `actual_page_url`/intermediate locals from the original.
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'article id=post-\d+.*?href=([^>]+)>.*?src=(.*?)\s.*?"Title">([^<]+)<(.*?)</a>.*?<p>([^<]+)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, year_data, scrapedplot in matches:
        year = scrapertools.find_single_match(year_data, 'Year>(\d{4})<')
        itemlist.append(item.clone(action='findvideos',
                                   title=scrapedtitle,
                                   url=scrapedurl,
                                   thumbnail=scrapedthumbnail,
                                   plot=scrapedplot,
                                   contentTitle=scrapedtitle,
                                   quality='',
                                   infoLabels={'year': year}
                                   ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination: only offered when the current page produced results.
    if itemlist != []:
        next_page = scrapertools.find_single_match(data, 'href=([^>]+)>Siguiente &raquo;</a>')
        if next_page != '':
            itemlist.append(item.clone(action="lista",
                                       title='Siguiente >>>',
                                       url=next_page,
                                       thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'
                                       ))
    return itemlist
def seccion(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.extra == 'generos':
patron = 'menu-item-object-category.*?<a href=([^<]+)>([^<]+)</a>'
elif item.extra == 'a-z':
patron = '<li><a href=([^<]+)>(\w|#)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = scrapedurl
thumbnail = ''
if item.extra == 'generos':
title = scrapedtitle
else:
title = scrapedtitle
if title.lower() in thumbletras:
thumbnail = thumbletras[title.lower()]
itemlist.append(item.clone(action='lista', title=title, url=url, thumbnail=thumbnail))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return lista(item)
def findvideos(item):
    """Collect playable server URLs for a movie page.

    The helper resolvers (server_repro / server_repros / server_itatroniks)
    do not return values; they append resolved URLs to the module-level
    list ``new_data``, which this function owns and resets per option.
    """
    logger.info()
    itemlist = []
    # Shared accumulator filled by the server_* helpers below.
    global new_data
    new_data = []
    data = get_source(item.url)
    # Un-escape HTML entities and strip escaped quotes before matching.
    data = data.replace("&lt;","<").replace("&quot;",'"').replace("&gt;",">").replace("&amp;","&").replace('\"',"")
    # One tab per player option: capture its id and the iframe src.
    patron = '<div class=TPlayerTb.*?id=(.*?)>.*?src=(.*?) frameborder'
    matches = scrapertools.find_multiple_matches(data, patron)
    for opt, urls_page in matches:
        # Language label shown next to this option's tab.
        language = scrapertools.find_single_match (data,'TPlayerNv>.*?tplayernv=%s><span>Opción.*?<span>(.*?)</span>' % opt)
        if 'trembed' in urls_page:
            # Intermediate "trembed" redirect page: fetch it to get the real embed src.
            urls_page = scrapertools.decodeHtmlentities(urls_page)
            sub_data = httptools.downloadpage(urls_page).data
            urls_page = scrapertools.find_single_match(sub_data, 'src="([^"]+)" ')
        # Dispatch to the matching resolver; each appends into new_data.
        if "repro.live" in urls_page:
            server_repro(urls_page)
        if "repros.live" in urls_page:
            server_repros(urls_page)
        if "itatroniks.com" in urls_page:
            server_itatroniks(urls_page)
        # Title placeholders are filled in by get_servers_itemlist below.
        for url in new_data:
            itemlist.append(item.clone(title='[%s][%s]',
                                       url=url,
                                       action='play',
                                       language=language,
                                       ))
        # Reset the accumulator so the next option starts clean.
        new_data = []
    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
    return itemlist
def server_itatroniks(urls_page):
    """Resolve itatroniks.com embeds and append the final URLs to new_data."""
    logger.info()
    video_id = scrapertools.find_single_match(urls_page, r'embed/(\w+)')
    page = httptools.downloadpage(urls_page, headers={"Referer": urls_page}).data
    ajax_headers = {"X-Requested-With": "XMLHttpRequest"}
    for serv in scrapertools.find_multiple_matches(page, 'button id="([^"]+)'):
        raw = httptools.downloadpage("https://itatroniks.com/get/%s/%s" % (video_id, serv), headers=ajax_headers).data
        info = jsontools.load(raw)
        resolved = ""
        try:
            # Two known statuses map to two embed URL layouts.
            if info["status"] == "finished":
                resolved = "https://%s/embed/%s" % (info["server"], info["extid"])
            if info["status"] == "propio":
                resolved = "https://%s/e/%s" % (info["server"], info["extid"])
        except:
            # Malformed/partial JSON answer: skip this server entirely.
            continue
        new_data.append(resolved)
def server_repros(urls_page):
    """Resolve repros.live embeds via its ajax endpoint into new_data.

    Each data-embed code on the page is posted to /player/ajaxdata, whose
    JSON answer carries the final playable URL.
    """
    logger.info()
    headers = {"Referer": host}
    headers1 = {"X-Requested-With": "XMLHttpRequest"}
    sub_data = httptools.downloadpage(urls_page, headers=headers).data
    for idurl in scrapertools.find_multiple_matches(sub_data, 'data-embed="([^"]+)"'):
        dd1 = httptools.downloadpage("https://repros.live/player/ajaxdata",
                                     post=urllib.urlencode({"codigo": idurl}),
                                     headers=headers1).data
        data_json = jsontools.load(dd1)
        new_data.append(data_json["url"])
def server_repro(urls_page):
    """Decode base64 embed codes on a repro.live page into new_data."""
    logger.info()
    headers = {"Referer": urls_page}
    sub_data = httptools.downloadpage(urls_page, headers=headers).data
    embeds = scrapertools.find_multiple_matches(sub_data, 'data-embed="([^"]+)"')
    for embed in embeds:
        # Codes arrive without padding; b64decode needs the trailing "==".
        decoded = base64.b64decode(embed + "==")
        if "repro.live" in decoded:
            # Nested player page: fetch it and collect the direct sources.
            nested = httptools.downloadpage(decoded, headers=headers).data
            for direct in scrapertools.find_multiple_matches(nested, 'source src="([^"]+)'):
                new_data.append(direct)
        else:
            new_data.append(decoded)
def newest(categoria):
    """Return the freshest items for a category ('peliculas', 'infantiles', 'terror').

    Any scraping failure is logged and an empty list returned, so the
    global "novedades" aggregator is never broken by this channel.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = host + 'estrenos'
        elif categoria == 'infantiles':
            item.url = host + 'animacion'
        elif categoria == 'terror':
            item.url = host + 'terror'
        itemlist = lista(item)
        # Drop the trailing pagination pseudo-item; guard against an empty
        # result so we don't raise (and log) a pointless IndexError.
        if itemlist and itemlist[-1].title == 'Siguiente >>>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

View File

@@ -46,9 +46,12 @@ def mainlist(item):
return itemlist
def get_source(url):
def get_source(url, referer=None):
logger.info()
data = httptools.downloadpage(url).data
if referer is None:
data = httptools.downloadpage(url, ignore_response_code=True).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://archive.org/embed/(.*)",
"url": "https://archive.org/embed/\\1"
}
]
},
"free": true,
"id": "ArchiveOrg",
"name": "archiveorg",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://i.postimg.cc/P5ZYJM4L/archiveorg.png"
}

View File

@@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector ArchiveOrg By Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
    """Return (exists, message); False with a notice when the page 404s."""
    logger.info("(page_url='%s')" % page_url)
    response = httptools.downloadpage(page_url)
    if response.code == 404:
        return False, "[ArchiveOrg] El archivo no existe o ha sido borrado"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Extract direct video URLs from an archive.org embed page.

    Reads every og:video meta tag and returns a list of
    [label, url] pairs in the format servertools expects.
    """
    logger.info("url=" + page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    # (removed leftover logger.debug(data): it dumped the whole page into the log)
    patron = r'<meta property="og:video" content="([^"]+)">'
    for url in scrapertools.find_multiple_matches(data, patron):
        video_urls.append(['.MP4 [ArchiveOrg]', url])
    return video_urls