Updated

jawcloud: new server; estream: add pattern; seriesblanco: languages; qserie: fix; pelisplusco: fix; hdfull: fix; cinemahd: fix
@@ -59,18 +59,22 @@ def list_all(item):
    itemlist = []

    data = get_source(item.url)
+    full_data = data
    data = scrapertools.find_single_match(data, '<ul class=MovieList NoLmtxt.*?</ul>')
    if item.section == 'alpha':
        patron = '<span class=Num>\d+.*?<a href=(.*?) class.*?<img src=(.*?) alt=.*?<strong>(.*?)</strong>.*?'
        patron += '<td>(\d{4})</td>'
+        matches = re.compile(patron, re.DOTALL).findall(full_data)
    else:
        patron = '<article id=post-.*?<a href=(.*?)>.*?<img src=(.*?) alt=.*?'
-        patron += '<h3 class=Title>(.*?)<\/h3>.*?<span class=Year>(.*?)<\/span>'
-    data = get_source(item.url)
-    matches = re.compile(patron, re.DOTALL).findall(data)
+        patron += '<h3 class=Title>(.*?)<\/h3>(?:</a>|<span class=Year>(.*?)<\/span>)'
+        matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:

        url = scrapedurl
+        if year == '':
+            year = '-'
        if "|" in scrapedtitle:
            scrapedtitle= scrapedtitle.split("|")
            contentTitle = scrapedtitle[0].strip()
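Side note on the pattern change above: the new alternation (?:</a>|<span class=Year>...) lets an entry match even when the page omits the Year span, which is what the new year == '' fallback handles. A minimal sketch with invented HTML snippets:

import re

patron = '<article id=post-.*?<a href=(.*?)>.*?<img src=(.*?) alt=.*?'
patron += '<h3 class=Title>(.*?)<\/h3>(?:</a>|<span class=Year>(.*?)<\/span>)'

# Invented samples: one entry with a Year span, one without.
with_year = '<article id=post-1><a href=/peli-1>.<img src=/t1.jpg alt=a>' \
            '<h3 class=Title>Movie One</h3><span class=Year>2018</span>'
without_year = '<article id=post-2><a href=/peli-2>.<img src=/t2.jpg alt=b>' \
               '<h3 class=Title>Movie Two</h3></a>'

for html in (with_year, without_year):
    print(re.compile(patron, re.DOTALL).findall(html))
# [('/peli-1', '/t1.jpg', 'Movie One', '2018')]
# [('/peli-2', '/t2.jpg', 'Movie Two', '')]   <- empty year, normalized to '-' above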
@@ -92,7 +96,7 @@ def list_all(item):

    # Paginación

-    url_next_page = scrapertools.find_single_match(data,'<a class=next.*?href=(.*?)>')
+    url_next_page = scrapertools.find_single_match(full_data,'<a class=next.*?href=(.*?)>')
    if url_next_page:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
    return itemlist
@@ -102,14 +106,13 @@ def section(item):
    itemlist = []

    data = get_source(host)

    action = 'list_all'
    if item.section == 'quality':
-        patron = 'menu-item-object-category.*?menu-item-\d+><a href=(.*?)>(.*?)<\/a>'
+        patron = 'menu-item-object-category.*?menu-item-\d+ menu-category-list><a href=(.*?)>(.*?)<\/a>'
    elif item.section == 'genre':
-        patron = '<a href=(http:.*?) class=Button STPb>(.*?)</a>'
+        patron = '<a href=([^ ]+) class=Button STPb>(.*?)</a>'
    elif item.section == 'year':
-        patron = 'custom menu-item-15\d+><a href=(.*?\?s.*?)>(\d{4})<\/a><\/li>'
+        patron = '<li><a href=([^>]+)>(\d{4})<\/a><\/li>'
    elif item.section == 'alpha':
        patron = '<li><a href=(.*?letters.*?)>(.*?)</a>'
-        action = 'list_all'
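The genre pattern fix is worth a closer look: the old (http:.*?) capture silently missed https links, while ([^ ]+) takes any scheme up to the first space. A quick check with an invented anchor tag:

import re

html = '<a href=https://example-site.tld/genero/accion class=Button STPb>Accion</a>'
old = '<a href=(http:.*?) class=Button STPb>(.*?)</a>'
new = '<a href=([^ ]+) class=Button STPb>(.*?)</a>'
print(re.findall(old, html))  # [] -- the literal 'http:' never matches 'https:'
print(re.findall(new, html))  # [('https://example-site.tld/genero/accion', 'Accion')]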
@@ -517,10 +517,10 @@ def findvideos(item):
                             url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False))
        title_label = " ( [COLOR green][B]Tráiler[/B][/COLOR] )"
        it1.append(
-            item.clone(channel="trailertools", action="buscartrailer", title=title_label, contentTitle=item.show, url=item.url,
+            Item(channel="trailertools", action="buscartrailer", title=title_label, contentTitle=item.show, url=item.url,
                 thumbnail=item.thumbnail, show=item.show))
        it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
-                        thumbnail=item.thumbnail, show=item.show, folder=True))
+                        thumbnail=item.thumbnail, show=item.show, language=item.language, folder=True))
    data_js = httptools.downloadpage("%s/templates/hdfull/js/jquery.hdfull.view.min.js" % host).data
    key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')
    data_js = httptools.downloadpage("%s/js/providers.js" % host).data
@@ -566,8 +566,8 @@ def findvideos(item):
            if account:
                url += "###" + id + ";" + type
            it2.append(
-                item.clone(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
-                     plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels,
+                Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
+                     plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels, language=idioma,
                     contentTitle=item.contentTitle, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
    it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
    it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
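The existing sort already keys on the fields this commit now also exposes as language=: links group by option (tipo1), then language (idioma), then server. A stand-in sketch, using a namedtuple instead of the real Item and invented values:

from collections import namedtuple

Link = namedtuple('Link', 'tipo1 idioma server')
it2 = [Link('Ver', 'LAT', 'openload'), Link('Descargar', 'ESP', 'uptobox'),
       Link('Ver', 'ESP', 'fastvideo'), Link('Ver', 'ESP', 'openload')]
it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
for link in it2:
    print(link)
# Descargar/ESP first, then Ver/ESP/fastvideo, Ver/ESP/openload, Ver/LAT/openload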
@@ -85,7 +85,8 @@ def search(item, texto):
def sub_search(item):
    logger.info()
    itemlist =[]
-    data = httptools.downloadpage(item.url, add_referer=True).data
+    headers = {'Referer':host, 'X-Requested-With': 'XMLHttpRequest'}
+    data = httptools.downloadpage(item.url, headers=headers).data
    dict_data = jsontools.load(data)
    list =dict_data["data"] [item.type]
    if item.type == "m":
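The sub_search() change swaps the plain referer for explicit AJAX headers; many sites only answer with JSON when X-Requested-With is present, and otherwise return the full HTML page. A sketch in the plugin's own API, where the host and endpoint are placeholders, not the real channel's:

from core import httptools
from core import jsontools

host = 'https://example-site.tld'      # placeholder host
url = host + '/ajax/search?q=matrix'   # hypothetical search endpoint
headers = {'Referer': host, 'X-Requested-With': 'XMLHttpRequest'}
data = httptools.downloadpage(url, headers=headers).data
results = jsontools.load(data)["data"]  # the same dict sub_search() indexes by item.type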
@@ -2,10 +2,13 @@

import re
import urlparse
+import urllib

from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
+from core import jsontools
from core.item import Item
from platformcode import config, logger

@@ -316,40 +319,33 @@ def lasmas(item):
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist

+def get_source(url):
+    logger.info()
+    data = httptools.downloadpage(url).data
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    return data
+
+def get_link(data):
+    new_url = scrapertools.find_single_match(data, '(?:IFRAME|iframe) src=(.*?) scrolling')
+    return new_url
+
def findvideos(item):
    logger.info()
+    host = 'https://www.locopelis.tv/'
    itemlist = []
    data = httptools.downloadpage(item.url).data

    anterior = scrapertools.find_single_match(data, '<a class="left" href="([^"]+)" title="Cap.tulo Anterior"></a>')
    siguiente = scrapertools.find_single_match(data, '<a class="right" href="([^"]+)" title="Cap.tulo Siguiente"></a>')
    titulo = scrapertools.find_single_match(data,
                                            '<h1 class="tithd bold fs18px lnht30px ico_b pdtop10px">([^<]+)</h1> ')
    existe = scrapertools.find_single_match(data, '<center>La pel.cula que quieres ver no existe.</center>')

-    from core import servertools
-    itemlist.extend(servertools.find_video_items(data=data))
-    for videoitem in itemlist:
-        if 'youtube' in videoitem.url:
-            itemlist.remove(videoitem)
-    for videoitem in itemlist:
-        videoitem.channel = item.channel
-        videoitem.action = "play"
-        videoitem.folder = False
-        videoitem.fanart = item.fanart
-        videoitem.title = titulo + " " + videoitem.server
    if item.extra2 != 'todos':
        data = httptools.downloadpage(anterior).data
        existe = scrapertools.find_single_match(data, '<center>La pel.cula que quieres ver no existe.</center>')
        if not existe:
            itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Anterior', url=anterior,
                                 thumbnail='https://s1.postimg.cc/dbq8gvldb/anterior.png', folder=True))

        data = httptools.downloadpage(siguiente).data
        existe = scrapertools.find_single_match(data, '<center>La pel.cula que quieres ver no existe.</center>')
        if not existe:
            itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Siguiente', url=siguiente,
                                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png', folder=True))
+    new_url = get_link(get_source(item.url))
+    new_url = get_link(get_source(new_url))
+    video_id = scrapertools.find_single_match(new_url, 'http.*?h=(\w+)')
+    new_url = '%s%s' % (host, 'playeropstream/api.php')
+    post = {'h': video_id}
+    post = urllib.urlencode(post)
+    data = httptools.downloadpage(new_url, post=post).data
+    json_data = jsontools.load(data)
+    url = json_data['url']
+    server = servertools.get_server_from_url(url)
+    title = '%s' % server
+    itemlist.append(Item(channel=item.channel, title=title, url=url, action='play',
+                         server=server, infoLabels=item.infoLabels))

    return itemlist
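To follow the new resolver end to end: the page embeds an iframe, that iframe embeds another, and the inner URL carries the id that playeropstream/api.php exchanges for the real video URL. A trace with invented values; only the regex step is runnable as-is:

import re

# 1. get_link(get_source(item.url)) -> outer player iframe (invented URL)
# 2. get_link(get_source(new_url))  -> inner iframe carrying the id:
new_url = 'https://www.locopelis.tv/playeropstream/?h=a1b2c3'
video_id = re.search('http.*?h=(\w+)', new_url).group(1)
print(video_id)  # a1b2c3
# 3. POST h=a1b2c3 to playeropstream/api.php -> JSON like {"url": "https://somehost.example/v/xyz"}
# 4. servertools.get_server_from_url(url) then maps that URL to a server connector.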
@@ -7,7 +7,8 @@
    "thumbnail": "https://s22.postimg.cc/nucz720sx/image.png",
    "banner": "",
    "categories": [
-        "tvshow"
+        "tvshow",
+        "vos"
    ],
    "settings": [
        {
@@ -6,6 +6,10 @@
      {
        "pattern": "https://estream.to/embed-([a-z0-9]+).html",
        "url": "https://estream.to/\\1.html"
      },
+     {
+       "pattern": "https://estream.xyz/embed-([a-z0-9]+).html",
+       "url": "https://estream.to/\\1.html"
+     }
    ]
  },
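The added entry makes estream.xyz embeds resolve through the existing estream.to handler. How the pattern/url pair rewrites a link, with an invented embed id:

import re

pattern = 'https://estream.xyz/embed-([a-z0-9]+).html'
embed = 'https://estream.xyz/embed-abc123xy.html'
print(re.sub(pattern, 'https://estream.to/\\1.html', embed))
# https://estream.to/abc123xy.html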
plugin.video.alfa/servers/jawcloud.json (new file, 42 lines)
@@ -0,0 +1,42 @@
{
  "active": true,
  "find_videos": {
    "ignore_urls": [],
    "patterns": [
      {
        "pattern": "(jawcloud.co/embed-([A-z0-9]+))",
        "url": "https://\\1.html"
      }
    ]
  },
  "free": true,
  "id": "jawcloud",
  "name": "jawcloud",
  "settings": [
    {
      "default": false,
      "enabled": true,
      "id": "black_list",
      "label": "@60654",
      "type": "bool",
      "visible": true
    },
    {
      "default": 0,
      "enabled": true,
      "id": "favorites_servers_list",
      "label": "@60655",
      "lvalues": [
        "No",
        "1",
        "2",
        "3",
        "4",
        "5"
      ],
      "type": "list",
      "visible": false
    }
  ],
  "thumbnail": "https://s8.postimg.cc/b64mzlgxh/jawcloud1.png"
}
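How find_videos applies the new pattern: it scans page data for the embed reference and rebuilds a canonical URL from group 1. The sample HTML below is invented:

import re

pattern = r'(jawcloud.co/embed-([A-z0-9]+))'
sample = '<iframe src="https://jawcloud.co/embed-q1w2e3r4.html" frameborder=0>'
m = re.search(pattern, sample)
print('https://%s.html' % m.group(1))  # https://jawcloud.co/embed-q1w2e3r4.html
# Note: [A-z] is broader than [A-Za-z]; it also matches [ \ ] ^ _ ` , a common
# quirk in these server definitions.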
plugin.video.alfa/servers/jawcloud.py (new file, 20 lines)
@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    return True, ""


def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    video_urls = []
    videourl = scrapertools.find_single_match(data, 'source src="([^"]+)')
    video_urls.append([".MP4 [jawcloud]", videourl])

    return video_urls
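A hedged sketch of how the server framework would drive this connector; the embed id is invented and the import path assumes the addon's servers package:

from servers import jawcloud

page_url = 'https://jawcloud.co/embed-q1w2e3r4.html'
exists, message = jawcloud.test_video_exists(page_url)
if exists:
    for label, url in jawcloud.get_video_url(page_url):
        print(label, url)  # ".MP4 [jawcloud]" plus the direct source URL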