(.*?)<\/h3>(?:|(.*?)<\/span>)'
+ matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
url = scrapedurl
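+ # use '-' as a placeholder year when the page does not list one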
+ if year == '':
+ year = '-'
if "|" in scrapedtitle:
scrapedtitle= scrapedtitle.split("|")
contentTitle = scrapedtitle[0].strip()
@@ -92,7 +96,7 @@ def list_all(item):
# Paginación
- url_next_page = scrapertools.find_single_match(data,'')
+ url_next_page = scrapertools.find_single_match(full_data,'')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
@@ -102,14 +106,13 @@ def section(item):
itemlist = []
data = get_source(host)
-
action = 'list_all'
if item.section == 'quality':
- patron = 'menu-item-object-category.*?menu-item-\d+>(.*?)<\/a>'
+ patron = 'menu-item-object-category.*?menu-item-\d+ menu-category-list>(.*?)<\/a>'
elif item.section == 'genre':
- patron = '(.*?)'
+ patron = '(.*?)'
elif item.section == 'year':
- patron = 'custom menu-item-15\d+>(\d{4})<\/a><\/li>'
+ patron = '- ]+)>(\d{4})<\/a><\/li>'
elif item.section == 'alpha':
patron = '
- (.*?)'
action = 'list_all'
diff --git a/plugin.video.alfa/channels/hdfull.py b/plugin.video.alfa/channels/hdfull.py
index a05cd914..30a8de5d 100644
--- a/plugin.video.alfa/channels/hdfull.py
+++ b/plugin.video.alfa/channels/hdfull.py
@@ -517,10 +517,10 @@ def findvideos(item):
url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False))
title_label = " ( [COLOR green][B]Tráiler[/B][/COLOR] )"
it1.append(
- item.clone(channel="trailertools", action="buscartrailer", title=title_label, contentTitle=item.show, url=item.url,
+ Item(channel="trailertools", action="buscartrailer", title=title_label, contentTitle=item.show, url=item.url,
thumbnail=item.thumbnail, show=item.show))
it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
- thumbnail=item.thumbnail, show=item.show, folder=True))
+ thumbnail=item.thumbnail, show=item.show, language=item.language, folder=True))
data_js = httptools.downloadpage("%s/templates/hdfull/js/jquery.hdfull.view.min.js" % host).data
key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')
data_js = httptools.downloadpage("%s/js/providers.js" % host).data
@@ -566,8 +566,8 @@ def findvideos(item):
if account:
url += "###" + id + ";" + type
it2.append(
- item.clone(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
- plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels,
+ Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
+ plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels, language=idioma,
contentTitle=item.contentTitle, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
diff --git a/plugin.video.alfa/channels/pelisplusco.py b/plugin.video.alfa/channels/pelisplusco.py
index 969116ed..d86a139d 100644
--- a/plugin.video.alfa/channels/pelisplusco.py
+++ b/plugin.video.alfa/channels/pelisplusco.py
@@ -85,7 +85,8 @@ def search(item, texto):
def sub_search(item):
logger.info()
itemlist =[]
- data = httptools.downloadpage(item.url, add_referer=True).data
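+ # build the headers by hand: keep the Referer and add the X-Requested-With header the AJAX search endpoint appears to expect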
+ headers = {'Referer':host, 'X-Requested-With': 'XMLHttpRequest'}
+ data = httptools.downloadpage(item.url, headers=headers).data
dict_data = jsontools.load(data)
list =dict_data["data"] [item.type]
if item.type == "m":
diff --git a/plugin.video.alfa/channels/qserie.py b/plugin.video.alfa/channels/qserie.py
index f91a6d6a..33657a65 100755
--- a/plugin.video.alfa/channels/qserie.py
+++ b/plugin.video.alfa/channels/qserie.py
@@ -2,10 +2,13 @@
import re
import urlparse
+import urllib
from core import httptools
from core import scrapertools
+from core import servertools
from core import tmdb
+from core import jsontools
from core.item import Item
from platformcode import config, logger
@@ -316,40 +319,33 @@ def lasmas(item):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
+def get_source(url):
+ logger.info()
+ data = httptools.downloadpage(url).data
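+ # collapse quotes, line breaks and repeated whitespace so the scraping regexes stay simple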
+ data = re.sub(r'"|\n|\r|\t| |
|\s{2,}', "", data)
+ return data
+
+def get_link(data):
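+ # pull the embedded player URL out of the iframe src attribute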
+ new_url = scrapertools.find_single_match(data, '(?:IFRAME|iframe) src=(.*?) scrolling')
+ return new_url
def findvideos(item):
logger.info()
+ host = 'https://www.locopelis.tv/'
itemlist = []
- data = httptools.downloadpage(item.url).data
-
- anterior = scrapertools.find_single_match(data, '')
- siguiente = scrapertools.find_single_match(data, '')
- titulo = scrapertools.find_single_match(data,
- '([^<]+)')
- existe = scrapertools.find_single_match(data, 'La pel.cula que quieres ver no existe. ')
-
- from core import servertools
- itemlist.extend(servertools.find_video_items(data=data))
- for videoitem in itemlist:
- if 'youtube' in videoitem.url:
- itemlist.remove(videoitem)
- for videoitem in itemlist:
- videoitem.channel = item.channel
- videoitem.action = "play"
- videoitem.folder = False
- videoitem.fanart = item.fanart
- videoitem.title = titulo + " " + videoitem.server
- if item.extra2 != 'todos':
- data = httptools.downloadpage(anterior).data
- existe = scrapertools.find_single_match(data, 'La pel.cula que quieres ver no existe. ')
- if not existe:
- itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Anterior', url=anterior,
- thumbnail='https://s1.postimg.cc/dbq8gvldb/anterior.png', folder=True))
-
- data = httptools.downloadpage(siguiente).data
- existe = scrapertools.find_single_match(data, 'La pel.cula que quieres ver no existe. ')
- if not existe:
- itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Siguiente', url=siguiente,
- thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png', folder=True))
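+ # the episode page nests the player inside two iframes, so resolve the link twice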
+ new_url = get_link(get_source(item.url))
+ new_url = get_link(get_source(new_url))
+ video_id = scrapertools.find_single_match(new_url, 'http.*?h=(\w+)')
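+ # POST the extracted id to the playeropstream API, which returns the final video URL as JSON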
+ new_url = '%s%s' % (host, 'playeropstream/api.php')
+ post = {'h': video_id}
+ post = urllib.urlencode(post)
+ data = httptools.downloadpage(new_url, post=post).data
+ json_data = jsontools.load(data)
+ url = json_data['url']
+ server = servertools.get_server_from_url(url)
+ title = '%s' % server
+ itemlist.append(Item(channel=item.channel, title=title, url=url, action='play',
+ server=server, infoLabels=item.infoLabels))
return itemlist
+
diff --git a/plugin.video.alfa/channels/seriesblanco.json b/plugin.video.alfa/channels/seriesblanco.json
index cab8afd1..088b3c9e 100644
--- a/plugin.video.alfa/channels/seriesblanco.json
+++ b/plugin.video.alfa/channels/seriesblanco.json
@@ -7,7 +7,8 @@
"thumbnail": "https://s22.postimg.cc/nucz720sx/image.png",
"banner": "",
"categories": [
- "tvshow"
+ "tvshow",
+ "vos"
],
"settings": [
{
diff --git a/plugin.video.alfa/servers/estream.json b/plugin.video.alfa/servers/estream.json
index 5f2b84c4..d9f644cf 100644
--- a/plugin.video.alfa/servers/estream.json
+++ b/plugin.video.alfa/servers/estream.json
@@ -6,6 +6,10 @@
{
"pattern": "https://estream.to/embed-([a-z0-9]+).html",
"url": "https://estream.to/\\1.html"
+ },
+ {
+ "pattern": "https://estream.xyz/embed-([a-z0-9]+).html",
+ "url": "https://estream.to/\\1.html"
}
]
},
diff --git a/plugin.video.alfa/servers/jawcloud.json b/plugin.video.alfa/servers/jawcloud.json
new file mode 100644
index 00000000..1a564aa3
--- /dev/null
+++ b/plugin.video.alfa/servers/jawcloud.json
@@ -0,0 +1,42 @@
+{
+ "active": true,
+ "find_videos": {
+ "ignore_urls": [],
+ "patterns": [
+ {
+ "pattern": "(jawcloud.co/embed-([A-z0-9]+))",
+ "url": "https://\\1.html"
+ }
+ ]
+ },
+ "free": true,
+ "id": "jawcloud",
+ "name": "jawcloud",
+ "settings": [
+ {
+ "default": false,
+ "enabled": true,
+ "id": "black_list",
+ "label": "@60654",
+ "type": "bool",
+ "visible": true
+ },
+ {
+ "default": 0,
+ "enabled": true,
+ "id": "favorites_servers_list",
+ "label": "@60655",
+ "lvalues": [
+ "No",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5"
+ ],
+ "type": "list",
+ "visible": false
+ }
+ ],
+ "thumbnail": "https://s8.postimg.cc/b64mzlgxh/jawcloud1.png"
+}
diff --git a/plugin.video.alfa/servers/jawcloud.py b/plugin.video.alfa/servers/jawcloud.py
new file mode 100644
index 00000000..591d080a
--- /dev/null
+++ b/plugin.video.alfa/servers/jawcloud.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+from core import httptools
+from core import scrapertools
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+ logger.info("(page_url='%s')" % page_url)
+ return True, ""
+
+
+def get_video_url(page_url, user="", password="", video_password=""):
+ logger.info("(page_url='%s')" % page_url)
+ data = httptools.downloadpage(page_url).data
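+ # the embed page exposes the direct MP4 link in a plain source src="..." tag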
+ video_urls = []
+ videourl = scrapertools.find_single_match(data, 'source src="([^"]+)')
+ video_urls.append([".MP4 [jawcloud]", videourl])
+
+ return video_urls
|\s{2,}', "", data) + return data + +def get_link(data): + new_url = scrapertools.find_single_match(data, '(?:IFRAME|iframe) src=(.*?) scrolling') + return new_url def findvideos(item): logger.info() + host = 'https://www.locopelis.tv/' itemlist = [] - data = httptools.downloadpage(item.url).data - - anterior = scrapertools.find_single_match(data, '') - siguiente = scrapertools.find_single_match(data, '') - titulo = scrapertools.find_single_match(data, - '