From 0694d0af20c86d2cdf4a9f2fd12c4c39e77ec372 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Mon, 12 Mar 2018 14:41:37 -0500
Subject: [PATCH 01/20] thevideome: fix
---
plugin.video.alfa/servers/thevideome.py | 10 ++--------
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/plugin.video.alfa/servers/thevideome.py b/plugin.video.alfa/servers/thevideome.py
index 2a84016d..e6a2910b 100755
--- a/plugin.video.alfa/servers/thevideome.py
+++ b/plugin.video.alfa/servers/thevideome.py
@@ -7,11 +7,9 @@ from platformcode import logger
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
-
     data = httptools.downloadpage(page_url).data
     if "File was deleted" in data or "Page Cannot Be Found" in data:
         return False, "[thevideo.me] El archivo ha sido eliminado o no existe"
-
     return True, ""
@@ -19,21 +17,17 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("url=" + page_url)
if not "embed" in page_url:
page_url = page_url.replace("http://thevideo.me/", "http://thevideo.me/embed-") + ".html"
-
data = httptools.downloadpage(page_url).data
-
- mpri_Key = scrapertools.find_single_match(data, "lets_play_a_game='([^']+)'")
+ var = scrapertools.find_single_match(data, 'vsign.player.*?\+ (\w+)')
+ mpri_Key = scrapertools.find_single_match(data, "%s='([^']+)'" %var)
data_vt = httptools.downloadpage("https://thevideo.me/vsign/player/%s" % mpri_Key).data
vt = scrapertools.find_single_match(data_vt, 'function\|([^\|]+)\|')
if "fallback" in vt:
vt = scrapertools.find_single_match(data_vt, 'jwConfig\|([^\|]+)\|')
-
media_urls = scrapertools.find_multiple_matches(data, '\{"file"\s*\:\s*"([^"]+)"\s*,\s*"label"\s*\:\s*"([^"]+)"')
video_urls = []
-
for media_url, label in media_urls:
media_url += "?direct=false&ua=1&vt=%s" % vt
video_urls.append(
[scrapertools.get_filename_from_url(media_url)[-4:] + " (" + label + ") [thevideo.me]", media_url])
-
return video_urls
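
PATCH 01 stops assuming the page names its obfuscation key lets_play_a_game:
it first scrapes the *variable name* that the page appends to the
/vsign/player/ URL, then reads that variable's value. A minimal,
self-contained sketch of the same two-step extraction; the HTML snippet,
variable name and key value are invented stand-ins for the real embed page:

import re

html = """
var gKqxB='8817d4db73356e09';
$.get('https://thevideo.me/vsign/player/' + gKqxB + '.js');
"""

# Step 1: recover the name of the variable passed to vsign/player,
# instead of hard-coding lets_play_a_game.
var_name = re.search(r"vsign.player.*?\+ (\w+)", html).group(1)

# Step 2: look up that variable's value anywhere in the page.
mpri_key = re.search(r"%s='([^']+)'" % var_name, html).group(1)

print(var_name, mpri_key)  # -> gKqxB 8817d4db73356e09
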
From f7e955e6960b03f57e2be53c04e568cf7c391d9f Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Mon, 12 Mar 2018 14:46:04 -0500
Subject: [PATCH 02/20] cinecalidad: fix
---
plugin.video.alfa/channels/cinecalidad.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/plugin.video.alfa/channels/cinecalidad.py b/plugin.video.alfa/channels/cinecalidad.py
index 83bcb0de..1d98e6ac 100644
--- a/plugin.video.alfa/channels/cinecalidad.py
+++ b/plugin.video.alfa/channels/cinecalidad.py
@@ -248,12 +248,12 @@ def peliculas(item):
     return itemlist


-def dec(item):
+def dec(item, dec_value):
     link = []
     val = item.split(' ')
     link = map(int, val)
     for i in range(len(link)):
-        link[i] = link[i] - 6
+        link[i] = link[i] - int(dec_value)
     real = ''.join(map(chr, link))
     return (real)
@@ -302,10 +302,10 @@ def findvideos(item):
                   'BitTorrent': '',
                   'Mega': '',
                   'MediaFire': ''}
-
+    dec_value = scrapertools.find_single_match(data, 'String\.fromCharCode\(parseInt\(str\[i\]\)-(\d+)\)')
     for video_cod, server_id in matches:
         if server_id not in ['Mega', 'MediaFire', 'Trailer', '']:
-            video_id = dec(video_cod)
+            video_id = dec(video_cod, dec_value)
             logger.debug('server_id %s' % server_id)
             if server_id in server_url:
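
PATCH 02 generalises cinecalidad's link decoder. The site hides each server
URL as space-separated decimal character codes shifted by a constant; the
constant used to be hard-coded as 6, and is now scraped from the page's own
String.fromCharCode(parseInt(str[i])-N) JavaScript, so the channel keeps
working when the site rotates the offset. A self-contained sketch of the
scheme, with invented sample values:

import re

js = "String.fromCharCode(parseInt(str[i])-3)"   # decoder shipped by the page
encoded = "107 119 119 115 61"                   # "http:" with every code +3

# Scrape the shift the page itself uses, then undo it.
offset = int(re.search(r"parseInt\(str\[i\]\)-(\d+)", js).group(1))
decoded = ''.join(chr(int(n) - offset) for n in encoded.split(' '))
print(decoded)  # -> http:
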
From 4817f5aef0449b3d904e96bdcc8a7b13a3004a14 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Mon, 12 Mar 2018 17:05:02 -0500
Subject: [PATCH 03/20] Update flashx.py
---
plugin.video.alfa/servers/flashx.py | 23 +++++------------------
1 file changed, 5 insertions(+), 18 deletions(-)
diff --git a/plugin.video.alfa/servers/flashx.py b/plugin.video.alfa/servers/flashx.py
index 6efb18d0..33d9a8d3 100644
--- a/plugin.video.alfa/servers/flashx.py
+++ b/plugin.video.alfa/servers/flashx.py
@@ -19,19 +19,13 @@ def test_video_exists(page_url):
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("url=" + page_url)
     pfxfx = ""
-    headers = {'Host': 'www.flashx.sx',
-               'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36',
-               'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
-               'Accept-Language': 'en-US,en;q=0.5',
-               'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1',
-               'Cookie': ''}
     data = httptools.downloadpage(page_url, cookies=False).data
     data = data.replace("\n","")
-    cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.sx/counter.cgi.*?[^(?:'|")]+)""")
+    cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.ws/counter.cgi.*?[^(?:'|")]+)""")
     cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
-    playnow = scrapertools.find_single_match(data, 'https://www.flashx.sx/dl[^"]+')
+    playnow = scrapertools.find_single_match(data, 'https://www.flashx.ws/dl[^"]+')
     # To get the f and the fxfx values
-    js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.sx/js\w+/c\w+.*?[^(?:'|")]+)""")
+    js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.ws/js\w+/c\w+.*?[^(?:'|")]+)""")
     data_fxfx = httptools.downloadpage(js_fxfx).data
     mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
     matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
@@ -41,9 +35,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("mfxfxfx2= %s" %pfxfx)
if pfxfx == "":
pfxfx = "ss=yes&f=fail&fxfx=6"
- coding_url = 'https://www.flashx.sx/flashx.php?%s' %pfxfx
+ coding_url = 'https://www.flashx.ws/flashx.php?%s' %pfxfx
# {f: 'y', fxfx: '6'}
-    bloque = scrapertools.find_single_match(data, '(?s)Form method="POST" action(.*?)
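
PATCH 03's last hunk is cut off mid-pattern above, and with it the tail of
what is otherwise a mechanical domain migration (flashx.sx -> flashx.ws
throughout). Its one non-trivial piece is rebuilding the f/fxfx query string
from a helper JS file. A hedged sketch of that assembly, consistent with the
visible hunks; the JS snippet is an invented stand-in, and the real loop sits
between the two hunks shown:

import re

js = "$.get('/flashx.php', {f: 'y', fxfx: '6'});"  # invented helper-JS sample

params = re.search(r"get.*?({.*?})", js).group(1).replace("'", "").replace(" ", "")
pfxfx = "&".join("%s=%s" % (k, v) for k, v in re.findall(r"(\w+):(\w+)", params))
if not pfxfx:
    pfxfx = "ss=yes&f=fail&fxfx=6"                 # fallback kept by the patch
coding_url = "https://www.flashx.ws/flashx.php?%s" % pfxfx
print(coding_url)  # -> https://www.flashx.ws/flashx.php?f=y&fxfx=6
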
[The header of the following patch (subject, date, and affected channel file)
was lost in extraction; its first hunk, which deletes a listing function,
resumes here.]

-    patron = '([^<]+).*?'
-    patron += 'created_at">([^<]+)'
-    matches = scrapertools.find_multiple_matches(data, patron)
-    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedepisode, scrapeddays in matches:
-        scrapedtitle = scrapedtitle + " %s (%s)" %(scrapedepisode.strip(), scrapeddays.strip())
-        itemlist.append(Item(action = "findvideos",
-                             channel = item.channel,
-                             title = scrapedtitle,
-                             thumbnail = scrapedthumbnail,
-                             url = scrapedurl
-                             ))
-    return itemlist


 def series(item):
     logger.info()
@@ -70,7 +51,7 @@ def series(item):
     patron += 'title="([^"]+)'
     matches = scrapertools.find_multiple_matches(bloque, patron)
     for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
-        itemlist.append(Item(action = "temporadas",
+        itemlist.append(Item(action = "capitulos",
                              channel = item.channel,
                              thumbnail = scrapedthumbnail,
                              title = scrapedtitle,
@@ -84,25 +65,41 @@ def series(item):
         next_page += "%s" %page
         itemlist.append(Item(action = "series",
                              channel = item.channel,
-                             title = "Página siguiente",
+                             title = "Página siguiente >>",
                              url = next_page
                              ))
     return itemlist
-def temporadas(item):
+def episodios(item):
+    logger.info()
+    itemlist = []
+    itemlist = capitulos(item)
+    return itemlist
+
+
+def capitulos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    bloque = scrapertools.find_single_match(data, 'Lista de Temporadas.*?')
-    matches = scrapertools.find_multiple_matches(bloque, ' (.*?[0-9]+)')
-    for scrapedtitle in matches:
-        season = scrapertools.find_single_match(scrapedtitle, '[0-9]+')
+    bloque = scrapertools.find_single_match(data, 'Lista de Temporadas.*?Content principal')
+    patron = '(.*?)'
+    matches = scrapertools.find_multiple_matches(bloque, patron)
+    for scrapedurl, scrapedtitle in matches:
+        scrapedtitle = scrapedtitle.strip()
+        s_e = scrapertools.get_season_and_episode(scrapedurl.replace("-",""))
+        if s_e != "":
+            season = s_e.split("x")[0]
+            episode = s_e.split("x")[1]
+        else:
+            season = episode = ""
+        scrapedtitle = s_e + " - " + scrapedtitle
+        item.infoLabels["episode"] = episode
         item.infoLabels["season"] = season
-        url = item.url + "?temporada=%s" %season
-        itemlist.append(item.clone(action = "capitulos",
+        itemlist.append(item.clone(action = "findvideos",
                                    title = scrapedtitle,
-                                   url = url
+                                   url = scrapedurl
                                    ))
     tmdb.set_infoLabels(itemlist)
     if config.get_videolibrary_support():
@@ -116,36 +113,6 @@ def temporadas(item):
     return itemlist


-def episodios(item):
-    logger.info()
-    itemlist = []
-    templist = temporadas(item)
-    for tempitem in templist:
-        itemlist += capitulos(tempitem)
-    return itemlist
-
-
-def capitulos(item):
-    logger.info()
-    itemlist = []
-    data = httptools.downloadpage(item.url).data
-    patron = '(.*?)'
-    matches = scrapertools.find_multiple_matches(data, patron)
-    for scrapedurl, scrapedtitle in matches:
-        scrapedtitle = scrapedtitle.replace("", "")
-        episode = scrapertools.find_single_match(scrapedtitle, "Capitulo ([0-9]+)")
-        scrapedtitle = scrapedtitle.split(":")[1]
-        scrapedtitle = "%sx%s %s" %(item.infoLabels["season"], episode, scrapedtitle)
-        item.infoLabels["episode"] = episode
-        itemlist.append(item.clone(action = "findvideos",
-                                   title = scrapedtitle,
-                                   url = scrapedurl
-                                   ))
-    tmdb.set_infoLabels(itemlist)
-    return itemlist
-
-
 def newest(categoria):
     logger.info()
     itemlist = []
@@ -183,17 +150,30 @@ def search(item, texto):
 def filtro(item):
     logger.info()
     itemlist = []
+    filter = ""
+    filter_end = "data-uk-dropdown"
+    if item.extra == "categories":
+        filter = "genero"
+    elif item.extra == "qualitys":
+        filter = "calidad"
+    elif item.extra == "languages":
+        filter = "audio"
+    elif item.extra == "years":
+        filter = "ano"
+        filter_end = " |
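
The last hunk (cut off mid-line above) maps the menu's item.extra values onto
the site's filter slugs with an if/elif chain. The same mapping expressed as a
data-driven lookup; this is a hypothetical refactor, not what the patch ships,
with the slug names taken from the visible hunk and the helper name invented:

FILTER_SLUGS = {
    "categories": "genero",
    "qualitys": "calidad",
    "languages": "audio",
    "years": "ano",
}

def filter_for(extra):
    # Unknown extras fall back to an empty slug, as in the patched code.
    return FILTER_SLUGS.get(extra, "")

print(filter_for("languages"))  # -> audio

A table like this keeps site-specific strings in one place, so adding a new
filter is a one-line change instead of another elif branch.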