From 649c622876e9df34468616ff13d1628c425033c7 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sun, 20 May 2018 09:06:15 -0500
Subject: [PATCH 01/12] dailymotion: fix live stream
---
plugin.video.alfa/servers/dailymotion.py | 16 +++-------------
1 file changed, 3 insertions(+), 13 deletions(-)
diff --git a/plugin.video.alfa/servers/dailymotion.py b/plugin.video.alfa/servers/dailymotion.py
index 9a8d8d9a..3cb282e0 100755
--- a/plugin.video.alfa/servers/dailymotion.py
+++ b/plugin.video.alfa/servers/dailymotion.py
@@ -7,31 +7,21 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
-
response = httptools.downloadpage(page_url)
if response.code == 404:
return False, "[Dailymotion] El archivo no existe o ha sido borrado"
-
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
-
response = httptools.downloadpage(page_url, cookies=False)
cookie = {'Cookie': response.headers["set-cookie"]}
data = response.data.replace("\\", "")
-
- '''
- "240":[{"type":"video/mp4","url":"http://www.dailymotion.com/cdn/H264-320x240/video/x33mvht.mp4?auth=1441130963-2562-u49z9kdc-84796332ccab3c7ce84e01c67a18b689"}]
- '''
-
subtitle = scrapertools.find_single_match(data, '"subtitles":.*?"es":.*?urls":\["([^"]+)"')
qualities = scrapertools.find_multiple_matches(data, '"([^"]+)":(\[\{"type":".*?\}\])')
for calidad, urls in qualities:
- if calidad == "auto":
- continue
patron = '"type":"(?:video|application)/([^"]+)","url":"([^"]+)"'
matches = scrapertools.find_multiple_matches(urls, patron)
for stream_type, stream_url in matches:
@@ -41,10 +31,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
follow_redirects=False).headers.get("location", stream_url)
else:
data_m3u8 = httptools.downloadpage(stream_url).data
- stream_url = scrapertools.find_single_match(data_m3u8, '(http:.*?\.m3u8)')
+ stream_url_http = scrapertools.find_single_match(data_m3u8, '(http:.*?\.m3u8)')
+ if stream_url_http:
+ stream_url = stream_url_http
video_urls.append(["%sp .%s [dailymotion]" % (calidad, stream_type), stream_url, 0, subtitle])
-
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
-
return video_urls
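
For context, the core of this hunk is a guarded fallback: the fetched playlist may only override stream_url when it actually contains a plain http .m3u8 entry; otherwise the original URL (e.g. a live manifest) is kept, which per the subject line is what broke live streams before. A minimal standalone sketch of that idea, with an illustrative helper name (resolve_hls is not part of the addon):

    import re

    def resolve_hls(stream_url, playlist_text):
        # Prefer a direct http .m3u8 found inside the playlist; if the playlist
        # has none (as with live manifests), keep the URL we already have.
        match = re.search(r'(http:.*?\.m3u8)', playlist_text)
        return match.group(1) if match else stream_url
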
From 081dad640406203dae43c25c0d618020193dd260 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Mon, 21 May 2018 11:55:50 -0500
Subject: [PATCH 02/12] ver-peliculas: fix
---
plugin.video.alfa/channels/ver-peliculas.py | 42 +++++++++++----------
1 file changed, 22 insertions(+), 20 deletions(-)
diff --git a/plugin.video.alfa/channels/ver-peliculas.py b/plugin.video.alfa/channels/ver-peliculas.py
index e95c3f95..e63fc06a 100644
--- a/plugin.video.alfa/channels/ver-peliculas.py
+++ b/plugin.video.alfa/channels/ver-peliculas.py
@@ -38,8 +38,8 @@ def mainlist(item):
Item(channel=item.channel,
title="Español",
action="listado",
- url=host + "peliculas/en-espanol/"
- ))
+ url=host + "peliculas/en-espanol/",
+ thumbnail = get_thumb("channels_spanish.png")))
itemlist.append(
Item(channel=item.channel,
title="Latino",
@@ -54,9 +54,10 @@ def mainlist(item):
thumbnail=get_thumb("channels_vos.png")))
itemlist.append(
Item(channel=item.channel,
- title="Categorias",
+ title="Generos",
action="categories",
- url=host
+ url=host,
+ thumbnail=get_thumb('genres', auto=True)
))
itemlist.append(
Item(channel=item.channel,
@@ -95,7 +96,6 @@ def search(item, texto):
post = "keyword=%s" % texto
data = httptools.downloadpage(item.url, post=post).data
data = data.replace('\\"', '"').replace('\\/', '/')
- logger.debug("data %s" % data)
pattern = 'url\((.*?)\).+?(.*?)'
matches = re.compile(pattern, re.DOTALL).findall(data)
@@ -146,14 +146,6 @@ def listado(item):
title=">> Página siguiente",
url=url,
thumbnail=get_thumb("next.png")))
-
- for item in itemlist:
- if item.infoLabels['plot'] == '':
- data = httptools.downloadpage(item.url).data
- item.plot = scrapertools.find_single_match(data, '([^<]+)').strip()
- item.fanart = scrapertools.find_single_match(data, '')
-
-
return itemlist
@@ -172,10 +164,13 @@ def findvideos(item):
video_info = scrapertools.find_single_match(data, "load_player\('([^']+).*?([^']+)")
movie_info = scrapertools.find_single_match(item.url,
'http:\/\/ver-peliculas\.(io|org)\/peliculas\/(\d+)-(.*?)-\d{4}-online\.')
+
+
movie_host = movie_info[0]
- movie_id = movie_info[1]
- movie_name = movie_info[2]
- sub = video_info[1]
+ movie_id = scrapertools.find_single_match(data,'id=idpelicula value=(.*?)>')
+ movie_name = scrapertools.find_single_match(data,'id=nombreslug value=(.*?)>')
+ sub = scrapertools.find_single_match(data, 'id=imdb value=(.*?)>')
+ sub = '%s/subtix/%s.srt' % (movie_host, sub)
url_base = 'http://ver-peliculas.%s/core/api.php?id=%s&slug=%s' % (movie_host, movie_id, movie_name)
data = httptools.downloadpage(url_base).data
json_data = jsontools.load(data)
@@ -185,8 +180,10 @@ def findvideos(item):
video_base_url = host + '/core/videofinal.php'
if video_list[videoitem] != None:
video_lang = video_list[videoitem]
- languages = ['latino', 'spanish', 'subtitulos']
+ languages = ['latino', 'spanish', 'subtitulos', 'subtitulosp']
for lang in languages:
+ if lang not in video_lang:
+ continue
if video_lang[lang] != None:
if not isinstance(video_lang[lang], int):
video_id = video_lang[lang][0]["video"]
@@ -199,15 +196,20 @@ def findvideos(item):
for video_link in sources:
url = video_link['sources']
if url not in duplicated and server!='drive':
- lang = lang.capitalize()
- if lang == 'Spanish':
+
+ if lang == 'spanish':
lang = 'Español'
+ elif 'sub' in lang:
+ lang = 'Subtitulada'
+ lang = lang.capitalize()
title = 'Ver en %s [' + lang + ']'
thumbnail = servertools.guess_server_thumbnail(server)
itemlist.append(item.clone(title=title,
url=url,
thumbnail=thumbnail,
- action='play'
+ action='play',
+ language=lang
+
))
duplicated.append(url)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
From d54046be0166ee6dd352f1f5f6a05a22b872f0d8 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Mon, 21 May 2018 12:49:04 -0500
Subject: [PATCH 03/12] Update trakt_tools.py
---
plugin.video.alfa/core/trakt_tools.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/plugin.video.alfa/core/trakt_tools.py b/plugin.video.alfa/core/trakt_tools.py
index 914de6dc..e07c5971 100644
--- a/plugin.video.alfa/core/trakt_tools.py
+++ b/plugin.video.alfa/core/trakt_tools.py
@@ -203,13 +203,17 @@ def trakt_check(itemlist):
id_result = ''
# check = u'\u221a'
check = 'v'
- get_sync_from_file()
+ synced = False
try:
for item in itemlist:
info = item.infoLabels
if info != '' and info['mediatype'] in ['movie', 'episode'] and item.channel != 'videolibrary':
+ if not synced:
+ get_sync_from_file()
+ synced = True
+
mediatype = 'movies'
id_type = 'tmdb'
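
The change above replaces an unconditional get_sync_from_file() call with lazy, one-shot initialization: the file is only read if at least one item actually needs the Trakt state. A generic sketch of the same pattern (load_state and the dict items are illustrative, not trakt_tools code):

    def check_watched(items, load_state):
        # Lazy one-shot initialization: load_state() runs at most once, and only
        # when some item actually qualifies (sketch of the pattern, not trakt_tools).
        state = None
        for item in items:
            if item.get('mediatype') not in ('movie', 'episode'):
                continue
            if state is None:
                state = load_state()   # deferred until the first qualifying item
            item['watched'] = item['id'] in state
        return items

    # load_state is never called here because nothing qualifies:
    print(check_watched([{'mediatype': 'tvshow', 'id': 1}], lambda: {1, 2}))
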
From e032e025c805fef47bb5aad31d096df442f54ab0 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Mon, 21 May 2018 14:19:38 -0500
Subject: [PATCH 04/12] poseidonhd: fix
---
plugin.video.alfa/channels/poseidonhd.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/plugin.video.alfa/channels/poseidonhd.py b/plugin.video.alfa/channels/poseidonhd.py
index 7a729699..ded2a793 100644
--- a/plugin.video.alfa/channels/poseidonhd.py
+++ b/plugin.video.alfa/channels/poseidonhd.py
@@ -274,7 +274,7 @@ def findvideos(item):
#title = '%s [%s]' % (item.title, language)
itemlist.append(item.clone(title='[%s] [%s]', url=url, action='play', subtitle=subs,
language=language, quality=quality, infoLabels = item.infoLabels))
- itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
+ itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
# Requerido para Filtrar enlaces
From 22f3b2446948b59bd0d13489891885e6086a1dd3 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Tue, 22 May 2018 10:32:46 -0500
Subject: [PATCH 05/12] netutv: fix
---
plugin.video.alfa/servers/netutv.py | 28 ++++------------------------
1 file changed, 4 insertions(+), 24 deletions(-)
diff --git a/plugin.video.alfa/servers/netutv.py b/plugin.video.alfa/servers/netutv.py
index df396157..db65a7ee 100755
--- a/plugin.video.alfa/servers/netutv.py
+++ b/plugin.video.alfa/servers/netutv.py
@@ -8,45 +8,38 @@ from core import jsontools
from core import scrapertools
from platformcode import logger
+
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
-
# http://netu.tv/watch_video.php=XX solo contiene una redireccion, ir directamente a http://hqq.tv/player/embed_player.php?vid=XX
page_url = page_url.replace("http://netu.tv/watch_video.php?v=", "http://hqq.tv/player/embed_player.php?vid=")
-
data = httptools.downloadpage(page_url).data
-
if "var userid = '';" in data.lower():
return False, "[netutv] El archivo no existe o ha sido borrado"
-
return True, ""
+
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
-
if "hash=" in page_url:
data = urllib.unquote(httptools.downloadpage(page_url).data)
- id_video = scrapertools.find_single_match(data, "vid\s*=\s*'([^']+)'")
+ id_video = scrapertools.find_single_match(data, "vid':'([^']+)'")
else:
id_video = page_url.rsplit("=", 1)[1]
page_url_hqq = "http://hqq.watch/player/embed_player.php?vid=%s&autoplay=no" % id_video
data_page_url_hqq = httptools.downloadpage(page_url_hqq, add_referer=True).data
-
js_wise = scrapertools.find_single_match(data_page_url_hqq,
"")
data_unwise = jswise(js_wise).replace("\\", "")
at = scrapertools.find_single_match(data_unwise, 'var at\s*=\s*"([^"]+)"')
http_referer = scrapertools.find_single_match(data_unwise, 'var http_referer\s*=\s*"([^"]+)"')
-
url = "http://hqq.watch/sec/player/embed_player.php?iss=&vid=%s&at=%s&autoplayed=yes&referer=on" \
"&http_referer=%s&pass=&embed_from=&need_captcha=0&hash_from=" % (id_video, at, http_referer)
data_player = httptools.downloadpage(url, add_referer=True).data
-
data_unescape = scrapertools.find_multiple_matches(data_player, 'document.write\(unescape\("([^"]+)"')
data = ""
for d in data_unescape:
data += urllib.unquote(d)
-
subtitle = scrapertools.find_single_match(data, 'value="sublangs=Spanish.*?sub=([^&]+)&')
if not subtitle:
subtitle = scrapertools.find_single_match(data, 'value="sublangs=English.*?sub=([^&]+)&')
@@ -55,7 +48,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
"")
if js_wise:
data_unwise_player = jswise(js_wise).replace("\\", "")
-
vars_data = scrapertools.find_single_match(data, '/player/get_md5.php",\s*\{(.*?)\}')
matches = scrapertools.find_multiple_matches(vars_data, '\s*([^:]+):\s*([^,]*)[,"]')
params = {}
@@ -69,22 +61,17 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
if not value_var and data_unwise_player:
value_var = scrapertools.find_single_match(data_unwise_player, 'var\s*%s\s*=\s*"([^"]+)"' % value)
params[key] = value_var
-
params = urllib.urlencode(params)
head = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
data = httptools.downloadpage("http://hqq.watch/player/get_md5.php?" + params, headers=head).data
-
media_urls = []
url_data = jsontools.load(data)
- media_url = tb(url_data["html5_file"].replace("#", ""))
-
+ media_url = "https:" + tb(url_data["obf_link"].replace("#", "")) + ".mp4.m3u8"
video_urls = []
media = media_url + "|User-Agent=Mozilla/5.0 (iPhone; CPU iPhone OS 5_0_1 like Mac OS X)"
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [netu.tv]", media, 0, subtitle])
-
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
-
return video_urls
@@ -95,7 +82,6 @@ def tb(b_m3u8_2):
while j < len(b_m3u8_2):
s2 += "\\u0" + b_m3u8_2[j:(j + 3)]
j += 3
-
return s2.decode('unicode-escape').encode('ASCII', 'ignore')
@@ -105,15 +91,12 @@ def tb(b_m3u8_2):
def jswise(wise):
## js2python
def js_wise(wise):
-
w, i, s, e = wise
-
v0 = 0;
v1 = 0;
v2 = 0
v3 = [];
v4 = []
-
while True:
if v0 < 5:
v4.append(w[v0])
@@ -131,12 +114,10 @@ def jswise(wise):
v3.append(s[v2])
v2 += 1
if len(w) + len(i) + len(s) + len(e) == len(v3) + len(v4) + len(e): break
-
v5 = "".join(v3);
v6 = "".join(v4)
v1 = 0
v7 = []
-
for v0 in range(0, len(v3), 2):
v8 = -1
if ord(v6[v1]) % 2: v8 = 1
@@ -144,7 +125,6 @@ def jswise(wise):
v1 += 1
if v1 >= len(v4): v1 = 0
return "".join(v7)
-
## loop2unobfuscated
while True:
wise = re.search("var\s.+?\('([^']+)','([^']+)','([^']+)','([^']+)'\)", wise, re.DOTALL)
From 65182e1f979ad51133ff025e4501b4d805d6e91d Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Tue, 22 May 2018 12:48:30 -0500
Subject: [PATCH 06/12] wikiseries: new channel
---
plugin.video.alfa/channels/wikiseries.json | 37 +++
plugin.video.alfa/channels/wikiseries.py | 251 +++++++++++++++++++++
2 files changed, 288 insertions(+)
create mode 100644 plugin.video.alfa/channels/wikiseries.json
create mode 100644 plugin.video.alfa/channels/wikiseries.py
diff --git a/plugin.video.alfa/channels/wikiseries.json b/plugin.video.alfa/channels/wikiseries.json
new file mode 100644
index 00000000..d0cf2aee
--- /dev/null
+++ b/plugin.video.alfa/channels/wikiseries.json
@@ -0,0 +1,37 @@
+{
+ "id": "wikiseries",
+ "name": "WikiSeries",
+ "active": true,
+ "adult": false,
+ "language": ["lat", "cast", "vo", "vose"],
+ "thumbnail": "https://s31.postimg.cc/tnmcrytnv/16142379_1847422438815031_3788419094563167644_n.jpg",
+ "banner": "",
+ "categories": [
+ "tvshow"
+ ],
+ "settings": [
+ {
+ "id": "include_in_global_search",
+ "type": "bool",
+ "label": "Incluir en busqueda global",
+ "default": false,
+ "enabled": false,
+ "visible": false
+ },
+ {
+ "id": "filter_languages",
+ "type": "list",
+ "label": "Mostrar enlaces en idioma...",
+ "default": 0,
+ "enabled": true,
+ "visible": true,
+ "lvalues": [
+ "No filtrar",
+ "Latino",
+ "Español",
+ "VOSE",
+ "VO"
+ ]
+ }
+ ]
+}
diff --git a/plugin.video.alfa/channels/wikiseries.py b/plugin.video.alfa/channels/wikiseries.py
new file mode 100644
index 00000000..89e65e97
--- /dev/null
+++ b/plugin.video.alfa/channels/wikiseries.py
@@ -0,0 +1,251 @@
+# -*- coding: utf-8 -*-
+# -*- Channel wikiseries -*-
+# -*- Created for Alfa-addon -*-
+# -*- By the Alfa Develop Group -*-
+
+import re
+
+from channels import autoplay
+from channels import filtertools
+from core import httptools
+from core import scrapertools
+from core import servertools
+from core import jsontools
+from core import tmdb
+from core.item import Item
+from platformcode import config, logger
+from channelselector import get_thumb
+
+host = 'http://www.wikiseriesonline.nu/'
+
+list_language = ['Latino', 'Español', 'VOSE', 'VO']
+list_quality = []
+list_servers = ['openload']
+
+def get_source(url):
+ logger.info()
+ data = httptools.downloadpage(url).data
+ data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+ return data
+
+def mainlist(item):
+ logger.info()
+
+ autoplay.init(item.channel, list_servers, list_quality)
+
+ itemlist =[]
+
+ itemlist.append(
+ Item(channel=item.channel, title="Nuevos Capitulos", action="list_all", url=host + 'category/episode',
+ thumbnail=get_thumb('new episodes', auto=True)))
+
+ itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'category/serie',
+ thumbnail=get_thumb('all', auto=True)))
+
+ itemlist.append(Item(channel=item.channel, title="Generos", action="genres",
+ url=host + 'latest-episodes', thumbnail=get_thumb('genres', auto=True)))
+
+ itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
+ thumbnail=get_thumb('search', auto=True)))
+
+ itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
+ autoplay.show_option(item.channel, itemlist)
+ return itemlist
+
+def list_all(item):
+ logger.info()
+
+ itemlist = []
+
+ data = get_source(item.url)
+ patron = '39;src=.*?(http.*?)style=display:.*?one-line href=(.*?) title=.*?>(.*?)<'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
+ url = scrapedurl
+ scrapedtitle = scrapedtitle.replace('×','x')
+
+ contentSerieName = scrapedtitle
+ action = 'seasons'
+
+ if 'episode' in item.url:
+ scrapedtitle, season, episode = scrapertools.find_single_match(scrapedtitle, '(.*?) (\d+)x(\d+)')
+ contentSerieName = scrapedtitle
+ scrapedtitle = '%sx%s - %s' % (season, episode, scrapedtitle)
+ action='findvideos'
+
+ thumbnail = scrapedthumbnail
+ new_item = Item(channel=item.channel, title=scrapedtitle, url=url,
+ thumbnail=thumbnail, contentSerieName=contentSerieName, action=action,
+ context=filtertools.context(item, list_language, list_quality))
+
+ if 'episode' in item.url:
+ new_item.contentSeasonNumber = season
+ new_item.contentepisodeNumber = episode
+ new_item.context = []
+
+ itemlist.append(new_item)
+
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+ # Paginacion
+ next_page = scrapertools.find_single_match(data, 'rel=next href=(.*?)>»')
+ if next_page != '':
+ itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
+ url=next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
+ type=item.type))
+ return itemlist
+
+
+def genres(item):
+
+ itemlist = []
+
+ data = get_source(host)
+ patron = ' (.*?)'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for scrapedurl, scrapedtitle in matches:
+
+ if scrapedtitle != 'Series':
+ itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=host + scrapedurl, action='list_all'))
+
+ return itemlist
+
+
+def seasons(item):
+ logger.info()
+ itemlist = []
+
+ data = get_source(item.url)
+
+ patron = 'data-season-num=1>(.*?)'
+
+ matches = re.compile(patron, re.DOTALL).findall(data)
+ infoLabels = item.infoLabels
+ for scrapedseason in matches:
+ contentSeasonNumber = scrapedseason
+ title = 'Temporada %s' % scrapedseason
+ infoLabels['season'] = contentSeasonNumber
+
+ itemlist.append(Item(channel=item.channel, action='episodesxseason', url=item.url, title=title,
+ contentSeasonNumber=contentSeasonNumber, infoLabels=infoLabels))
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+ if config.get_videolibrary_support() and len(itemlist) > 0:
+ itemlist.append(
+ Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
+ action="add_serie_to_library", extra="all_episodes", contentSerieName=item.contentSerieName,
+ extra1='library'))
+
+ return itemlist
+
+def all_episodes(item):
+ logger.info()
+ itemlist = []
+ templist = seasons(item)
+ for tempitem in templist:
+ itemlist += episodesxseason(tempitem)
+ return itemlist
+
+def episodesxseason(item):
+ logger.info()
+ itemlist = []
+ data = get_source(item.url)
+ season = item.contentSeasonNumber
+ patron = '.*?.*?name>(.*?)<.*?class=lgn (.*?)' % season
+
+ matches = re.compile(patron, re.DOTALL).findall(data)
+ infoLabels = item.infoLabels
+ for scrapedepi, scrapedurl, scrapedtitle, languages in matches:
+ url = scrapedurl
+ language = scrapertools.find_multiple_matches(languages, 'title=(.*?)>')
+ contentEpisodeNumber = scrapedepi
+ title = '%sx%s - %s %s' % (season, contentEpisodeNumber, scrapedtitle, language)
+ infoLabels['episode'] = contentEpisodeNumber
+ itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
+ contentSerieName=item.contentSerieName, contentEpisodeNumber=contentEpisodeNumber,
+ language=language, infoLabels=infoLabels))
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+ return itemlist
+
+def search(item, text):
+ logger.info()
+
+ item.url = item.url + text
+ item.text = text
+ item.type = 'search'
+ if text != '':
+ #return list_all(item)
+ return search_results(item)
+
+
+def search_results(item):
+ import urllib
+ itemlist = []
+ headers={"Origin": "http://www.wikiseriesonline.nu",
+ "Accept-Encoding": "gzip, deflate", "Host": "www.wikiseriesonline.nu",
+ "Accept-Language": "es-ES,es;q=0.8,en;q=0.6",
+ "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
+ "Accept": "*/*", "Referer": item.url,
+ "X-Requested-With": "XMLHttpRequest", "Connection": "keep-alive", "Content-Length": "7"}
+ post = {"n":item.text}
+ post = urllib.urlencode(post)
+ url = host + 'wp-content/themes/wikiSeries/searchajaxresponse.php'
+ data = httptools.downloadpage(url, post=post, headers=headers).data
+ data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+
+ patron = ".*?.*?src=(.*?) .*?titleinst>(.*?)<"
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
+ if item.text.lower() in scrapedtitle.lower():
+ itemlist.append(Item(channel=item.channel, title=scrapedtitle, contentSerieName=scrapedtitle, url=scrapedurl,
+ thumbnail=scrapedthumbnail, action='seasons',
+ context=filtertools.context(item, list_language, list_quality)))
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+ return itemlist
+
+
+def findvideos(item):
+
+ itemlist = []
+ data=get_source(item.url)
+ patron = '
Date: Wed, 23 May 2018 10:10:18 -0500
Subject: [PATCH 07/12] uptobox: test_video_exists updated
---
plugin.video.alfa/servers/uptobox.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/plugin.video.alfa/servers/uptobox.py b/plugin.video.alfa/servers/uptobox.py
index 43d90807..d54241b4 100755
--- a/plugin.video.alfa/servers/uptobox.py
+++ b/plugin.video.alfa/servers/uptobox.py
@@ -14,7 +14,8 @@ def test_video_exists(page_url):
if "Streaming link:" in data:
return True, ""
- elif "Unfortunately, the file you want is not available." in data or "Unfortunately, the video you want to see is not available" in data or "This stream doesn" in data:
+ elif "Unfortunately, the file you want is not available." in data or "Unfortunately, the video you want to see is not available" in data or "This stream doesn" in data\
+ or "Page not found" in data:
return False, "[Uptobox] El archivo no existe o ha sido borrado"
wait = scrapertools.find_single_match(data, "You have to wait ([0-9]+) (minute|second)")
if len(wait) > 0:
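
The chain of or-ed markers above keeps growing with each fix; an equivalent form, shown only as a sketch and not as part of the patch, is a tuple of markers checked with any():

    NOT_AVAILABLE = (
        "Unfortunately, the file you want is not available.",
        "Unfortunately, the video you want to see is not available",
        "This stream doesn",
        "Page not found",
    )

    def file_unavailable(data):
        # Equivalent to the chained "or" checks in test_video_exists()
        return any(marker in data for marker in NOT_AVAILABLE)

Adding the next marker then becomes a one-line change to the tuple.
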
From 88d55658f1b8eb68a324f6e15f50f2aa53bacd8a Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Wed, 23 May 2018 10:55:03 -0500
Subject: [PATCH 08/12] allpeliculas: cosmetic changes
---
plugin.video.alfa/channels/allpeliculas.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/plugin.video.alfa/channels/allpeliculas.py b/plugin.video.alfa/channels/allpeliculas.py
index 4e249b29..875c87db 100644
--- a/plugin.video.alfa/channels/allpeliculas.py
+++ b/plugin.video.alfa/channels/allpeliculas.py
@@ -148,12 +148,13 @@ def findvideos(item):
action = "play",
title = calidad,
fulltitle = item.title,
+ thumbnail = item.thumbnail,
contentThumbnail = item.thumbnail,
url = url,
language = IDIOMAS['Latino']
))
- tmdb.set_infoLabels(itemlist, seekTmdb = True)
itemlist = servertools.get_servers_itemlist(itemlist)
+ tmdb.set_infoLabels(itemlist, seekTmdb = True)
itemlist.append(Item(channel=item.channel))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
From 0cb8dccd8cda01ea4739292010af418656da28a3 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Wed, 23 May 2018 11:30:25 -0500
Subject: [PATCH 09/12] clipwatching: pattern updated
---
plugin.video.alfa/servers/clipwatching.json | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/plugin.video.alfa/servers/clipwatching.json b/plugin.video.alfa/servers/clipwatching.json
index 8343f4a6..2f1ac627 100644
--- a/plugin.video.alfa/servers/clipwatching.json
+++ b/plugin.video.alfa/servers/clipwatching.json
@@ -4,8 +4,8 @@
"ignore_urls": [],
"patterns": [
{
- "pattern": "clipwatching.com/(\\w+)",
- "url": "http://clipwatching.com/\\1.html"
+ "pattern": "clipwatching.com/(.*?.html)",
+ "url": "http://clipwatching.com/\\1"
}
]
},
From 1010388910cf86e100c4134f3d3cd28cd6214f39 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Wed, 23 May 2018 11:49:39 -0500
Subject: [PATCH 10/12] Update clipwatching.json
---
plugin.video.alfa/servers/clipwatching.json | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/plugin.video.alfa/servers/clipwatching.json b/plugin.video.alfa/servers/clipwatching.json
index 2f1ac627..a411ac8b 100644
--- a/plugin.video.alfa/servers/clipwatching.json
+++ b/plugin.video.alfa/servers/clipwatching.json
@@ -4,8 +4,12 @@
"ignore_urls": [],
"patterns": [
{
- "pattern": "clipwatching.com/(.*?.html)",
+ "pattern": "clipwatching.com/(e.*?.html)",
"url": "http://clipwatching.com/\\1"
+ },
+ {
+ "pattern": "clipwatching.com/(\\w+)",
+ "url": "http://clipwatching.com/\\1.html"
}
]
},
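
Patches 09 and 10 together leave two patterns: the embed form first, the bare-id form second. The example below (made-up URL, plain re calls; the real lookup lives in core/servertools.py and is not part of this series) shows why the bare \w+ rule on its own is not enough for embed links:

    import re

    # Hypothetical embed link; real ids/urls are not part of this patch.
    url = "https://clipwatching.com/embed-abc123.html"

    specific = re.search(r"clipwatching.com/(e.*?.html)", url).group(1)
    generic = re.search(r"clipwatching.com/(\w+)", url).group(1)

    print("http://clipwatching.com/" + specific)           # .../embed-abc123.html (full slug kept)
    print("http://clipwatching.com/" + generic + ".html")  # .../embed.html (truncated at the hyphen)
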
From 6f1907c946665f35e596c2d23ed1eb99b7589566 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Wed, 23 May 2018 11:50:30 -0500
Subject: [PATCH 11/12] gounlimited: new server
---
plugin.video.alfa/servers/gounlimited.json | 42 ++++++++++++++++++++++
plugin.video.alfa/servers/gounlimited.py | 32 +++++++++++++++++
2 files changed, 74 insertions(+)
create mode 100644 plugin.video.alfa/servers/gounlimited.json
create mode 100644 plugin.video.alfa/servers/gounlimited.py
diff --git a/plugin.video.alfa/servers/gounlimited.json b/plugin.video.alfa/servers/gounlimited.json
new file mode 100644
index 00000000..d4d4cd86
--- /dev/null
+++ b/plugin.video.alfa/servers/gounlimited.json
@@ -0,0 +1,42 @@
+{
+ "active": true,
+ "find_videos": {
+ "ignore_urls": [],
+ "patterns": [
+ {
+ "pattern": "https://gounlimited.to/embed-(.*?).html",
+ "url": "https://gounlimited.to/embed-\\1.html"
+ }
+ ]
+ },
+ "free": true,
+ "id": "gounlimited",
+ "name": "gounlimited",
+ "settings": [
+ {
+ "default": false,
+ "enabled": true,
+ "id": "black_list",
+ "label": "Incluir en lista negra",
+ "type": "bool",
+ "visible": true
+ },
+ {
+ "default": 0,
+ "enabled": true,
+ "id": "favorites_servers_list",
+ "label": "Incluir en lista de favoritos",
+ "lvalues": [
+ "No",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5"
+ ],
+ "type": "list",
+ "visible": false
+ }
+ ],
+ "thumbnail": "https://s31.postimg.cc/bsiaj2q2j/goo.png"
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/servers/gounlimited.py b/plugin.video.alfa/servers/gounlimited.py
new file mode 100644
index 00000000..dcf835af
--- /dev/null
+++ b/plugin.video.alfa/servers/gounlimited.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------
+# Conector GoUnlimited By Alfa development Group
+# --------------------------------------------------------
+
+import re
+from core import httptools
+from platformcode import logger
+from core import scrapertools
+from lib import jsunpack
+
+
+def test_video_exists(page_url):
+ data = httptools.downloadpage(page_url).data
+ if data == "File was deleted":
+ return False, "[gounlimited] El video ha sido borrado"
+ return True, ""
+
+
+def get_video_url(page_url, premium=False, user="", password="", video_password=""):
+ logger.info("url=" + page_url)
+ video_urls = []
+ data = httptools.downloadpage(page_url).data
+ data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+ packed_data = scrapertools.find_single_match(data, "javascript'>(eval.*?)</script>")
+ unpacked = jsunpack.unpack(packed_data)
+ patron = "file:(.*?),label:(.*?)}"
+ matches = re.compile(patron, re.DOTALL).findall(unpacked)
+ for url, quality in matches:
+ video_urls.append(['%s' % quality, url])
+ video_urls.sort(key=lambda x: int(x[0]))
+ return video_urls
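
gounlimited.py sorts the extracted URLs by int(label). If the site ever labels qualities as e.g. "720p" rather than a bare number, int() would raise ValueError; a tolerant key like the one below is one way to sort by the numeric part. This is a sketch under that assumption, not something the patch does:

    import re

    def quality_key(entry):
        # entry is ["<label>", "<url>"]; pull the digits out of the label,
        # falling back to 0 when there are none.
        match = re.search(r"\d+", entry[0])
        return int(match.group()) if match else 0

    video_urls = [["360p", "http://example.com/a"], ["720p", "http://example.com/b"]]
    video_urls.sort(key=quality_key)
    print(video_urls)   # lowest quality first, as in gounlimited.py
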
From 62c386c2846cee2110bee47fbe96432d3abd273d Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Wed, 23 May 2018 11:51:33 -0500
Subject: [PATCH 12/12] cinedetodo: new channel
---
plugin.video.alfa/channels/cinedetodo.json | 75 ++++++++
plugin.video.alfa/channels/cinedetodo.py | 207 +++++++++++++++++++++
2 files changed, 282 insertions(+)
create mode 100644 plugin.video.alfa/channels/cinedetodo.json
create mode 100644 plugin.video.alfa/channels/cinedetodo.py
diff --git a/plugin.video.alfa/channels/cinedetodo.json b/plugin.video.alfa/channels/cinedetodo.json
new file mode 100644
index 00000000..037cbb3b
--- /dev/null
+++ b/plugin.video.alfa/channels/cinedetodo.json
@@ -0,0 +1,75 @@
+{
+ "id": "cinedetodo",
+ "name": "CINEDETODO",
+ "active": true,
+ "adult": false,
+ "language": ["lat"],
+ "thumbnail": "https://s31.postimg.cc/win1ffxyj/cinedetodo.png",
+ "banner": "",
+ "version": 1,
+ "categories": [
+ "movies"
+ ],
+ "settings": [
+ {
+ "id": "include_in_global_search",
+ "type": "bool",
+ "label": "Incluir en busqueda global",
+ "default": false,
+ "enabled": false,
+ "visible": false
+ },
+ {
+ "id": "filter_languages",
+ "type": "list",
+ "label": "Mostrar enlaces en idioma...",
+ "default": 0,
+ "enabled": true,
+ "visible": true,
+ "lvalues": [
+ "No filtrar",
+ "LAT"
+ ]
+ },
+ {
+ "id": "include_in_newest_peliculas",
+ "type": "bool",
+ "label": "Incluir en Novedades - Peliculas",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ },
+ {
+ "id": "include_in_newest_infantiles",
+ "type": "bool",
+ "label": "Incluir en Novedades - Infantiles",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ },
+ {
+ "id": "include_in_newest_latino",
+ "type": "bool",
+ "label": "Incluir en Novedades - latino",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ },
+ {
+ "id": "include_in_newest_terror",
+ "type": "bool",
+ "label": "Incluir en Novedades - Terror",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ },
+ {
+ "id": "include_in_newest_documentales",
+ "type": "bool",
+ "label": "Incluir en Novedades - Documentales",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ }
+ ]
+}
diff --git a/plugin.video.alfa/channels/cinedetodo.py b/plugin.video.alfa/channels/cinedetodo.py
new file mode 100644
index 00000000..6ada755c
--- /dev/null
+++ b/plugin.video.alfa/channels/cinedetodo.py
@@ -0,0 +1,207 @@
+# -*- coding: utf-8 -*-
+# -*- Channel CineDeTodo -*-
+# -*- Created for Alfa-addon -*-
+# -*- By the Alfa Develop Group -*-
+
+import re
+import urllib
+from channelselector import get_thumb
+from core import httptools
+from core import scrapertools
+from core import servertools
+from core import tmdb
+from core.item import Item
+from platformcode import config, logger
+from channels import autoplay
+from channels import filtertools
+
+
+host = 'http://www.cinedetodo.com/'
+
+IDIOMAS = {'Latino': 'LAT'}
+list_language = IDIOMAS.values()
+list_quality = []
+list_servers = ['fastplay', 'rapidvideo', 'streamplay', 'flashx', 'streamito', 'streamango', 'vidoza']
+
+
+def mainlist(item):
+ logger.info()
+
+ autoplay.init(item.channel, list_servers, list_quality)
+
+ itemlist = list()
+ itemlist.append(item.clone(title="Ultimas", action="list_all", url=host, thumbnail=get_thumb('last', auto=True)))
+ itemlist.append(item.clone(title="Generos", action="section", section='genre',
+ thumbnail=get_thumb('genres', auto=True)))
+ # itemlist.append(item.clone(title="Por Calidad", action="section", section='quality',
+ # thumbnail=get_thumb('quality', auto=True)))
+ itemlist.append(item.clone(title="Alfabetico", action="section", section='alpha',
+ thumbnail=get_thumb('alphabet', auto=True)))
+ itemlist.append(item.clone(title="Buscar", action="search", url=host+'?s=',
+ thumbnail=get_thumb('search', auto=True)))
+
+ autoplay.show_option(item.channel, itemlist)
+
+ return itemlist
+
+
+def get_source(url):
+ logger.info()
+ data = httptools.downloadpage(url).data
+ data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+ return data
+
+
+def list_all(item):
+ logger.info()
+ itemlist = []
+
+ data = get_source(item.url)
+ if item.section == 'alpha':
+ patron = '\d+.*?(.*?).*?'
+ patron += '(\d{4}) | '
+ else:
+ patron = '.*?(.*?)<\/h3>.*?(.*?)<\/span>'
+ data = get_source(item.url)
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
+
+ url = scrapedurl
+ if "|" in scrapedtitle:
+ scrapedtitle= scrapedtitle.split("|")
+ contentTitle = scrapedtitle[0].strip()
+ else:
+ contentTitle = scrapedtitle
+
+ contentTitle = re.sub('\(.*?\)','', contentTitle)
+
+ title = '%s [%s]'%(contentTitle, year)
+ thumbnail = 'http:'+scrapedthumbnail
+ itemlist.append(item.clone(action='findvideos',
+ title=title,
+ url=url,
+ thumbnail=thumbnail,
+ contentTitle=contentTitle,
+ infoLabels={'year':year}
+ ))
+ tmdb.set_infoLabels_itemlist(itemlist, True)
+
+ # Paginación
+
+ url_next_page = scrapertools.find_single_match(data,'')
+ if url_next_page:
+ itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
+ return itemlist
+
+def section(item):
+ logger.info()
+ itemlist = []
+
+ data = get_source(host)
+
+ action = 'list_all'
+ if item.section == 'quality':
+ patron = 'menu-item-object-category.*?menu-item-\d+>(.*?)<\/a>'
+ elif item.section == 'genre':
+ patron = '(.*?)'
+ elif item.section == 'year':
+ patron = 'custom menu-item-15\d+>(\d{4})<\/a><\/li>'
+ elif item.section == 'alpha':
+ patron = '(.*?)'
+ action = 'list_all'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for data_one, data_two in matches:
+
+ url = data_one
+ title = data_two
+ if title != 'Ver más':
+ new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section)
+ itemlist.append(new_item)
+
+ return itemlist
+
+
+def findvideos(item):
+ logger.info()
+
+ itemlist = []
+ data = get_source(item.url)
+ data = scrapertools.decodeHtmlentities(data)
+
+ patron = 'id=(Opt\d+)>.*?src=(.*?) frameborder.*?'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for option, scrapedurl in matches:
+ scrapedurl = scrapedurl.replace('"','').replace('&','&')
+ data_video = get_source(scrapedurl)
+ url = scrapertools.find_single_match(data_video, '.*?src=(.*?) frameborder')
+ opt_data = scrapertools.find_single_match(data,'%s>.*?.*?(.*?)'%option).split('-')
+ language = opt_data[0].strip()
+ language = language.replace('(','').replace(')','')
+ quality = opt_data[1].strip()
+ if url != '' and 'youtube' not in url:
+ itemlist.append(item.clone(title='%s', url=url, language=IDIOMAS[language], quality=quality, action='play'))
+ elif 'youtube' in url:
+ trailer = item.clone(title='Trailer', url=url, action='play', server='youtube')
+
+ itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]'%(i.server.capitalize(),
+ i.language, i.quality))
+ tmdb.set_infoLabels_itemlist(itemlist, True)
+ try:
+ itemlist.append(trailer)
+ except:
+ pass
+
+ # Requerido para FilterTools
+ itemlist = filtertools.get_links(itemlist, item, list_language)
+
+ # Requerido para AutoPlay
+
+ autoplay.start(itemlist, item)
+
+ if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
+ itemlist.append(
+ Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
+ action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
+
+
+ return itemlist
+
+
+def search(item, texto):
+ logger.info()
+ texto = texto.replace(" ", "+")
+ item.url = item.url + texto
+
+ if texto != '':
+ return list_all(item)
+ else:
+ return []
+
+
+def newest(categoria):
+ logger.info()
+ itemlist = []
+ item = Item()
+ try:
+ if categoria in ['peliculas','latino']:
+ item.url = host
+ elif categoria == 'infantiles':
+ item.url = host+'/animacion'
+ elif categoria == 'terror':
+ item.url = host+'/terror'
+ elif categoria == 'documentales':
+ item.url = host+'/documental'
+ itemlist = list_all(item)
+ if itemlist[-1].title == 'Siguiente >>':
+ itemlist.pop()
+ except:
+ import sys
+ for line in sys.exc_info():
+ logger.error("{0}".format(line))
+ return []
+
+ return itemlist