Merge pull request #595 from shlibidon/master

Watchseries: New Addon
Alfa committed on 2019-04-10 16:20:48 -05:00 (committed by GitHub)
9 changed files with 385 additions and 24 deletions

View File: plugin.video.alfa/channels/animejl.json

@@ -4,7 +4,7 @@
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://www.animejl.net/img/Logo.png",
"thumbnail": "https://i.imgur.com/S6foTE9.png",
"banner": "",
"categories": [
"anime"
@@ -27,4 +27,4 @@
"visible": true
}
]
}
}

View File: plugin.video.alfa/channels/animejl.py

@@ -113,9 +113,10 @@ def lista(item):
     patron = 'class="anime"><a href="([^"]+)">'
     patron += '<div class="cover" style="background-image: url\((.*?)\)">.*?<h2>([^<]+)<\/h2>'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    context = renumbertools.context(item)
-    context2 = autoplay.context
-    context.extend(context2)
+    if item.extra != "next":
+        context = renumbertools.context(item)
+        context2 = autoplay.context
+        context.extend(context2)
     for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
         url = scrapedurl
         thumbnail = host + scrapedthumbnail
@@ -133,12 +134,12 @@ def lista(item):
         '<a href="([^"]+)" data-ci-pagination-page="\d+" rel="next"')
     next_page_url = scrapertools.decodeHtmlentities(next_page)
     if next_page_url != "":
-        itemlist.append(Item(channel=item.channel,
-                             action="lista",
-                             title=">> Página siguiente",
-                             url=next_page_url,
-                             thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
-                             ))
+        itemlist.append(item.clone(action="lista",
+                                   title=">> Página siguiente",
+                                   url=next_page_url,
+                                   thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
+                                   extra="next"
+                                   ))
     tmdb.set_infoLabels(itemlist, seekTmdb=True)
     return itemlist
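The pagination change above swaps a freshly built Item for item.clone(..., extra="next"): the next-page entry inherits the current item's state, and the new `if item.extra != "next"` guard stops the context menus from being rebuilt on every page. A minimal sketch of that pattern, using a toy Item class rather than Alfa's core.item (names are illustrative only):

```python
# Toy sketch of clone-based pagination with an "extra" flag; the real
# Item lives in core.item and carries many more attributes.
class Item(object):
    def __init__(self, **kwargs):
        self.extra = ''
        self.__dict__.update(kwargs)

    def clone(self, **kwargs):
        attrs = dict(self.__dict__)
        attrs.update(kwargs)          # overrides win over copied state
        return Item(**attrs)

def lista(item):
    if item.extra != "next":
        print("building context menus (page 1 only)")
    # ... scrape the current page, then queue the next one:
    return item.clone(action="lista", url="page2.html", extra="next")

first = Item(channel="animejl", url="page1.html")
next_page = lista(first)   # prints the setup line
lista(next_page)           # guard skips the setup on cloned pages
```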

View File: plugin.video.alfa/channels/peliculonhd.json

@@ -4,7 +4,7 @@
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://peliculonhd.com/wp-content/uploads/2018/09/peliculonnewlogo3-.png",
"thumbnail": "https://www.peliculonhd.tv/wp-content/uploads/2018/09/peliculonnewlogo3-.png",
"banner": "",
"categories": [
"movie",

View File: plugin.video.alfa/channels/watchseries.json

@@ -0,0 +1,23 @@
+{
+    "id": "watchseries",
+    "name": "WatchSeries",
+    "active": true,
+    "adult": false,
+    "language": [],
+    "thumbnail": "https://www2.watchmovie.io/img/icon/new-logo.png",
+    "categories": [
+        "movie",
+        "tvshow",
+        "vos"
+    ],
+    "settings":[
+        {
+            "id": "include_in_global_search",
+            "type": "bool",
+            "label": "Incluir en busqueda global",
+            "default": false,
+            "enabled": true,
+            "visible": true
+        }
+    ]
+}
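The JSON above only declares the include_in_global_search flag; channel code reads it back through platformcode.config. A hedged sketch, assuming the config.get_setting(name, channel) helper that other Alfa channels use for their JSON-declared settings:

```python
# Hedged sketch: reading the boolean declared in watchseries.json.
# Assumes Alfa's platformcode.config.get_setting(name, channel).
from platformcode import config

def searchable():
    # Falls back to the JSON "default" (false) when the user never
    # touched the setting.
    return config.get_setting("include_in_global_search", "watchseries")
```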

View File: plugin.video.alfa/channels/watchseries.py

@@ -0,0 +1,268 @@
+# -*- coding: utf-8 -*-
+
+import re
+import urllib
+import base64
+import urlparse
+
+from core import httptools
+from core import jsontools
+from core import scrapertools
+from core import servertools
+from core import tmdb
+from core.item import Item
+from channels import autoplay
+from platformcode import config, logger
+
+IDIOMAS = {'default': 'VO'}
+title2 = {'Action': 'Action2', 'Xmas': 'Christmas', 'Kungfu': 'Martial%20Arts', 'Psychological': 'Genres', 'TV Show': 'TV', 'Sitcom': 'Genres', 'Costume': 'Genres', 'Mythological': 'Genres'}
+list_language = IDIOMAS.values()
+list_servers = ['directo', 'rapidvideo', 'streamango', 'openload', 'xstreamcdn']
+list_quality = ['default']
+host = "https://www2.watchmovie.io/"
+
+
+def mainlist(item):
+    logger.info()
+    autoplay.init(item.channel, list_servers, list_quality)
+    itemlist = []
+    itemlist.append(item.clone(title="Películas", action='menu_movies', text_color="0xFFD4AF37", text_bold=True, thumbnail="https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png"))
+    itemlist.append(item.clone(title='Series', action='menu_series', thumbnail="https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png", text_color="0xFFD4AF37", text_bold=True))
+    itemlist.append(
+        item.clone(title="Buscar...", action="search", text_color="0xFF5AC0E0", text_bold=True, url=host + 'search.html?keyword=', thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Search.png"))
+    autoplay.show_option(item.channel, itemlist)
+    return itemlist
+
+
+def menu_movies(item):
+    logger.info()
+    itemlist = []
+    itemlist.append(Item(channel=item.channel, text_color="0xFF5AC0E0", text_bold=True, title="Estrenos", fanart="http://i.imgur.com/c3HS8kj.png", action="novedades_cine", url=host, thumbnail="https://github.com/master-1970/resources/raw/master/images/genres/0/New%20Releases.png"))
+    itemlist.append(
+        Item(channel=item.channel, text_color="0xFF5AC0E0", text_bold=True, title="Más Vistas", action="popular", url=host + "popular", extra="popular", thumbnail="https://github.com/master-1970/resources/raw/master/images/genres/0/All%20Movies%20by%20Watched.png"))
+    itemlist.append(Item(channel=item.channel, text_color="0xFFD4AF37", text_bold=True, title="Géneros", action="section", url=host + "popular", thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Genres.png"))
+    itemlist.append(Item(channel=item.channel, text_color="0xFFD4AF37", text_bold=True, title="Año", action="section", url=host + "popular", thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Year.png"))
+    return itemlist
+
+
+def menu_series(item):
+    logger.info()
+    itemlist = []
+    itemlist.append(Item(channel=item.channel, text_color="0xFF399437", text_bold=True, action="novedades_episodios", title="Últimos Episodios de:", folder=False, thumbnail=item.thumbnail))
+    itemlist.append(Item(channel=item.channel, text_color="0xFF5AC0E0", text_bold=True, action="novedades_episodios", title=" Series Tv", url=host + "watch-series", extra="watch-series", thumbnail='https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/New%20TV%20Episodes.png', type='tvshows'))
+    itemlist.append(Item(channel=item.channel, text_color="0xFF5AC0E0", text_bold=True, action="novedades_episodios", title=" Doramas", url=host + "drama", extra="drama", thumbnail='https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Asian%20Movies.png', type='tvshows'))
+    itemlist.append(Item(channel=item.channel, text_color="0xFF5AC0E0", text_bold=True, action="novedades_episodios", title=" Animes", url=host + "anime", extra="anime", thumbnail='https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Anime.png', type='anime'))
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    itemlist = []
+    texto = texto.replace(" ", "+")
+    item.url = item.url + texto
+    if texto != '':
+        try:
+            return popular(item)
+        except:
+            itemlist.append(item.clone(url='', title='No match found...', action=''))
+    return itemlist
+
+
+def section(item):
+    logger.info()
+    data = httptools.downloadpage(item.url).data
+    itemlist = []
+    if 'Géneros' in item.title:
+        patron = '<a href="([^"]+)" class="wpb_button wpb_btn-primary wpb_btn-small ">(.*?)</a>'
+        action = 'popular'
+        icono = ''
+    elif 'Año' in item.title:
+        patron = '<a href="([^"]+)" class="wpb_button wpb_btn-info wpb_btn-small ">(.*?)</a>'
+        action = 'popular'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for scrapedurl, scrapedtitle in matches:
+        url = host + scrapedurl
+        title = scrapedtitle
+        if 'Géneros' in item.title:
+            if title in title2:
+                title1 = title2[title]
+            else:
+                title1 = title
+            icono = 'https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/' + title1 + '.png'
+        else:
+            icono = 'https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Year.png'
+        itemlist.append(Item(channel=item.channel,
+                             action=action,
+                             title=title,
+                             url=url,
+                             text_color="0xFF5AC0E0",
+                             extra="popular",
+                             thumbnail=icono
+                             ))
+    return itemlist
+
+
+def novedades_episodios(item):
+    logger.info()
+    data = httptools.downloadpage(item.url).data
+    url_pagination = scrapertools.find_single_match(data, "<li class='next next page-numbers'><a href='(.*?)'")
+    matches = re.compile('<div class="video_likes icon-tag"> (.*?)</div>[\s\S]+?<a href="(.*?)" class="view_more"></a>[\s\S]+?<img src="([^"]+)" alt="" class="imgHome" title="" alt="([^"]+)"[\s\S]+?</li>', re.DOTALL).findall(data)
+    itemlist = []
+    for episode, url, thumbnail, season in matches:
+        if item.extra == "watch-series":
+            scrapedinfo = season.split(' - ')
+            scrapedtitle = scrapedinfo[0]
+            season = scrapertools.find_single_match(scrapedinfo[1], 'Season (\d+)')
+            episode = scrapertools.find_single_match(episode, 'Episode (\d+)')
+            title = scrapedtitle + " %sx%s" % (season, episode)
+        else:
+            scrapedtitle = season
+            title = scrapedtitle + ' - ' + episode
+        url = urlparse.urljoin(host, url)
+        new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
+                        contentSerieName=scrapedtitle)
+        itemlist.append(new_item)
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='en')
+    if url_pagination:
+        url = urlparse.urljoin(host + item.extra, url_pagination)
+        title = ">> Pagina Siguiente"
+        itemlist.append(Item(channel=item.channel, action="novedades_episodios", title=title, url=url, extra=item.extra))
+    return itemlist
+
+
+def novedades_cine(item):
+    logger.info()
+    data = httptools.downloadpage(item.url).data
+    url_pagination = scrapertools.find_single_match(data, "<li class='next next page-numbers'><a href='(.*?)'")
+    matches = re.compile('<div class="video_likes icon-tag"> (.*?)</div>[\s\S]+?<a href="(.*?)" class="view_more"></a>[\s\S]+?<img src="([^"]+)" alt="" class="imgHome" title="" alt="([^"]+)"[\s\S]+?</li>', re.DOTALL).findall(data)
+    itemlist = []
+    for episode, url, thumbnail, season in matches:
+        scrapedyear = '-'
+        title = "%s [%s]" % (season, episode)
+        url = urlparse.urljoin(host, url)
+        new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, contentTitle=season, thumbnail=thumbnail, infoLabels={'year': scrapedyear})
+        itemlist.append(new_item)
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='en')
+    if url_pagination:
+        url = urlparse.urljoin(host + item.extra, url_pagination)
+        title = ">> Pagina Siguiente"
+        itemlist.append(Item(channel=item.channel, action="novedades_cine", title=title, url=url))
+    return itemlist
+
+
+def popular(item):
+    logger.info()
+    data = httptools.downloadpage(item.url).data
+    url_pagination = scrapertools.find_single_match(data, "<li class='next next page-numbers'><a href='(.*?)'")
+    matches = re.compile('<div class="video_image_container sdimg">[\s\S]+?<a href="(.*?)" class="view_more" title="([^"]+)"></a>[\s\S]+?<img src="([^"]+)" alt=""', re.DOTALL).findall(data)
+    itemlist = []
+    for url, title, thumbnail in matches:
+        scrapedyear = '-'
+        if "- Season " in title:
+            scrapedinfo = title.split(' - Season ')
+            title2 = scrapedinfo[0]
+            season = scrapedinfo[1]
+            url = urlparse.urljoin(host, url) + "/season"
+            new_item = Item(channel=item.channel, action="episodios", title=title, contentSerieName=title2, url=url, thumbnail=thumbnail, infoLabels={'season': season})
+        elif "-info/" in url:
+            url = urlparse.urljoin(host, url)
+            url = url.replace("-info/", "/") + "/all"
+            new_item = Item(channel=item.channel, action="episodios", title=title, contentSerieName=title, url=url, thumbnail=thumbnail)
+        else:
+            url = urlparse.urljoin(host, url) + "-episode-0"
+            extra = "film"
+            new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, extra=extra, contentTitle=title, thumbnail=thumbnail, infoLabels={'year': scrapedyear})
+        itemlist.append(new_item)
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='en')
+    if url_pagination:
+        url = urlparse.urljoin(host + item.extra, url_pagination)
+        title = ">> Pagina Siguiente"
+        itemlist.append(Item(channel=item.channel, action="popular", title=title, url=url))
+    return itemlist
+
+
+def episodios(item):
+    logger.info()
+    itemlist = []
+    infoLabels = item.infoLabels
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
+    matches = re.compile('<div class="vid_info"><span><a href="(.*?)" title="(.*?)" class="videoHname"><b>Episode (\d+)', re.DOTALL).findall(data)
+    for url, title, episode in matches:
+        url = urlparse.urljoin(host, url)
+        thumbnail = item.thumbnail
+        title = title + " - Ep. " + episode
+        if " Season " in title:
+            scrapedinfo = title.split(' Season ')
+            title = scrapedinfo[0] + " " + infoLabels['season'] + "x" + episode
+        infoLabels['episode'] = episode
+        itemlist.append(Item(channel=item.channel,
+                             action="findvideos",
+                             title=title,
+                             url=url,
+                             thumbnail=thumbnail,
+                             infoLabels=infoLabels
+                             ))
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='en')
+    return itemlist
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+    if "-episode-0" in item.url:
+        data1 = httptools.downloadpage(item.url).data
+        if "Page not found</h1>" in data1:
+            item.url = item.url.replace("-episode-0", "-episode-1")
+    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)
+    matches = scrapertools.find_multiple_matches(data, 'data-video="(.*?)"')
+    url = ''
+    urlsub = ''
+    urlsub = scrapertools.find_single_match(data, "&sub=(.*?)&cover")
+    if urlsub != '':
+        urlsub = base64.b64decode(urlsub)
+        urlsub = 'https://sub.movie-series.net' + urlsub
+    for source in matches:
+        if '/streaming.php' in source:
+            new_data = httptools.downloadpage("https:" + source).data
+            url = scrapertools.find_single_match(new_data, "file: '(https://redirector.*?)'")
+            thumbnail = "https://martechforum.com/wp-content/uploads/2015/07/drive-300x300.png"
+            if url == "":
+                source = source.replace("streaming.php", "load.php")
+        elif '/load.php' in source:
+            new_data = httptools.downloadpage("https:" + source).data
+            url = scrapertools.find_single_match(new_data, "file: '(https://[A-z0-9]+.cdnfile.info/.*?)'")
+            thumbnail = "https://vidcloud.icu/img/logo_vid.png"
+        else:
+            url = source
+            thumbnail = ""
+        if "https://redirector." in url or "cdnfile.info" in url:
+            url = url + "|referer=https://vidcloud.icu/"
+        if url != "":
+            itemlist.append(Item(channel=item.channel, url=url, title='%s', action='play', plot=item.plot, thumbnail=thumbnail, subtitle=urlsub))
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
+    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra == 'film':
+        itemlist.append(Item(channel=item.channel, title="Añadir a la Videoteca", text_color="yellow",
+                             action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
+                             contentTitle=item.contentTitle
+                             ))
+    # Required for AutoPlay
+    autoplay.start(itemlist, item)
+    return itemlist
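One detail of findvideos() worth unpacking: the subtitle URL travels as a base64-encoded path in the &sub= query parameter and is resolved against sub.movie-series.net. A standalone reproduction with a made-up page fragment (the real value is scraped from the player HTML):

```python
import base64
import re

# Made-up page fragment; the channel scrapes the real one with
# scrapertools.find_single_match(data, "&sub=(.*?)&cover").
data = 'player.html?id=42&sub=L3N1Yi9zaG93L2VwMS52dHQ=&cover=x.jpg'
encoded = re.search(r'&sub=(.*?)&cover', data).group(1)
if encoded:
    path = base64.b64decode(encoded).decode('utf-8')
    urlsub = 'https://sub.movie-series.net' + path
    print(urlsub)  # https://sub.movie-series.net/sub/show/ep1.vtt
```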

View File: plugin.video.alfa/servers/gamovideo.json

@@ -5,7 +5,7 @@
"patterns": [
{
"pattern": "gamovideo.com/(?:embed-|)([a-z0-9]+)",
"url": "http://gamovideo.com/embed-\\1.html"
"url": "http://gamovideo.com/\\1"
}
]
},
@@ -39,4 +39,4 @@
         }
     ],
     "thumbnail": "server_gamovideo.png"
-}
\ No newline at end of file
+}
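The pattern itself is unchanged; only the expansion template drops the embed-...html wrapper, so both URL shapes now normalize to the same plain page URL. A quick check of that behavior:

```python
import re

pattern = r"gamovideo.com/(?:embed-|)([a-z0-9]+)"
for sample in ("http://gamovideo.com/embed-abc123.html",
               "http://gamovideo.com/abc123"):
    video_id = re.search(pattern, sample).group(1)
    # The new "url" template expands \1 into the plain page:
    print("http://gamovideo.com/" + video_id)
```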

View File: plugin.video.alfa/servers/gamovideo.py

@@ -7,17 +7,14 @@ from core import scrapertools
 from lib import jsunpack
 from platformcode import logger
 
-headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}
+headers = {"User-Agent":"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0"}
 
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
-    data = httptools.downloadpage(page_url)
-    if data.code==404:
-        data = httptools.downloadpage(page_url, headers=headers, add_referer=True)
-    data = data.data
-    if "File was deleted" in data or "Not Found" in data or "File was locked by administrator" in data:
+    data = httptools.downloadpage(page_url, headers=headers, cookies=False).data
+    if "File was deleted" in data or "<noscript>" not in data or "File was locked by administrator" in data:
         return False, "[Gamovideo] El archivo no existe o ha sido borrado"
     if "Video is processing now" in data:
         return False, "[Gamovideo] El video está procesándose en estos momentos. Inténtelo mas tarde."
@@ -28,10 +25,7 @@ def test_video_exists(page_url):
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("(page_url='%s')" % page_url)
-    data = httptools.downloadpage(page_url)
-    if data.code==404:
-        data = httptools.downloadpage(page_url, headers=headers, add_referer=True)
-    data = data.data
+    data = httptools.downloadpage(page_url, headers=headers, cookies=False).data
     packer = scrapertools.find_single_match(data,
                                             "<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
     if packer != "":
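The rewrite also swaps the dead-link heuristic: instead of searching for a "Not Found" string, it treats a missing <noscript> block as the signal that the file is gone (the diff assumes every live Gamovideo page ships one). A toy version of the predicate:

```python
# Toy version of the availability predicate introduced above; the
# diff assumes every live Gamovideo page contains a <noscript> block.
def video_exists(html):
    gone = ("File was deleted" in html
            or "<noscript>" not in html
            or "File was locked by administrator" in html)
    return not gone

print(video_exists("<html><noscript>enable JS</noscript></html>"))  # True
print(video_exists("<html>File was deleted</html>"))                # False
```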

View File: plugin.video.alfa/servers/xstream.json

@@ -0,0 +1,42 @@
+{
+    "active": true,
+    "find_videos": {
+        "ignore_urls": [],
+        "patterns": [
+            {
+                "pattern": "(https://www.xstreamcdn.com/v/[A-z0-9_-]+)",
+                "url": "\\1"
+            }
+        ]
+    },
+    "free": true,
+    "id": "xstream",
+    "name": "xstream",
+    "settings": [
+        {
+            "default": false,
+            "enabled": true,
+            "id": "black_list",
+            "label": "@60654",
+            "type": "bool",
+            "visible": true
+        },
+        {
+            "default": 0,
+            "enabled": true,
+            "id": "favorites_servers_list",
+            "label": "@60655",
+            "lvalues": [
+                "No",
+                "1",
+                "2",
+                "3",
+                "4",
+                "5"
+            ],
+            "type": "list",
+            "visible": false
+        }
+    ],
+    "thumbnail": "https://library.vodkr.com/media/24364/xstreamlogo.jpg"
+}

View File: plugin.video.alfa/servers/xstream.py

@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+import urllib
+
+from core import httptools
+from core import scrapertools
+from core import jsontools
+from platformcode import logger, config
+
+
+def test_video_exists(page_url):
+    logger.info("(page_url='%s')" % page_url)
+    data = httptools.downloadpage(page_url).data
+    if "ile was deleted" in data or "Page Cannot Be Found" in data or "<title>Sorry 404 not found" in data:
+        return False, "[xstreamcdn.com] El archivo ha sido eliminado o no existe"
+    return True, ""
+
+
+def get_video_url(page_url, premium=False, user="", password="", video_password=""):
+    logger.info("url=" + page_url)
+    video_urls = []
+    post = {}
+    post = urllib.urlencode(post)
+    data = httptools.downloadpage("https://xstreamcdn.com/api/source/" + scrapertools.find_single_match(page_url, "/v/([A-z0-9_-]+)"), post=post, add_referer=page_url).data
+    json_data = jsontools.load(data)
+    check = json_data['success']
+    if check == True:
+        for element in json_data['data']:
+            media_url = element['file']
+            res = element['label']
+            tipo = element['type']
+            video_urls.append([tipo + " (" + res + ") [xstreamcdn]", media_url])
+    return video_urls
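get_video_url() posts an empty form to /api/source/<id> and expects JSON with a success flag plus a data list of file/label/type entries. A hedged sketch of that parsing against an illustrative response (shape inferred from the parser above, not captured traffic):

```python
import json

# Illustrative response only; field names match what the server
# module reads: success, data[].file / .label / .type.
sample = ('{"success": true, "data": ['
          '{"file": "https://example-cdn.test/v.mp4",'
          ' "label": "720p", "type": "mp4"}]}')
json_data = json.loads(sample)
video_urls = []
if json_data['success']:
    for element in json_data['data']:
        video_urls.append([element['type'] + " (" + element['label'] + ") [xstreamcdn]",
                           element['file']])
print(video_urls)
# [['mp4 (720p) [xstreamcdn]', 'https://example-cdn.test/v.mp4']]
```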