@@ -7,7 +7,8 @@
"thumbnail": "https://s21.postimg.cc/b43i3ljav/animeshd.png",
"banner": "https://s4.postimg.cc/lulxulmql/animeshd-banner.png",
"categories": [
"anime"
"anime",
"vos"
],
"settings": [
{

@@ -3,10 +3,11 @@
"name": "AnimeYT",
"active": true,
"adult": false,
"language": "es",
"language": "cast, lat",
"thumbnail": "http://i.imgur.com/dHpupFk.png",
"categories": [
"anime"
"anime",
"vos"
],
"settings": [
{

@@ -7,6 +7,7 @@
"thumbnail": "cinehindi.png",
"banner": "http://i.imgur.com/cau9TVe.png",
"categories": [
"movie"
"movie",
"vos"
]
}

@@ -59,18 +59,22 @@ def list_all(item):
itemlist = []

data = get_source(item.url)
full_data = data
data = scrapertools.find_single_match(data, '<ul class=MovieList NoLmtxt.*?</ul>')
if item.section == 'alpha':
patron = '<span class=Num>\d+.*?<a href=(.*?) class.*?<img src=(.*?) alt=.*?<strong>(.*?)</strong>.*?'
patron += '<td>(\d{4})</td>'
matches = re.compile(patron, re.DOTALL).findall(full_data)
else:
patron = '<article id=post-.*?<a href=(.*?)>.*?<img src=(.*?) alt=.*?'
patron += '<h3 class=Title>(.*?)<\/h3>.*?<span class=Year>(.*?)<\/span>'
data = get_source(item.url)
matches = re.compile(patron, re.DOTALL).findall(data)
patron += '<h3 class=Title>(.*?)<\/h3>(?:</a>|<span class=Year>(.*?)<\/span>)'
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:

url = scrapedurl
if year == '':
year = '-'
if "|" in scrapedtitle:
scrapedtitle= scrapedtitle.split("|")
contentTitle = scrapedtitle[0].strip()

@@ -92,7 +96,7 @@ def list_all(item):

# Paginación

url_next_page = scrapertools.find_single_match(data,'<a class=next.*?href=(.*?)>')
url_next_page = scrapertools.find_single_match(full_data,'<a class=next.*?href=(.*?)>')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
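The switch from `data` to `full_data` in the pagination match above matters because the next-page link sits outside the `<ul class=MovieList ...>` block that `data` is narrowed down to. A minimal standalone sketch (plain re, invented HTML) of that scoping difference:

import re

# Invented page layout mirroring the structure the channel scrapes.
full_data = ('<ul class=MovieList NoLmtxt><li>items...</li></ul>'
             '<a class=next href=https://example.com/page/2>')
# The item listing that `data` is narrowed down to.
data = re.search('<ul class=MovieList NoLmtxt.*?</ul>', full_data, re.DOTALL).group(0)

patron = '<a class=next.*?href=(.*?)>'
print(re.findall(patron, data))       # [] - the link is outside the <ul> block
print(re.findall(patron, full_data))  # ['https://example.com/page/2']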
@@ -102,14 +106,13 @@ def section(item):
itemlist = []

data = get_source(host)

action = 'list_all'
if item.section == 'quality':
patron = 'menu-item-object-category.*?menu-item-\d+><a href=(.*?)>(.*?)<\/a>'
patron = 'menu-item-object-category.*?menu-item-\d+ menu-category-list><a href=(.*?)>(.*?)<\/a>'
elif item.section == 'genre':
patron = '<a href=(http:.*?) class=Button STPb>(.*?)</a>'
patron = '<a href=([^ ]+) class=Button STPb>(.*?)</a>'
elif item.section == 'year':
patron = 'custom menu-item-15\d+><a href=(.*?\?s.*?)>(\d{4})<\/a><\/li>'
patron = '<li><a href=([^>]+)>(\d{4})<\/a><\/li>'
elif item.section == 'alpha':
patron = '<li><a href=(.*?letters.*?)>(.*?)</a>'
action = 'list_all'

@@ -7,7 +7,8 @@
"thumbnail": "https://s14.postimg.cc/ibh4znkox/doramasmp4.png",
"banner": "",
"categories": [
"tvshow"
"tvshow",
"vos"
],
"settings": [
{

plugin.video.alfa/channels/dospelis.json (new file, 79 lines)
@@ -0,0 +1,79 @@
{
"id": "dospelis",
"name": "DosPelis",
"active": true,
"adult": false,
"language": ["lat", "cast", "vose"],
"thumbnail": "https://www.dospelis.com/wp-content/uploads/2018/07/dospelislogo.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"VOSE"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

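The setting ids declared above are read back by the channel module added below via config.get_setting('comprueba_enlaces', 'dospelis') and config.get_setting('comprueba_enlaces_num', 'dospelis'). A standalone sketch (assumes the JSON above is saved as dospelis.json; outside Kodi there is no settings store, so only the declared defaults are shown, which is roughly what get_setting falls back to before the user changes anything):

import json

with open('dospelis.json') as fh:
    channel = json.load(fh)

# Default value per setting id, taken straight from the channel JSON.
defaults = {s['id']: s['default'] for s in channel['settings']}
print(defaults['comprueba_enlaces'])      # False
print(defaults['comprueba_enlaces_num'])  # 1 -> second entry of ["5", "10", "15", "20"]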
plugin.video.alfa/channels/dospelis.py (new file, 345 lines)
@@ -0,0 +1,345 @@
# -*- coding: utf-8 -*-
# -*- Channel DosPelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import urllib
import base64

from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger


IDIOMAS = {'mx': 'Latino', 'dk':'Latino', 'es': 'Castellano', 'en': 'VOSE', 'gb':'VOSE'}
list_language = IDIOMAS.values()

list_quality = []

list_servers = [
'directo',
'openload',
]

__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'dospelis')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'dospelis')

host = 'https://dospelis.com/'

def mainlist(item):
logger.info()

autoplay.init(item.channel, list_servers, list_quality)

itemlist = []

itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
thumbnail= get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title='Series', url=host+'tvshows', action='list_all', type='tvshows',
thumbnail= get_thumb('tvshows', auto=True)))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + '?s=', thumbnail=get_thumb("search", auto=True),
extra='movie'))

autoplay.show_option(item.channel, itemlist)

return itemlist

def menu_movies(item):
logger.info()

itemlist=[]

itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'movies', action='list_all',
thumbnail=get_thumb('all', auto=True), type='movies'))
itemlist.append(Item(channel=item.channel, title='Genero', action='section',
thumbnail=get_thumb('genres', auto=True), type='movies'))
itemlist.append(Item(channel=item.channel, title='Por Año', action='section',
thumbnail=get_thumb('year', auto=True), type='movies'))

return itemlist

def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
return data


def get_language(lang_data):
logger.info()
language = []
lang_list = scrapertools.find_multiple_matches(lang_data, '/flags/(.*?).png\)')
for lang in lang_list:
if lang == 'en':
lang = 'vose'
if lang not in language:
language.append(lang)
return language
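A standalone sketch of what get_language above returns (the flag markup is invented but follows the /flags/<code>.png) pattern the regex expects): each code is collected once, and 'en' is remapped to 'vose'.

import re

def get_language(lang_data):
    language = []
    for lang in re.findall(r'/flags/(.*?)\.png\)', lang_data):
        if lang == 'en':
            lang = 'vose'
        if lang not in language:
            language.append(lang)
    return language

sample = 'url(/flags/mx.png) ... url(/flags/en.png) ... url(/flags/mx.png)'
print(get_language(sample))  # ['mx', 'vose']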

def section(item):
logger.info()
itemlist=[]
duplicados=[]
data = get_source(host+'/'+item.type)
if 'Genero' in item.title:
patron = '<li class=cat-item cat-item-\d+><a href=(.*?) >(.*?)/i>'
elif 'Año' in item.title:
patron = '<li><a href=(.*?release.*?)>(.*?)</a>'
elif 'Calidad' in item.title:
patron = 'menu-item-object-dtquality menu-item-\d+><a href=(.*?)>(.*?)</a>'

matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
plot=''
if 'Genero' in item.title:
quantity = scrapertools.find_single_match(scrapedtitle,'</a> <i>(.*?)<')
title = scrapertools.find_single_match(scrapedtitle,'(.*?)</')
title = title
plot = '%s elementos' % quantity.replace('.','')
else:
title = scrapedtitle
if title not in duplicados:
itemlist.append(Item(channel=item.channel, url=scrapedurl, title=title, plot=plot, action='list_all',
type=item.type))
duplicados.append(title)

return itemlist


def list_all(item):
logger.info()
itemlist = []

data = get_source(item.url)

if item.type == 'movies':
patron = '<article id=post-\d+ class=item movies><div class=poster><img src=(.*?) alt=(.*?)>.*?quality>(.*?)'
patron += '</span><\/div><a href=(.*?)>.*?<\/h3><span>(.*?)<\/span><\/div>.*?flags(.*?)metadata'
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year, lang_data in matches:


title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
contentTitle = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
language = get_language(lang_data)

itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
language=language,
quality=quality,
infoLabels={'year':year}))

elif item.type == 'tvshows':
patron = '<article id=post-\d+ class=item tvshows><div class=poster><img src=(.*?) alt=(.*?)>.*?'
patron += '<a href=(.*?)>.*?<\/h3><span>(.*?)<\/span><\/div>'
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
title = scrapedtitle
contentSerieName = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl

itemlist.append(item.clone(action='seasons',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=contentSerieName,
infoLabels={'year':year}))

tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Paginación

#url_next_page = scrapertools.find_single_match(data,"<a class='arrow_pag' href=([^>]+)><i id='nextpagination'")
url_next_page = scrapertools.find_single_match(data,"<link rel=next href=([^ ]+) />")
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))

return itemlist

def seasons(item):
logger.info()

itemlist=[]

data=get_source(item.url)
patron='Temporada \d+'
matches = re.compile(patron, re.DOTALL).findall(data)

infoLabels = item.infoLabels
for season in matches:
season = season.lower().replace('temporada','')
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))

return itemlist

def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)

return itemlist

def episodesxseasons(item):
logger.info()

itemlist = []

data=get_source(item.url)
patron='class=numerando>%s - (\d+)</div><div class=episodiotitle><a href=(.*?)>(.*?)<' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)

infoLabels = item.infoLabels

for scrapedepisode, scrapedurl, scrapedtitle in matches:

infoLabels['episode'] = scrapedepisode
url = scrapedurl
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)

itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))

tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

return itemlist


def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'id=option-(\d+).*?rptss src=(.*?) frameborder'
matches = re.compile(patron, re.DOTALL).findall(data)
lang=''
for option, scrapedurl in matches:
lang = scrapertools.find_single_match(data, 'href=#option-%s>.*?/flags/(.*?).png' % option)
quality = ''
if lang not in IDIOMAS:
lang = 'en'
title = '%s %s'

itemlist.append(
Item(channel=item.channel, url=scrapedurl, title=title, action='play', quality=quality, language=IDIOMAS[lang],
infoLabels=item.infoLabels))

itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))

# Requerido para Filtrar enlaces

if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

# Requerido para FilterTools

itemlist = filtertools.get_links(itemlist, item, list_language)

# Requerido para AutoPlay

autoplay.start(itemlist, item)

itemlist = sorted(itemlist, key=lambda it: it.language)

if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

return itemlist
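The bare title = '%s %s' in findvideos above is intentional: the placeholders are filled in later, when servertools.get_servers_itemlist applies the lambda shown above to every item once its server has been identified. A standalone sketch of that deferred formatting (plain dicts stand in for Item; the server names are examples):

items = [{'title': '%s %s', 'server': 'openload', 'language': 'Castellano'},
         {'title': '%s %s', 'server': 'directo', 'language': 'VOSE'}]

# Same shape as: lambda x: x.title % (x.server.capitalize(), x.language)
for it in items:
    it['title'] = it['title'] % (it['server'].capitalize(), it['language'])
    print(it['title'])  # "Openload Castellano", then "Directo VOSE"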

def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto

if texto != '':
return search_results(item)
else:
return []

def search_results(item):
logger.info()

itemlist=[]

data=get_source(item.url)
patron = '<article>.*?<a href=(.*?)><img src=(.*?) alt=(.*?) />.*?meta.*?year>(.*?)<(.*?)<p>(.*?)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, scrapedthumb, scrapedtitle, year, lang_data, scrapedplot in matches:

title = scrapedtitle
url = scrapedurl
thumbnail = scrapedthumb
plot = scrapedplot
language = get_language(lang_data)
if language:
action = 'findvideos'
else:
action = 'seasons'

new_item=Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, plot=plot,
action=action,
language=language, infoLabels={'year':year})
if new_item.action == 'findvideos':
new_item.contentTitle = new_item.title
else:
new_item.contentSerieName = new_item.title

itemlist.append(new_item)

tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

return itemlist

def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas']:
item.url = host + 'movies/'
elif categoria == 'infantiles':
item.url = host + 'genre/animacion/'
elif categoria == 'terror':
item.url = host + 'genre/terror/'
item.type='movies'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []

return itemlist

plugin.video.alfa/channels/goodpelis.json (new file, 54 lines)
@@ -0,0 +1,54 @@
{
"id": "goodpelis",
"name": "GoodPelis",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "http://goodpelis.net/wp-content/uploads/2017/11/Logo-GP.png",
"banner": "",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - Terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

plugin.video.alfa/channels/goodpelis.py (new file, 354 lines)
@@ -0,0 +1,354 @@
# -*- coding: utf-8 -*-
# -*- Channel GoodPelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re

from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb

host = 'https://goodpelis.net/'


def mainlist(item):
logger.info()

itemlist = []

itemlist.append(item.clone(title="Peliculas",
action="menu_peliculas",
thumbnail=get_thumb('movies', auto=True),
))

# itemlist.append(item.clone(title="Series",
# action="menu_series",
# thumbnail=get_thumb('tvshows', auto=True),
# ))

itemlist.append(item.clone(title="Buscar", action="search",
thumbnail=get_thumb('search', auto=True),
url=host + '?s='
))

return itemlist


def menu_peliculas(item):
logger.info()

itemlist = []

itemlist.append(item.clone(title="Todas",
action="list_all",
thumbnail=get_thumb('all', auto=True),
url=host + 'page/1/?s'
))

itemlist.append(item.clone(title="Generos",
action="seccion",
url=host + 'page/1/?s',
thumbnail=get_thumb('genres', auto=True),
seccion='generos-pelicula'
))

itemlist.append(item.clone(title="Por Año",
action="seccion",
url=host + 'page/1/?s',
thumbnail=get_thumb('year', auto=True),
seccion='fecha-estreno'
))

itemlist.append(item.clone(title="Calidad",
action="seccion",
url=host + 'page/1/?s',
thumbnail=get_thumb('quality', auto=True),
seccion='calidad'
))

return itemlist


def menu_series(item):
logger.info()

itemlist = []

itemlist.append(item.clone(title="Todas",
action="list_all", thumbnail=get_thumb('all', auto=True),
url=host + 'series/page/1/',
))

itemlist.append(item.clone(title="Generos",
action="seccion",
url=host + 'series/page/1/',
thumbnail=get_thumb('genres', auto=True),
seccion='generos-serie'
))

itemlist.append(item.clone(title="Por Año",
action="seccion",
url=host + 'series/page/1/',
thumbnail=get_thumb('year', auto=True),
seccion='series-lanzamiento'
))

return itemlist


def list_all(item):
logger.info()

itemlist = []
data = get_source(item.url)
patron = 'class=item.*?<a href=(.*?)><div class=image.*?<img src=(.*?) alt=(.*?) (?:\(\d{4}|width).*?'
patron += 'fixyear><h2>.*?<\/h2>.*?<span class=year>(.*?)<\/span><\/div>(.*?)<\/div>'

matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedquality in matches:
url = scrapedurl
action = 'findvideos'
thumbnail = scrapedthumbnail
plot = ''
contentSerieName = ''
contentTitle = scrapedtitle
title = contentTitle
quality = 'Full HD'
if scrapedquality != '':
quality = scrapertools.find_single_match(scrapedquality, 'calidad2>(.*?)<')
title = contentTitle + ' (%s)' % quality

year = scrapedyear

if 'series' in item.url or 'series' in url:
action = 'seasons'
contentSerieName = contentTitle
quality = ''
new_item = Item(channel=item.channel,
action=action,
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
quality=quality,
infoLabels={'year': year}
)
if 'series' not in item.url:
new_item.contentTitle = contentTitle
else:
new_item.contentSerieName = contentSerieName
if 'temporada' not in url:
itemlist.append(new_item)

tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion

if itemlist != []:
next_page = scrapertools.find_single_match(data,
'<div class=pag_b><a href=(.*?)>Siguiente</a>')
if next_page != '':
itemlist.append(Item(channel=item.channel,
action="list_all",
title='Siguiente >>>',
url=next_page,
))
return itemlist


def seccion(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.seccion == 'generos-pelicula':
patron = '<li class=cat-item cat-item-.*?><a href=(.*?)>(.*?<\/a> <span>.*?)<\/span><\/li>'
elif item.seccion == 'generos-serie':
patron = '<li class=cat-item cat-item-.*?><a href=(.*?\/series-genero\/.*?)>(.*?<\/a> <span>.*?)<\/span><\/li>'
elif item.seccion in ['fecha-estreno', 'series-lanzamiento']:
patron = '<li><a href=%sfecha-estreno(.*?)>(.*?)<\/a>' % host
elif item.seccion == 'calidad':
patron = '<li><a href=%scalidad(.*?)>(.*?)<\/a>' % host
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
thumbnail = ''
if 'generos' in item.seccion:
cantidad = scrapertools.find_single_match(scrapedtitle, '<span>(\d+)')
title = scrapertools.find_single_match(scrapedtitle, '(.*?)<')
url = scrapedurl
title = scrapertools.decodeHtmlentities(title)
title = title + ' (%s)' % cantidad
elif item.seccion in ['series-lanzamiento', 'fecha-estreno', 'calidad']:
title = scrapedtitle
url = '%s%s%s' % (host, item.seccion, scrapedurl)

itemlist.append(item.clone(action='list_all',
title=title,
url=url,
thumbnail=thumbnail
))
return itemlist


def seasons(item):
logger.info()

itemlist = []

data = get_source(item.url)
patron = '<span class=title>.*?- Temporada (.*?)<\/span>'
matches = re.compile(patron, re.DOTALL).findall(data)

for temporada in matches:
title = 'Temporada %s' % temporada
contentSeasonNumber = temporada
item.infoLabels['season'] = contentSeasonNumber
itemlist.append(item.clone(action='episodiosxtemp',
title=title,
contentSeasonNumber=contentSeasonNumber
))

tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber
))

return itemlist


def episodios(item):
logger.info()

itemlist = []
data = get_source(item.url)

patron = '<li><div class=numerando>(\d+).*?x.*?(\d+)<\/div>.*?<a href=(.*?)> (.*?)<\/a>.*?<\/i>'
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedtemp, scrapedep, scrapedurl, scrapedtitle in matches:
temporada = scrapedtemp
title = temporada + 'x%s %s' % (scrapedep, scrapedtitle)
url = scrapedurl
contentEpisodeNumber = scrapedep
item.infoLabels['episode'] = contentEpisodeNumber
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
contentEpisodeNumber=contentEpisodeNumber,
))
return itemlist


def episodiosxtemp(item):
logger.info()

itemlist = []
data = get_source(item.url)
temporada = item.contentSeasonNumber
patron = '<li><div class=numerando>%s.*?x.*?(\d+)<\/div>.*?<a href=(.*?)> (.*?)<\/a>.*?<\/i>' % temporada
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedep, scrapedurl, scrapedtitle in matches:
title = temporada + 'x%s %s' % (scrapedep, scrapedtitle)
url = scrapedurl
contentEpisodeNumber = scrapedep
item.infoLabels['episode'] = contentEpisodeNumber
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
contentEpisodeNumber=contentEpisodeNumber,
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist


def findvideos(item):
logger.info()
url_list = []
itemlist = []
duplicados = []
data = get_source(item.url)
src = data
patron = '<(?:iframe|IFRAME).*?(?:src|SRC)=(.*?) (?:scrolling|frameborder|FRAMEBORDER)'
matches = re.compile(patron, re.DOTALL).findall(data)

for url in matches:
lang = 'LAT'
quality = item.quality
title = '[%s] [%s]'
if url != '':
itemlist.append(item.clone(title=title, url=url, action='play', language=lang))

itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % (i.server, i.language))

if item.infoLabels['mediatype'] == 'movie':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))

return itemlist


def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
try:
if texto != '':
return list_all(item)
else:
return []
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []


def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas', 'latino']:
item.url = host + 'page/1/?s'

elif categoria == 'infantiles':
item.url = host + 'category/animacion/'

elif categoria == 'terror':
item.url = host + 'category/terror/'

itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []

return itemlist


def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
return data
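get_source above strips quotes, newlines, tabs, <br> tags and runs of whitespace before any pattern is applied, which is why every patron in this channel matches href=(...) and src=(...) without quotation marks. A standalone sketch of that normalization (invented HTML snippet):

import re

def normalize(data):
    # The same kind of cleanup get_source applies to the downloaded page.
    return re.sub(r'"|\n|\r|\t|<br>|\s{2,}', "", data)

raw = '<a href="https://example.com/movie-1/">\n\t<img src="poster.jpg" alt="Movie 1">'
clean = normalize(raw)
print(clean)  # <a href=https://example.com/movie-1/><img src=poster.jpg alt=Movie 1>
print(re.findall('href=(.*?)>', clean))  # ['https://example.com/movie-1/']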
@@ -517,10 +517,10 @@ def findvideos(item):
url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False))
title_label = " ( [COLOR green][B]Tráiler[/B][/COLOR] )"
it1.append(
item.clone(channel="trailertools", action="buscartrailer", title=title_label, contentTitle=item.show, url=item.url,
Item(channel="trailertools", action="buscartrailer", title=title_label, contentTitle=item.show, url=item.url,
thumbnail=item.thumbnail, show=item.show))
it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=True))
thumbnail=item.thumbnail, show=item.show, language=item.language, folder=True))
data_js = httptools.downloadpage("%s/templates/hdfull/js/jquery.hdfull.view.min.js" % host).data
key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')
data_js = httptools.downloadpage("%s/js/providers.js" % host).data

@@ -566,8 +566,8 @@ def findvideos(item):
if account:
url += "###" + id + ";" + type
it2.append(
item.clone(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels,
Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels, language=idioma,
contentTitle=item.contentTitle, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))

@@ -85,7 +85,8 @@ def search(item, texto):
def sub_search(item):
logger.info()
itemlist =[]
data = httptools.downloadpage(item.url, add_referer=True).data
headers = {'Referer':host, 'X-Requested-With': 'XMLHttpRequest'}
data = httptools.downloadpage(item.url, headers=headers).data
dict_data = jsontools.load(data)
list =dict_data["data"] [item.type]
if item.type == "m":

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# -*- Channel PoseidonHD -*-
# -*- Channel PelisR -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

@@ -31,8 +31,8 @@ list_servers = [
'rapidvideo'
]

__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'poseidonhd')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'poseidonhd')
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'pelisr')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'pelisr')

host = 'https://pelisr.com/'

@@ -79,7 +79,6 @@ def get_source(url):
def get_language(lang_data):
logger.info()
language = []
logger.debug(lang_data)
lang_list = scrapertools.find_multiple_matches(lang_data, '/flags/(.*?).png\)')
for lang in lang_list:
if lang == 'en':
@@ -125,8 +124,6 @@ def list_all(item):
itemlist = []

data = get_source(item.url)
logger.debug(data)
#return

if item.type == 'movies':
patron = '<article id=post-\d+ class=item movies><div class=poster><img src=(.*?) alt=(.*?)>.*?quality>(.*?)'
@@ -185,7 +182,6 @@ def seasons(item):
itemlist=[]

data=get_source(item.url)
logger.debug(data)
patron='Temporada \d+'
matches = re.compile(patron, re.DOTALL).findall(data)

@@ -243,11 +239,9 @@ def findvideos(item):
from lib import generictools
itemlist = []
data = get_source(item.url)
logger.debug(data)
patron = 'id=option-(\d+).*?rptss src=(.*?) frameborder'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, scrapedurl in matches:
logger.debug(scrapedurl)
lang = scrapertools.find_single_match(data, 'href=#option-%s>.*?/flags/(.*?).png' % option)
quality = ''
if lang not in IDIOMAS:
@@ -261,7 +255,6 @@ def findvideos(item):

elif 'wd=' in scrapedurl:
new_id = scrapertools.find_single_match(scrapedurl, 'wd=(.*?)&')
logger.debug('new_id %s' % new_id)
new_id = new_id[::-1]
new_url = 'https://pelisr.com/encri/?wr=%s' % new_id
headers = {'Referer': scrapedurl}

@@ -2,10 +2,13 @@

import re
import urlparse
import urllib

from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core import jsontools
from core.item import Item
from platformcode import config, logger

@@ -316,40 +319,33 @@ def lasmas(item):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
return data

def get_link(data):
new_url = scrapertools.find_single_match(data, '(?:IFRAME|iframe) src=(.*?) scrolling')
return new_url

def findvideos(item):
logger.info()
host = 'https://www.locopelis.tv/'
itemlist = []
data = httptools.downloadpage(item.url).data

anterior = scrapertools.find_single_match(data, '<a class="left" href="([^"]+)" title="Cap.tulo Anterior"></a>')
siguiente = scrapertools.find_single_match(data, '<a class="right" href="([^"]+)" title="Cap.tulo Siguiente"></a>')
titulo = scrapertools.find_single_match(data,
'<h1 class="tithd bold fs18px lnht30px ico_b pdtop10px">([^<]+)</h1> ')
existe = scrapertools.find_single_match(data, '<center>La pel.cula que quieres ver no existe.</center>')

from core import servertools
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
if 'youtube' in videoitem.url:
itemlist.remove(videoitem)
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.action = "play"
videoitem.folder = False
videoitem.fanart = item.fanart
videoitem.title = titulo + " " + videoitem.server
if item.extra2 != 'todos':
data = httptools.downloadpage(anterior).data
existe = scrapertools.find_single_match(data, '<center>La pel.cula que quieres ver no existe.</center>')
if not existe:
itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Anterior', url=anterior,
thumbnail='https://s1.postimg.cc/dbq8gvldb/anterior.png', folder=True))

data = httptools.downloadpage(siguiente).data
existe = scrapertools.find_single_match(data, '<center>La pel.cula que quieres ver no existe.</center>')
if not existe:
itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Siguiente', url=siguiente,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png', folder=True))
new_url = get_link(get_source(item.url))
new_url = get_link(get_source(new_url))
video_id = scrapertools.find_single_match(new_url, 'http.*?h=(\w+)')
new_url = '%s%s' % (host, 'playeropstream/api.php')
post = {'h': video_id}
post = urllib.urlencode(post)
data = httptools.downloadpage(new_url, post=post).data
json_data = jsontools.load(data)
url = json_data['url']
server = servertools.get_server_from_url(url)
title = '%s' % server
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play',
server=server, infoLabels=item.infoLabels))

return itemlist

@@ -7,7 +7,8 @@
"thumbnail": "https://s22.postimg.cc/nucz720sx/image.png",
"banner": "",
"categories": [
"tvshow"
"tvshow",
"vos"
],
"settings": [
{

@@ -6,6 +6,10 @@
{
"pattern": "https://estream.to/embed-([a-z0-9]+).html",
"url": "https://estream.to/\\1.html"
},
{
"pattern": "https://estream.xyz/embed-([a-z0-9]+).html",
"url": "https://estream.to/\\1.html"
}
]
},

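A standalone sketch of how a find_videos pattern/url pair like the new estream.xyz entry is meant to work: the id captured from the embed URL is substituted into the canonical estream.to page (the id in the example is invented).

import re

pattern = r'https://estream.xyz/embed-([a-z0-9]+).html'
target = r'https://estream.to/\1.html'

page = 'iframe src=https://estream.xyz/embed-ab12cd34ef56.html frameborder=0'
print(re.sub(pattern, target, page))
# iframe src=https://estream.to/ab12cd34ef56.html frameborder=0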
plugin.video.alfa/servers/jawcloud.json (new file, 42 lines)
@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(jawcloud.co/embed-([A-z0-9]+))",
"url": "https://\\1.html"
}
]
},
"free": true,
"id": "jawcloud",
"name": "jawcloud",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s8.postimg.cc/b64mzlgxh/jawcloud1.png"
}

plugin.video.alfa/servers/jawcloud.py (new file, 20 lines)
@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
return True, ""


def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
videourl = scrapertools.find_single_match(data, 'source src="([^"]+)')
video_urls.append([".MP4 [jawcloud]", videourl])

return video_urls
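A standalone sketch of the scrape get_video_url performs above (the HTML snippet and file host are invented); inside Kodi the function is reached through servertools after the jawcloud.json patterns have resolved the embed URL.

import re

data = '<video><source src="https://fs1.jawcloud.co/v/abc123.mp4" type="video/mp4"></video>'
videourl = re.search(r'source src="([^"]+)', data).group(1)
video_urls = [[".MP4 [jawcloud]", videourl]]
print(video_urls)  # [['.MP4 [jawcloud]', 'https://fs1.jawcloud.co/v/abc123.mp4']]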