Correction and new channel
@@ -16,17 +16,20 @@ from core.item import Item
 from platformcode import config, logger
 from channelselector import get_thumb
 
-host = 'https://www.doramasmp4.com/'
+host = 'https://www2.doramasmp4.com/'
 
 IDIOMAS = {'sub': 'VOSE', 'VO': 'VO'}
 list_language = IDIOMAS.values()
 list_quality = []
 list_servers = ['openload', 'streamango', 'netutv', 'okru', 'directo', 'mp4upload']
 
-def get_source(url):
+def get_source(url, referer=None):
     logger.info()
-    data = httptools.downloadpage(url).data
-    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    if referer is None:
+        data = httptools.downloadpage(url).data
+    else:
+        data = httptools.downloadpage(url, headers={'Referer':referer}).data
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
     return data
 
 def mainlist(item):
@@ -38,9 +41,9 @@ def mainlist(item):
     itemlist.append(Item(channel= item.channel, title="Doramas", action="doramas_menu",
                          thumbnail=get_thumb('doramas', auto=True), type='dorama'))
     itemlist.append(Item(channel=item.channel, title="Películas", action="list_all",
-                         url=host + 'catalogue?format=pelicula', thumbnail=get_thumb('movies', auto=True),
+                         url=host + 'catalogue?format%5B%5D=movie', thumbnail=get_thumb('movies', auto=True),
                          type='movie'))
-    itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'ajax/search.php',
+    itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search?s=',
                          thumbnail=get_thumb('search', auto=True)))
 
     autoplay.show_option(item.channel, itemlist)
@@ -52,7 +55,7 @@ def doramas_menu(item):
 
     itemlist =[]
 
-    itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'catalogue',
+    itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'catalogue?format%5B%5D=drama',
                          thumbnail=get_thumb('all', auto=True), type='dorama'))
     itemlist.append(Item(channel=item.channel, title="Nuevos capitulos", action="latest_episodes",
                          url=host + 'latest-episodes', thumbnail=get_thumb('new episodes', auto=True), type='dorama'))
@@ -62,22 +65,24 @@ def list_all(item):
     logger.info()
 
     itemlist = []
 
     data = get_source(item.url)
-    patron = '<div class=col-lg-2 col-md-3 col-6><a href=(.*?) title=.*?'
-    patron += '<img src=(.*?) alt=(.*?) class=img-fluid>.*?bg-primary text-capitalize>(.*?)</span>'
+    patron = '<div class="col-lg-2 col-md-3 col-6 mb-3"><a href="([^"]+)".*?<img src="([^"]+)".*?'
+    patron += 'txt-size-12">(\d{4})<.*?text-truncate">([^<]+)<.*?description">([^<]+)<.*?'
 
     matches = re.compile(patron, re.DOTALL).findall(data)
 
-    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtype in matches:
+    media_type = item.type
+    for scrapedurl, scrapedthumbnail, year, scrapedtitle, scrapedplot in matches:
         url = scrapedurl
-        scrapedtype = scrapedtype.lower()
         scrapedtitle = scrapedtitle
         thumbnail = scrapedthumbnail
         new_item = Item(channel=item.channel, title=scrapedtitle, url=url,
-                        thumbnail=thumbnail, type=scrapedtype)
-        if scrapedtype != 'dorama':
+                        thumbnail=thumbnail, type=media_type, infoLabels={'year':year})
+        if media_type != 'dorama':
             new_item.action = 'findvideos'
             new_item.contentTitle = scrapedtitle
+            new_item.type = item.type
+
         else:
             new_item.contentSerieName=scrapedtitle
@@ -99,44 +104,15 @@ def list_all(item):
                              type=item.type))
     return itemlist
 
 
-def search_results(item):
-    logger.info()
-
-    itemlist=[]
-    data = httptools.downloadpage(item.url, post=item.post).data
-    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-    patron = '<a class=media p-2 href=(.*?)><img class=mr-2 src=(.*?)>.*?500>(.*?)</div>'
-    patron += '<div class=text-muted tx-11>(.*?)</div>.*?200>(.*?)</div>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedtype in matches:
-        new_item = Item(channel=item.channel, url=scrapedurl, thumbnail=scrapedthumbnail, title=scrapedtitle)
-
-        if scrapedtype != 'dorama':
-            new_item.action = 'findvideos'
-            new_item.contentTitle = scrapedtitle
-
-        else:
-            new_item.contentSerieName=scrapedtitle
-            new_item.action = 'episodios'
-        itemlist.append(new_item)
-
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-
-    return itemlist
-
-
 def latest_episodes(item):
     logger.info()
     itemlist = []
     infoLabels = dict()
     data = get_source(item.url)
-    patron = '<div class=col-lg-3 col-md-6 mb-2><a href=(.*?) title=.*?'
-    patron +='<img src=(.*?) alt.*?truncate-width>(.*?)<.*?mb-1>(.*?)<'
+    patron = 'shadow-lg rounded" href="([^"]+)".*?src="([^"]+)".*?style="">([^<]+)<.*?>Capítulo (\d+)<'
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedep in matches:
 
         title = '%s %s' % (scrapedtitle, scrapedep)
         contentSerieName = scrapedtitle
         itemlist.append(Item(channel=item.channel, action='findvideos', url=scrapedurl, thumbnail=scrapedthumbnail,
@@ -151,8 +127,7 @@ def episodios(item):
     logger.info()
     itemlist = []
     data = get_source(item.url)
-    patron = '<a itemprop=url href=(.*?) title=.*? class=media.*?truncate-width>(.*?)<.*?'
-    patron +='text-muted mb-1>Capítulo (.*?)</div>'
+    patron = '<a itemprop="url".*?href="([^"]+)".*?title="(.*?) Cap.*?".*?>Capítulo (\d+)<'
 
     matches = re.compile(patron, re.DOTALL).findall(data)
     infoLabels = item.infoLabels
@@ -186,77 +161,56 @@ def findvideos(item):
     logger.info()
 
     itemlist = []
-    duplicated = []
-    headers={'referer':item.url}
 
     data = get_source(item.url)
-    patron = 'animated pulse data-url=(.*?)>'
+    patron = 'link="([^"]+)"'
 
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     if '</strong> ¡Este capítulo no tiene subtítulos, solo audio original! </div>' in data:
         language = IDIOMAS['vo']
     else:
         language = IDIOMAS['sub']
 
-    if item.type !='episode' and '<meta property=article:section content=Pelicula>' not in data:
-        item.type = 'dorama'
+    #if item.type !='episode' and '<meta property=article:section content=Pelicula>' not in data:
+    # if item.type !='episode' and item.type != 'movie':
+    #     item.type = 'dorama'
+    #     item.contentSerieName = item.contentTitle
+    #     item.contentTitle = ''
+    #     return episodios(item)
+    # else:
+
+    for video_url in matches:
+        headers = {'referer': video_url}
+        token = scrapertools.find_single_match(video_url, 'token=(.*)')
+        if 'fast.php' in video_url:
+            video_url = 'https://player.rldev.in/fast.php?token=%s' % token
+            video_data = httptools.downloadpage(video_url, headers=headers).data
+            url = scrapertools.find_single_match(video_data, "'file':'([^']+)'")
+        else:
+            video_url = 'https://www2.doramasmp4.com/api/redirect.php?token=%s' % token
+            video_data = httptools.downloadpage(video_url, headers=headers, follow_redirects=False).headers
+            url = scrapertools.find_single_match(video_data['location'], '\d+@@@(.*?)@@@')
+
+
+
+        new_item = Item(channel=item.channel, title='[%s] [%s]', url=url, action='play', language = language)
+        itemlist.append(new_item)
+
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
+
+    if len(itemlist) == 0 and item.type == 'search':
         item.contentSerieName = item.contentTitle
         item.contentTitle = ''
         return episodios(item)
     else:
 
-        for video_url in matches:
-            video_data = httptools.downloadpage(video_url, headers=headers).data
-            server = ''
-            if 'Media player DMP4' in video_data:
-                url = scrapertools.find_single_match(video_data, "sources: \[\{'file':'(.*?)'")
-                server = 'Directo'
-            else:
-                url = scrapertools.find_single_match(video_data, '<iframe src="(.*?)".*?scrolling="no"')
-            new_item = Item(channel=item.channel, title='[%s] [%s]', url=url, action='play', language = language)
-            if server !='':
-                new_item.server = server
-            itemlist.append(new_item)
-    # Requerido para FilterTools
 
-    # for video_item in itemlist:
-    #     if 'sgl.php' in video_item.url:
-    #         headers = {'referer': item.url}
-    #         patron_gvideo = "'file':'(.*?)','type'"
-    #         data_gvideo = httptools.downloadpage(video_item.url, headers=headers).data
-    #         video_item.url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
-    #
-    #     duplicated.append(video_item.url)
-    #     video_item.channel = item.channel
-    #     video_item.infoLabels = item.infoLabels
-    #     video_item.language=IDIOMAS['sub']
-    #
-    # patron = 'var item = {id: (\d+), episode: (\d+),'
-    # matches = re.compile(patron, re.DOTALL).findall(data)
-    #
-    # for id, episode in matches:
-    #     data_json=jsontools.load(httptools.downloadpage(host+'/api/stream/?id=%s&episode=%s' %(id, episode)).data)
-    #     sources = data_json['options']
-    #     for src in sources:
-    #         url = sources[src]
-    #
-    #         if 'sgl.php' in url:
-    #             headers = {'referer':item.url}
-    #             patron_gvideo = "'file':'(.*?)','type'"
-    #             data_gvideo = httptools.downloadpage(url, headers = headers).data
-    #             url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
-    #
-    #         new_item = Item(channel=item.channel, title='%s', url=url, language=IDIOMAS['sub'], action='play',
-    #                         infoLabels=item.infoLabels)
-    #         if url != '' and url not in duplicated:
-    #             itemlist.append(new_item)
-    #             duplicated.append(url)
-    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
-    itemlist = filtertools.get_links(itemlist, item, list_language)
 
-    # Requerido para FilterTools
-    # Requerido para AutoPlay
 
+    itemlist = filtertools.get_links(itemlist, item, list_language)
 
-    # Requerido para AutoPlay
+    # Requerido para AutoPlay
 
-    autoplay.start(itemlist, item)
+    autoplay.start(itemlist, item)
 
     return itemlist
@@ -266,14 +220,11 @@ def search(item, texto):
     import urllib
     itemlist = []
     texto = texto.replace(" ", "+")
-    item.url = item.url
-    post = {'q':texto}
-    post = urllib.urlencode(post)
+    item.url = item.url + texto
     item.type = 'search'
-    item.post = post
    if texto != '':
         try:
-            return search_results(item)
+            return list_all(item)
         except:
             itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
             return itemlist

plugin.video.alfa/channels/vi2.json (new file, 89 lines)
@@ -0,0 +1,89 @@
{
    "id": "vi2",
    "name": "vi2",
    "active": true,
    "adult": false,
    "language": ["lat", "cast"],
    "thumbnail": "https://i.postimg.cc/0Qy9wf8b/vi2.png",
    "banner": "",
    "categories": [
        "movie",
        "tvshow",
        "vos",
        "torrent"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "LAT",
                "CAST",
                "VOSE",
                "VO"
            ]
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - Terror",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_documentales",
            "type": "bool",
            "label": "Incluir en Novedades - Documentales",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "comprueba_enlaces",
            "type": "bool",
            "label": "Verificar si los enlaces existen",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "comprueba_enlaces_num",
            "type": "list",
            "label": "Número de enlaces a verificar",
            "default": 1,
            "enabled": true,
            "visible": "eq(-1,true)",
            "lvalues": [ "5", "10", "15", "20" ]
        }
    ]
}
333
plugin.video.alfa/channels/vi2.py
Normal file
333
plugin.video.alfa/channels/vi2.py
Normal file
@@ -0,0 +1,333 @@
# -*- coding: utf-8 -*-
# -*- Channel Vi2.co -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import urllib
import base64

from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger


IDIOMAS = {'Latino': 'LAT', 'Español':'CAST', 'Subtitulado': 'VOSE', 'VO': 'VO'}
list_language = IDIOMAS.values()

list_quality = ['Full HD 1080p',
                'HDRip',
                'DVDScreener',
                '720p',
                'Ts Screener hq',
                'HD Real 720p',
                'DVDRip',
                'BluRay-1080p',
                'BDremux-1080p']

list_servers = [
    'directo',
    'openload',
    'rapidvideo',
    'jawcloud',
    'cloudvideo',
    'upvid',
    'vevio',
    'gamovideo'
]

host = 'http://vi2.co'

def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []

    itemlist.append(Item(channel=item.channel, title='Peliculas', action='select_menu', type='peliculas',
                         thumbnail= get_thumb('movies', auto=True)))
    # itemlist.append(Item(channel=item.channel, title='Series', url=host+'serie', action='select_menu', type='series',
    #                      thumbnail= get_thumb('tvshows', auto=True)))


    autoplay.show_option(item.channel, itemlist)

    return itemlist

def select_menu(item):
    logger.info()

    itemlist=[]
    url = host + '/%s/es/' % item.type
    itemlist.append(Item(channel=item.channel, title='Streaming', action='sub_menu',
                         thumbnail=get_thumb('all', auto=True), type=item.type))

    itemlist.append(Item(channel=item.channel, title='Torrent', action='sub_menu',
                         thumbnail=get_thumb('all', auto=True), type=item.type))

    itemlist.append(Item(channel=item.channel, title='Generos', action='section', url=url,
                         thumbnail=get_thumb('genres', auto=True), type=item.type))

    itemlist.append(Item(channel=item.channel, title='Por Año', action='section', url=url,
                         thumbnail=get_thumb('year', auto=True), type=item.type))

    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=url + 'ajax/1/?q=',
                         thumbnail=get_thumb("search", auto=True), type=item.type))

    return itemlist

def sub_menu(item):
    logger.info()

    itemlist = []
    url = host + '/%s/es/ajax/1/' % item.type
    link_type = item.title.lower()
    if link_type == 'streaming':
        link_type = 'flash'
    movies_options = ['Todas', 'Castellano', 'Latino', 'VOSE']
    tv_options = ['Ultimas', 'Ultimas Castellano', 'Ultimas Latino', 'Ultimas VOSE']

    if item.type == 'peliculas':
        title = movies_options
        thumb_1 = 'all'
    else:
        thumb_1 = 'last'
        title = tv_options

    itemlist.append(Item(channel=item.channel, title=title[0], url=url+'?q=%s' % link_type,
                         action='list_all', thumbnail=get_thumb(thumb_1, auto=True), type=item.type,
                         link_type=link_type))

    itemlist.append(Item(channel=item.channel, title=title[1],
                         url=url + '?q=%s+espanol' % link_type, action='list_all',
                         thumbnail=get_thumb('cast', auto=True), type=item.type, send_lang='Español',
                         link_type=link_type))

    itemlist.append(Item(channel=item.channel, title=title[2],
                         url=url + '?q=%s+latino' % link_type, action='list_all',
                         thumbnail=get_thumb('lat', auto=True), type=item.type, send_lang='Latino',
                         link_type=link_type))

    itemlist.append(Item(channel=item.channel, title=title[3],
                         url=url + '?q=%s+subtitulado' % link_type, action='list_all',
                         thumbnail=get_thumb('vose', auto=True), type=item.type, send_lang='VOSE',
                         link_type=link_type))

    return itemlist

def get_source(url, referer=None):
    logger.info()
    if referer is None:
        data = httptools.downloadpage(url).data
    else:
        data = httptools.downloadpage(url, headers={'Referer':referer}).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data

def section(item):
    logger.info()
    itemlist=[]
    excluded = ['latino', 'español', 'subtitulado', 'v.o.', 'streaming', 'torrent']
    full_data = get_source(item.url)
    data = scrapertools.find_single_match(full_data, 'toptags-container(.*?)<div class="android-more-section">')

    patron = 'href="([^"]+)">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle
        url = host+scrapedurl.replace('/?','/ajax/1/?')
        if (item.title=='Generos' and title.lower() not in excluded and not title.isdigit()) or (item.title=='Por Año' and title.isdigit()):
            itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all', type=item.type))

    return itemlist


def list_all(item):
    from core import jsontools
    logger.info()
    itemlist = []
    listed =[]
    quality=''
    infoLabels = {}
    json_data= jsontools.load(get_source(item.url))
    data = json_data['render']
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)

    #if item.type == 'peliculas':
    patron = '<img class="cover".*?src="([^"]+)" data-id="\d+" '
    patron +='alt="Ver ([^\(]+)(.*?)">'
    patron += '<div class="mdl-card__menu"><a class="clean-link" href="([^"]+)">'
    patron += '.*?<span class="link-size">(.*?)<'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedtitle, extra_info, scrapedurl , size in matches:
        if item.send_lang != '':
            lang = item.send_lang
        else:
            lang = ''
        year='-'
        extra_info = extra_info.replace('(', '|').replace('[','|').replace(')','').replace(']','')
        extra_info = extra_info.split('|')
        for info in extra_info:
            info = info.strip()
            if 'Rip' in info or '1080' in info or '720' in info or 'Screener' in info:
                quality = info
            if 'ingl' in info.lower():
                info = 'VO'
            if info in IDIOMAS:
                lang = info
            elif info.isdigit():
                year = info

        if lang in IDIOMAS:
            lang = IDIOMAS[lang]

        title = '%s' % scrapedtitle.strip()
        if not config.get_setting('unify'):
            if year.isdigit():
                title = '%s [%s]' % (title, year)
            if quality != '':
                title = '%s [%s]' % (title, quality)
            if lang != '':
                title = '%s [%s]' % (title, lang)

        thumbnail = scrapedthumbnail
        url = host+scrapedurl
        if item.type == 'series':
            season, episode = scrapertools.find_single_match(scrapedtitle, '(\d+)x(\d+)')
            infoLabels['season'] = season
            infoLabels['episode'] = episode
        else:
            infoLabels['year'] = year

        if title not in listed:
            new_item = Item(channel=item.channel,
                            title=title,
                            url=url,
                            action='findvideos',
                            thumbnail=thumbnail,
                            type=item.type,
                            language = lang,
                            quality=quality,
                            link_type=item.link_type,
                            torrent_data= size,
                            infoLabels = infoLabels
                            )

            if item.type == 'peliculas':
                new_item.contentTitle = scrapedtitle
            else:
                scrapedtitle = scrapedtitle.split(' - ')
                new_item.contentSerieName = scrapedtitle[0]

            itemlist.append(new_item)
            listed.append(title)

    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Paginación

    if json_data['next']:
        actual_page = scrapertools.find_single_match(item.url, 'ajax/(\d+)/')
        next_page =int(actual_page) + 1
        url_next_page = item.url.replace('ajax/%s' % actual_page, 'ajax/%s' % next_page)
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, type=item.type,
                                   action='list_all', send_lang=item.send_lang))

    return itemlist


def findvideos(item):
    logger.info()
    import base64
    itemlist = []
    server = ''
    data = get_source(item.url)
    pre_url = scrapertools.find_single_match(data, 'class="inside-link" href="([^"]+)".*?<button type="button"')
    data = get_source(host+pre_url)
    patron = 'data-video="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    lang = item.language
    quality = item.quality

    for url in matches:
        title = ''
        link_type = ''
        url = base64.b64decode(url)

        if 'torrent' in url and item.link_type == 'torrent':
            server = 'torrent'
            link_type = 'torrent'
            title = ' [%s]' % item.torrent_data
        elif 'torrent' not in url:
            link_type = 'flash'

        if url != '' and (link_type == item.link_type.lower()):
            itemlist.append(Item(channel=item.channel, url=url, title='%s'+title, action='play', server=server,
                                 language=lang, quality=quality, infoLabels=item.infoLabels))


    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())


    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    itemlist = sorted(itemlist, key=lambda it: it.language)

    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist

def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto

    if texto != '':
        return list_all(item)
    else:
        return []

def newest(categoria):
    logger.info()
    item = Item()
    try:
        if categoria in ['peliculas']:
            item.url = host + 'ver/'
        elif categoria == 'infantiles':
            item.url = host + 'genero/animacion/'
        elif categoria == 'terror':
            item.url = host + 'genero/terror/'
        elif categoria == 'documentales':
            item.url = host + 'genero/terror/'
        item.type=item.type
        itemlist = list_all(item)
        if itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist