Merge pull request #245 from Intel11/actualizados

Updated
Alfa authored on 2018-04-11 16:34:00 -05:00; committed by GitHub
8 changed files with 190 additions and 115 deletions

View File

@@ -23,36 +23,6 @@ list_quality = CALIDADES.values()
list_servers = ['directo', 'openload']
host = 'http://doomtv.net/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0 Chrome/58.0.3029.110',
'Referer': host}
tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
"Suspenso": "https://s13.postimg.org/wmw6vl1cn/suspenso.png",
"Drama": "https://s16.postimg.org/94sia332d/drama.png",
"Acción": "https://s3.postimg.org/y6o9puflv/accion.png",
"Aventura": "https://s10.postimg.org/6su40czih/aventura.png",
"Romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
"Animación": "https://s13.postimg.org/5on877l87/animacion.png",
"Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
"Terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
"Documental": "https://s16.postimg.org/7xjj4bmol/documental.png",
"Música": "https://s29.postimg.org/bbxmdh9c7/musical.png",
"Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png",
"Bélico Guerra": "https://s23.postimg.org/71itp9hcr/belica.png",
"Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png",
"Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png",
"Biográfia": "https://s15.postimg.org/5lrpbx323/biografia.png",
"Familia": "https://s7.postimg.org/6s7vdhqrf/familiar.png",
"Familiar": "https://s7.postimg.org/6s7vdhqrf/familiar.png",
"Intriga": "https://s27.postimg.org/v9og43u2b/intriga.png",
"Thriller": "https://s22.postimg.org/5y9g0jsu9/thriller.png",
"Guerra": "https://s4.postimg.org/n1h2jp2jh/guerra.png",
"Estrenos": "https://s21.postimg.org/fy69wzm93/estrenos.png",
"Peleas": "https://s14.postimg.org/we1oyg05t/peleas.png",
"Policiales": "https://s21.postimg.org/n9e0ci31z/policial.png",
"Uncategorized": "https://s30.postimg.org/uj5tslenl/otros.png",
"LGBT": "https://s30.postimg.org/uj5tslenl/otros.png"}
def mainlist(item):
@@ -177,15 +147,13 @@ def seccion(item):
url = scrapedurl
title = scrapedtitle
thumbnail = ''
if title in tgenero:
thumbnail = tgenero[title]
if url not in duplicado:
itemlist.append(
Item(channel=item.channel,
action='lista',
title=title,
url=url,
thumbnail = thumbnail
thumbnail=thumbnail
))
return itemlist
@@ -221,64 +189,25 @@ def newest(categoria):
return itemlist
def get_vip(item, url):
logger.info()
itemlist = []
data = httptools.downloadpage(url+'/videocontent').data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
video_id = scrapertools.find_single_match(data, 'id=videoInfo ><span >(.*?)</span>')
new_url = 'https://v.d0stream.com/api/videoinfo/%s?src-url=https://Fv.d0stream.com' % video_id
json_data = httptools.downloadpage(new_url).data
dict_data = jsontools.load(json_data)
sources = dict_data['sources']
for vip_item in sources['mp4_cdn']:
vip_url= vip_item['url']
vip_quality = vip_item['label']
title ='%s [%s]' % (item.title, vip_quality)
itemlist.append(item.clone(title = title, url=vip_url, action='play', quality=vip_quality, server='directo'))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
player_vip = scrapertools.find_single_match(data, 'class=movieplay><iframe src=(https://v.d0stream.com.*?) frameborder')
itemlist.extend(get_vip(item, player_vip))
patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)'
patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|frameborder|><\/script>)'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, urls in matches:
if 'content' in urls:
urls = '%s%s'%('http:',urls)
hidden_data = httptools.downloadpage(urls).data
hidden_data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", hidden_data)
patron = 'sources: \[{file: (.*?),'
matches = re.compile(patron, re.DOTALL).findall(hidden_data)
for videoitem in matches:
new_item = Item(
channel = item.channel,
url = videoitem,
title = item.title,
contentTitle = item.title,
action = 'play',
)
itemlist.append(new_item)
else:
new_item = Item(
channel=item.channel,
url=urls,
title=item.title,
contentTitle=item.title,
action='play',
)
itemlist.append(new_item)
new_item = Item(
channel=item.channel,
url=urls,
title=item.title,
contentTitle=item.title,
action='play',
)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
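For reference, a minimal standalone sketch of the flow the new get_vip() follows: pull the video id out of the quote-stripped page, build the v.d0stream.com videoinfo URL, and walk the mp4_cdn entries of the returned JSON. The page fragment and JSON document below are hypothetical; only the regex and the 'sources' / 'mp4_cdn' / 'url' / 'label' keys come from the channel code.

import json
import re

page = '<div id=videoInfo ><span >abc123</span></div>'            # hypothetical, already quote-stripped
video_id = re.search(r'id=videoInfo ><span >(.*?)</span>', page).group(1)
api_url = 'https://v.d0stream.com/api/videoinfo/%s' % video_id    # query string omitted here

sample = '{"sources": {"mp4_cdn": [{"url": "https://cdn.example/v.mp4", "label": "720p"}]}}'
for vip_item in json.loads(sample)['sources']['mp4_cdn']:
    print('%s -> %s' % (vip_item['label'], vip_item['url']))      # 720p -> https://cdn.example/v.mp4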

View File

@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import re
import urllib
from core import httptools
from core import scrapertools
@@ -215,37 +216,29 @@ def findvideos(item):
itemlist = []
data = get_source(item.url)
patron = '<div class=TPlayer.*?\s+id=(.*?)><iframe width=560 height=315 src=(.*?) frameborder=0'
patron = '<div class=TPlayerTb.Current id=(.*?)>.*?src=(.*?) frameborder'
matches = re.compile(patron, re.DOTALL).findall(data)
base_link = 'https://repros.live/player/ajaxdata'
for opt, urls_page in matches:
logger.debug ('option: %s' % opt)
language = scrapertools.find_single_match (data,'data-TPlayerNv=%s><span>Opción <strong>.'
'<\/strong><\/span>.*?<span>(.*?)<\/span'%opt)
video_data = httptools.downloadpage(urls_page).data
servers = scrapertools.find_multiple_matches(video_data,'<button id="(.*?)"')
for server in servers:
quality = item.quality
info_urls = urls_page.replace('embed','get')
video_info=httptools.downloadpage(info_urls+'/'+server).data
video_info = jsontools.load(video_info)
video_id = video_info['extid']
video_server = video_info['server']
video_status = video_info['status']
if video_status in ['finished', 'propio']:
if video_status == 'finished':
url = 'https://'+video_server+'/embed/'+video_id
else:
url = 'https://'+video_server+'/e/'+video_id
title = item.contentTitle + ' [%s] [%s]'%(quality, language)
itemlist.append(item.clone(title=title,
url=url,
action='play',
language=language,
quality=quality
))
itemlist = servertools.get_servers_itemlist(itemlist)
language = scrapertools.find_single_match (data,'TPlayerNv>.*?tplayernv=%s><span>Opción.*?<span>(.*?)</span>' % opt)
headers = {'referer':item.url}
if 'trembed' in urls_page:
urls_page = scrapertools.decodeHtmlentities(urls_page)
sub_data=httptools.downloadpage(urls_page).data
urls_page = scrapertools.find_single_match(sub_data, 'src="(.*?)" ')
video_data = httptools.downloadpage(urls_page, headers=headers).data
servers = scrapertools.find_multiple_matches(video_data,'data-player="(.*?)" data-embed="(.*?)">')
for server, code in servers:
post = {'codigo':code}
post = urllib.urlencode(post)
video_json=jsontools.load(httptools.downloadpage('https://repros.live/player/ajaxdata', post=post).data)
url = video_json['url']
itemlist.append(item.clone(title='[%s][%s]',
url=url,
action='play',
language=language,
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
return itemlist
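A minimal sketch of the new repros.live handshake used above: scrape the data-player / data-embed pairs from the embed page, then POST each embed code as 'codigo' to the ajaxdata endpoint and read 'url' from the JSON reply. The button markup is hypothetical, and the network step is left as comments because it goes through the addon's httptools/jsontools helpers.

import re
import urllib    # Python 2, matching the urllib.urlencode call added to the channel

video_data = '<button data-player="fembed" data-embed="XyZ123">Opcion 1</button>'   # hypothetical
for server, code in re.findall(r'data-player="(.*?)" data-embed="(.*?)">', video_data):
    post = urllib.urlencode({'codigo': code})
    # data = httptools.downloadpage('https://repros.live/player/ajaxdata', post=post).data
    # url = jsontools.load(data)['url']
    print('%s -> POST codigo=%s' % (server, code))               # fembed -> POST codigo=XyZ123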

View File

@@ -14,7 +14,7 @@ from core import tmdb
from core.item import Item, InfoLabels
from platformcode import config, logger
host = "https://pepecine.info"
host = "https://pepecinehd.tv"
perpage = 20
def mainlist1(item):
@@ -29,7 +29,7 @@ def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel,
title="Ultimas",
url=host+'/peliculas-tv-online',
url=host+'/tv-peliculas-online',
action='list_latest',
indexp=1,
type='movie'))
@@ -149,7 +149,7 @@ def list_latest(item):
logger.info()
itemlist = []
data = get_source(item.url)
data_url= scrapertools.find_single_match(data,'<iframe.*?src=(.*?) style')
data_url= scrapertools.find_single_match(data,'<iframe.*?src=(.*?) ')
data = get_source(data_url)
patron = "<div class='online'>.*?<img src=(.*?) class=.*?alt=(.*?) title=.*?"
patron += "<b><a href=(.*?) target=.*?align=right><div class=s7>(.*?) <"
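The loosened iframe pattern above drops the ' style' anchor and simply cuts at the first space after the src value, so pages whose iframe carries no style attribute still match. A tiny check against hypothetical, quote-stripped markup:

import re

data = '<iframe class=player src=https://example.host/embed/123 width=560 height=315>'   # hypothetical
print(re.search(r'<iframe.*?src=(.*?) ', data).group(1))         # -> https://example.host/embed/123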

View File

@@ -24,7 +24,7 @@
"Inglés",
"Latino",
"Catalán",
"VOS"
"VOSE"
]
},
{
@@ -44,4 +44,4 @@
"visible": true
}
]
}
}

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://.+.d0stream.com/embed/([a-z0-9]+)",
"url": "https://v.d0stream.com/embed/\\1"
}
]
},
"free": true,
"id": "dostream",
"name": "dostream",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s14.postimg.org/lczc08bsx/dostream.png"
}
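The pattern / url pair above presumably lets the resolver rewrite any *.d0stream.com embed link it finds into the canonical v.d0stream.com form; the substitution itself would look roughly like this (how servertools actually applies the pattern is not part of this diff):

import re

pattern = r'https://.+.d0stream.com/embed/([a-z0-9]+)'
sample = 'https://x1.d0stream.com/embed/abc123'                  # hypothetical link scraped from a page
print(re.sub(pattern, r'https://v.d0stream.com/embed/\1', sample))
# -> https://v.d0stream.com/embed/abc123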

View File

@@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector DoStream By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[Dostream] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
logger.debug(data)
patron = "(?:'src'|'url'):'(http.*?)'"
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:
video_urls.append(['dostream',url])
return video_urls
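A quick check of the dostream connector's extraction pattern against a hypothetical player snippet. The connector first strips double quotes, newlines and repeated whitespace, so only the single-quoted 'src' / 'url' keys are expected by the regex.

import re

data = "setup({'src':'https://cdn.example/v.mp4','url':'https://cdn.example/v_hd.mp4'})"   # hypothetical
print(re.findall(r"(?:'src'|'url'):'(http.*?)'", data))
# -> ['https://cdn.example/v.mp4', 'https://cdn.example/v_hd.mp4']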

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://estream.to/embed-([a-z0-9]+).html",
"url": "https://estream.to/\\1.html"
}
]
},
"free": true,
"id": "estream",
"name": "estream",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s14.postimg.org/ibd54ayf5/estream.png"
}

View File

@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector Estream By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[Estream] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = "<source src=([^ ]+) type='video/mp4' label='.*?x(.*?)'"
matches = re.compile(patron, re.DOTALL).findall(data)
for url, quality in matches:
video_urls.append(["%sp [estream]" % quality, url])
return video_urls
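And a matching check for the estream connector, which reads the vertical resolution out of the label attribute of each <source> tag. The tag below is hypothetical; its single-quoted attributes survive the connector's double-quote stripping.

import re

data = "<source src=https://cdn.example/v.mp4 type='video/mp4' label='1280x720'"   # hypothetical
for url, quality in re.findall(r"<source src=([^ ]+) type='video/mp4' label='.*?x(.*?)'", data):
    print('%sp [estream] -> %s' % (quality, url))                # 720p [estream] -> https://cdn.example/v.mp4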