Merge branch 'master' into actualizados

Alfa committed 2018-02-03 09:22:09 -05:00 (committed by GitHub)
7 changed files with 434 additions and 365 deletions

plugin.video.alfa/channels/cinemahd.json

@@ -4,11 +4,11 @@
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "",
"thumbnail": "https://s13.postimg.org/bnesayzcn/cinemahd.png",
"banner": "",
"version": 1,
"categories": [
"tvshow"
"movies"
],
"settings": [
{
@@ -34,6 +34,22 @@
"default": true,
"enabled": true,
"visible": true
},
+{
+"id": "include_in_newest_latino",
+"type": "bool",
+"label": "Incluir en Novedades - latino",
+"default": true,
+"enabled": true,
+"visible": true
+},
+{
+"id": "include_in_newest_terror",
+"type": "bool",
+"label": "Incluir en Novedades - Terror",
+"default": true,
+"enabled": true,
+"visible": true
+}
]
}
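The two new settings blocks add per-channel toggles for the "Novedades" (newest) sections. For context, a hedged sketch of how channel code would read them back; the channel id "cinemahd" is inferred from the thumbnail URL, not stated in this diff, and config.get_setting(name, channel) is the same API used by hdfull.py further down:

    # Hedged sketch: reading the new toggles from channel code.
    # "cinemahd" as channel id is an assumption inferred from the thumbnail.
    from platformcode import config

    def wanted_in_newest(section):
        # section is 'latino' or 'terror', matching the new setting ids
        return config.get_setting('include_in_newest_%s' % section, 'cinemahd')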

plugin.video.alfa/channels/ (channel .py, filename not captured)

@@ -39,7 +39,6 @@ def get_source(url):
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.section == 'alpha':
patron = '<span class=Num>\d+.*?<a href=(.*?) class.*?<img src=(.*?) alt=.*?<strong>(.*?)</strong>.*?'
@@ -75,7 +74,7 @@ def list_all(item):
# Paginación
-url_next_page = scrapertools.find_single_match(data,'<a class=next.*?href=(.*?)>.*?»</a></div>')
+url_next_page = scrapertools.find_single_match(data,'<a class=next.*?href=(.*?)>')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
@@ -92,7 +91,7 @@ def section(item):
elif item.section == 'genre':
patron = '<a href=(http:.*?) class=Button STPb>(.*?)</a>'
elif item.section == 'year':
-patron = 'menu-item-15\d+><a href=(.*?\?s.*?)>(\d{4})<\/a><\/li>'
+patron = 'custom menu-item-15\d+><a href=(.*?\?s.*?)>(\d{4})<\/a><\/li>'
elif item.section == 'alpha':
patron = '<li><a href=(.*?letters.*?)>(.*?)</a>'
action = 'list_all'
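Both hunks here swap brittle regexes for ones matching the site's current markup: the year menu gains a `custom ` class prefix, and the pagination pattern drops its trailing arrow requirement. A standalone check of the pagination fix (the HTML line is illustrative, not captured from the real site):

    # -*- coding: utf-8 -*-
    # The old pattern required a trailing "»" arrow; the new one only
    # needs the href attribute, so it survives the markup change.
    import re

    html = '<a class=next page-numbers href=/letters/b/page/2/>Next</a></div>'
    old = r'<a class=next.*?href=(.*?)>.*?»</a></div>'
    new = r'<a class=next.*?href=(.*?)>'
    print(re.search(old, html))           # None: no arrow in this markup
    print(re.search(new, html).group(1))  # /letters/b/page/2/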

plugin.video.alfa/channels/cuelgame.json

@@ -1,7 +1,7 @@
{
"id": "cuelgame",
"name": "Cuelgame",
"active": true,
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "cuelgame.png",

plugin.video.alfa/channels/hdfull.py

@@ -13,7 +13,7 @@ from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
host = "https://hdfull.tv"
host = "http://hdfull.tv"
if config.get_setting('hdfulluser', 'hdfull'):
account = True
@@ -39,11 +39,22 @@ def login():
httptools.downloadpage(host, post=post)
+def set_host():
+global host
+logger.info()
+hosts_list= [host, 'https://hdfull.tv', 'https://hdfull.me']
+for url in hosts_list:
+data = httptools.downloadpage(url, only_headers=True)
+if data.sucess:
+host = url
+break
def mainlist(item):
logger.info()
itemlist = []
+set_host()
itemlist.append(Item(channel=item.channel, action="menupeliculas", title="Películas", url=host, folder=True))
itemlist.append(Item(channel=item.channel, action="menuseries", title="Series", url=host, folder=True))
@@ -569,7 +580,7 @@ def generos(item):
itemlist = []
data = agrupa_datos(httptools.downloadpage(item.url).data)
data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="https://hdfull.tv/peliculas"(.*?)</ul>')
data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="%s/peliculas"(.*?)</ul>' % host)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -591,7 +602,7 @@ def generos_series(item):
itemlist = []
data = agrupa_datos(httptools.downloadpage(item.url).data)
data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="https://hdfull.tv/series"(.*?)</ul>')
data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="%s/series"(.*?)</ul>' % host)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -642,10 +653,10 @@ def findvideos(item):
it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=True))
data_js = httptools.downloadpage("https://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js").data
data_js = httptools.downloadpage("%s/templates/hdfull/js/jquery.hdfull.view.min.js" % host).data
key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')
data_js = httptools.downloadpage("https://hdfull.tv/js/providers.js").data
data_js = httptools.downloadpage("%s/js/providers.js" % host).data
try:
data_js = jhexdecode(data_js)
@@ -882,4 +893,4 @@ def obfs(data, key, n=126):
number = (ord(chars[i]) + key) % n
chars[i] = chr(number)
return "".join(chars)
return "".join(chars)

plugin.video.alfa/channels/ (channel .py, filename not captured)

@@ -119,11 +119,11 @@ def scraper(item):
patron += 'alt="([^"]+)".*?'
patron += '">([^<]+)<.*?'
patron += '<div class="l">(.*?)<\/a><h3>.*?'
-patron += '<\/a><\/h3> <span>(.*?)<'
+#patron += '<\/a><\/h3> <span>(.*?)<'
action = "findvideos"
matches = scrapertools.find_multiple_matches(bloque_enlaces, patron)
-for url, thumb, title, quality, check_idioma, year in matches:
-year = year.strip()
+for url, thumb, title, quality, check_idioma in matches:
+#year = year.strip()
title_fan = title
title_item = "[COLOR cornflowerblue][B]" + title + "[/B][/COLOR]"
if item.contentType != "movie":
@@ -140,20 +140,20 @@ def scraper(item):
title = title
itemlist.append(
Item(channel=item.channel, title=title, fulltitle=title, url=host + url, action=action, thumbnail=thumb,
fanart="http://imgur.com/nqmJozd.jpg", extra=title_fan + "|" + title_item + "|" + year, show=title,
contentType=item.contentType, folder=True, language = idiomas, infoLabels={"year":year}))
fanart="http://imgur.com/nqmJozd.jpg", extra=title_fan + "|" + title_item + "|", show=title,
contentType=item.contentType, folder=True, language = idiomas))
## Paginación
-tmdb.set_infoLabels(itemlist)
-if year:
-next = scrapertools.find_single_match(data, 'href="([^"]+)" title="Siguiente página">')
-if len(next) > 0:
-url = next
-if not "http" in url:
-url = host + url
-itemlist.append(
-Item(channel=item.channel, action="scraper", title="[COLOR floralwhite][B]Siguiente[/B][/COLOR]",
-url=url, thumbnail="http://imgur.com/jhRFAmk.png", fanart="http://imgur.com/nqmJozd.jpg",
-extra=item.extra, contentType=item.contentType, folder=True))
+#tmdb.set_infoLabels(itemlist)
+#if year:
+next = scrapertools.find_single_match(data, 'href="([^"]+)" title="Siguiente página">')
+if len(next) > 0:
+url = next
+if not "http" in url:
+url = host + url
+itemlist.append(
+Item(channel=item.channel, action="scraper", title="[COLOR floralwhite][B]Siguiente[/B][/COLOR]",
+url=url, thumbnail="http://imgur.com/jhRFAmk.png", fanart="http://imgur.com/nqmJozd.jpg",
+extra=item.extra, contentType=item.contentType, folder=True))
return itemlist
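The year span apparently vanished from the site's markup, so this commit comments out that capture group and everything downstream that consumed `year` (the extra string, infoLabels, the tmdb lookup). The invariant forcing the paired edits: findall() yields one tuple element per capture group, so the for-loop's unpack count must match the pattern's group count exactly. A toy demonstration with placeholder data:

    # Toy demonstration of the group-count/unpack-count invariant behind
    # this hunk (placeholder pattern and data, not the site's markup).
    import re

    five = re.findall(r'(a)(b)(c)(d)(e)', 'abcde')
    for url, thumb, title, quality, check_idioma in five:
        print(title)  # 'c'; a sixth target here would raise ValueError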

plugin.video.alfa/channels/pelisplus.py

@@ -1,312 +1,320 @@
# -*- coding: utf-8 -*-
# -*- Channel PelisPlus.co -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from platformcode import logger
from platformcode import config
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
host = 'http://pelisplus.co'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Peliculas",
action="movie_menu",
))
itemlist.append(item.clone(title="Series",
action="series_menu",
))
return itemlist
def movie_menu(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Estrenos",
action="list_all",
url = host+'/estrenos/',
type = 'normal'
))
itemlist.append(item.clone(title="Generos",
action="seccion",
url=host,
seccion='generos'
))
itemlist.append(item.clone(title="Por Año",
action="seccion",
url=host,
seccion='anios'
))
return itemlist
def series_menu(item):
logger.info()
itemlist =[]
itemlist.append(item.clone(title="Todas",
action="list_all",
url=host + '/series/',
type='serie'
))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all (item):
logger.info ()
itemlist = []
if item.type not in ['normal', 'seccion', 'serie']:
post = {'page':item.page, 'type':item.type,'id':item.id}
post = urllib.urlencode(post)
data =httptools.downloadpage(item.url, post=post).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
else:
data = get_source(item.url)
if item.type == 'serie' or item.type == 'recents':
contentType = 'serie'
action = 'seasons'
else:
contentType = 'pelicula'
action = 'findvideos'
patron = 'item-%s><a href=(.*?)><figure><img.*?data-src=(.*?) alt=.*?<p>(.*?)<\/p><span>(\d{4})<\/span>'%contentType
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
url = host+scrapedurl+'p001/'
thumbnail = scrapedthumbnail
plot= ''
contentTitle=scrapedtitle
title = contentTitle
year = scrapedyear
fanart =''
new_item=item.clone(action=action,
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=fanart,
infoLabels ={'year':year}
)
if contentType =='serie':
new_item.contentSerieName=title
else:
new_item.contentTitle = title
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb =True)
#Paginacion
next_page_valid = scrapertools.find_single_match(data, '<div class=butmore(?: site=series|) page=(.*?) id=(.*?) '
'type=(.*?) limit=.*?>')
if item.type != 'normal' and (len(itemlist)>19 or next_page_valid):
type = item.type
if item.type == 'serie':
type = 'recents'
if next_page_valid:
page = str(int(next_page_valid[0])+1)
if item.type != 'recents':
id = next_page_valid[1]
type = next_page_valid[2]
else:
id =''
else:
page = str(int(item.page)+1)
id = item.id
if type =='recents':
type_pagination = '/series/pagination'
else:
type_pagination = '/pagination'
url = host+type_pagination
itemlist.append(item.clone(action = "list_all",
title = 'Siguiente >>>',
page=page,
url = url,
id = id,
type = type
))
return itemlist
def seccion(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.seccion == 'generos':
patron = '<li><a href=(.*?)><i class=ion-cube><\/i>(.*?)<\/span>'
type = 'genre'
elif item.seccion == 'anios':
patron = '<li><a href=(\/peliculas.*?)>(\d{4})<\/a>'
type = 'year'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
if item.seccion == 'generos':
cant = re.sub(r'.*?<span class=cant-genre>','',scrapedtitle)
only_title = re.sub(r'<.*','',scrapedtitle).rstrip()
title = only_title+' (%s)'%cant
url = host+scrapedurl
itemlist.append(
Item(channel=item.channel,
action="list_all",
title=title,
fulltitle=item.title,
url=url,
type = 'seccion'
))
# Paginacion
if itemlist != []:
next_page = scrapertools.find_single_match(data, '<li><a class= item href=(.*?)&limit=.*?>Siguiente <')
next_page_url = host + next_page
if next_page != '':
itemlist.append(item.clone(action="seccion",
title='Siguiente >>>',
url=next_page_url,
thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'
))
return itemlist
def seasons(item):
logger.info()
itemlist =[]
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron ='<i class=ion-chevron-down arrow><\/i>(.*?)<\/div>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels=item.infoLabels
for title in matches:
season = title.replace('Temporada ','')
infoLabels['season'] = season
itemlist.append(Item(
channel=item.channel,
title=title,
url=item.url,
action='season_episodes',
contentSerieName= item.contentSerieName,
contentSeasonNumber = season,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist[::-1]
def season_episodes(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
season = str(item.infoLabels['season'])
patron = '<a href=(.*?temporada-%s\/.*?) title=.*?i-play><\/i> (.*?)<\/a>'%season
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for url, episode in matches:
episodenumber = re.sub('C.* ','',episode)
infoLabels['episode'] = episodenumber
itemlist.append(Item(channel=item.channel,
title= episode,
url = host+url,
action = 'findvideos',
infoLabels=infoLabels,
contentEpisodeNumber=episode
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist[::-1]
-def findvideos(item):
-logger.info()
-itemlist = []
-video_list = []
-data = httptools.downloadpage(item.url).data
-data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-patron = 'data-source=(.*?) .*?tab.*?data.*?srt=(.*?) data-iframe=><a>(.*?)\s?-\s?(.*?)<\/a>'
-matches = matches = re.compile(patron, re.DOTALL).findall(data)
-for url, sub, language, quality in matches:
-if 'http' not in url:
-new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
-'=%s&srt=%s' % (url, sub)
-data = httptools.downloadpage(new_url).data
-data = re.sub(r'\\', "", data)
-video_list.extend(servertools.find_video_items(data=data))
-for video_url in video_list:
-video_url.channel = item.channel
-video_url.action = 'play'
-video_url.title = item.title + '(%s) (%s)' % (language, video_url.server)
-if video_url.language == '':
-video_url.language = language
-video_url.subtitle = sub
-video_url.contentTitle=item.contentTitle
-else:
-server = servertools.get_server_from_url(url)
-video_list.append(item.clone(title=item.title,
-url=url,
-action='play',
-quality = quality,
-language = language,
-server=server,
-subtitle = sub
-))
-if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
-itemlist.append(
-Item(channel=item.channel,
-title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
-url=item.url,
-action="add_pelicula_to_library",
-extra="findvideos",
-contentTitle=item.contentTitle
-))
-return video_list
+def get_links_by_language(item, data):
+logger.info()
+video_list = []
+language = scrapertools.find_single_match(data, 'ul id=level\d_(.*?)\s*class=')
+patron = 'data-source=(.*?)data.*?srt=(.*?)data-iframe.*?Opci.*?<.*?hidden>[^\(]\((.*?)\)'
+matches = re.compile(patron, re.DOTALL).findall(data)
+for url, sub, quality in matches:
+if 'http' not in url:
+new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
+'=%s&srt=%s' % (url, sub)
+data = httptools.downloadpage(new_url).data
+data = re.sub(r'\\', "", data)
+video_list.extend(servertools.find_video_items(data=data))
+for video_url in video_list:
+video_url.channel = item.channel
+video_url.action = 'play'
+video_url.title = item.title + '(%s) (%s)' % ('', video_url.server)
+if video_url.language == '':
+video_url.language = language
+video_url.subtitle = sub
+video_url.contentTitle = item.contentTitle
+else:
+video_list.append(item.clone(title='%s [%s] [%s]',
+url=url,
+action='play',
+quality=quality,
+language=language,
+subtitle=sub
+))
+return video_list
+def findvideos(item):
+logger.info()
+itemlist = []
+video_list = []
+data = httptools.downloadpage(item.url).data
+data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+patron_language ='(<ul id=level\d_.*?\s*class=.*?ul>)'
+matches = re.compile(patron_language, re.DOTALL).findall(data)
+for language in matches:
+video_list.extend(get_links_by_language(item, language))
+if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
+itemlist.append(
+Item(channel=item.channel,
+title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
+url=item.url,
+action="add_pelicula_to_library",
+extra="findvideos",
+contentTitle=item.contentTitle
+))
+video_list = servertools.get_servers_itemlist(video_list, lambda i: i.title % (i.server.capitalize(), i.language,i.quality) )
+return video_list
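This file was rewritten wholesale; the unchanged body above is shown once, followed by the replaced findvideos (removed lines) and the new per-language version (added lines). Note how the rewrite defers title formatting: get_links_by_language stores a bare '%s [%s] [%s]' template, and the final get_servers_itemlist(video_list, lambda i: ...) call fills it once the server is known. A minimal stand-in for that mechanism (assumed behavior of the helper's second argument; the real helper also detects servers):

    # Minimal re-creation of the deferred-title pattern; FakeItem and
    # apply_titles are hypothetical stand-ins, not Alfa APIs.
    class FakeItem(object):
        def __init__(self, title, server, language, quality):
            self.title, self.server = title, server
            self.language, self.quality = language, quality

    def apply_titles(items, fn):
        for i in items:
            i.title = fn(i)  # the callback receives each finished item
        return items

    items = [FakeItem('%s [%s] [%s]', 'openload', 'LAT', '720p')]
    apply_titles(items, lambda i: i.title % (i.server.capitalize(), i.language, i.quality))
    print(items[0].title)  # Openload [LAT] [720p]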

plugin.video.alfa/channels/ultrapeliculashd.py Executable file → Normal file

@@ -5,6 +5,7 @@ import re
from core import httptools
from core import scrapertools
from core import servertools
+from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
@@ -106,13 +107,19 @@ def lista(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
if item.extra != 'buscar':
-patron = '<article id=post-.*? class=item movies><div class=poster><a href=(.*?)><img src=(.*?) '
-patron += 'alt=(.*?)>.*?quality>.*?<.*?<\/h3><span>(.*?)<\/span>'
+patron = '<article id=.*?<img src=(.*?) alt=(.*?)>.*?<a href=(.*?)>.*?</h3><span>(.*?)<'
else:
patron = '<article><div class=image>.*?<a href=(.*?)\/><img src=(.*?) alt=(.*?) \/>.*?year>(.*?)<\/span>'
matches = re.compile(patron, re.DOTALL).findall(data)
-for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
+for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedyear in matches:
+if item.extra == 'buscar':
+aux = scrapedthumbnail
+scrapedthumbnail=scrapedtitle
+scrapedtitle = scrapedurl
+scrapedurl = aux
url = scrapedurl
thumbnail = scrapedthumbnail
contentTitle = re.sub(r'\d{4}', '', scrapedtitle)
@@ -121,6 +128,8 @@ def lista(item):
title = scrapertools.decodeHtmlentities(contentTitle)
year = scrapedyear
fanart = ''
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.title, url=url,
thumbnail=thumbnail, fanart=fanart, contentTitle=contentTitle, infoLabels={'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -140,15 +149,16 @@ def generos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-patron = '<li class=cat-item cat-item-.*?><a href=(.*?) >(.*?)<\/a> <i>(.*?)<\/i><\/li>'
+logger.debug(data)
+patron = 'genres menu-item-.*?><a href=(.*?)>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
-for scrapedurl, scrapedtitle, cantidad in matches:
+for scrapedurl, scrapedtitle in matches:
thumbnail = ''
fanart = ''
if scrapedtitle in tgenero:
thumbnail = tgenero[scrapedtitle]
-title = scrapedtitle + ' (' + cantidad + ')'
+title = scrapedtitle
url = scrapedurl
if scrapedtitle not in ['PRÓXIMAMENTE', 'EN CINE']:
itemlist.append(item.clone(action="lista",
@@ -166,23 +176,38 @@ def seccion(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-if item.extra == 'year':
-patron = '<li><a href=(.*?\/fecha-estreno.*?)>(.*?)<\/a>'
-else:
-patron = '<li><a href=(.*?) >(.*?)<\/a><\/li>'
+patron = 'glossary=(.*?)>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
-for scrapedurl, scrapedtitle in matches:
+for scrapedid, scrapedtitle in matches:
thumbnail = ''
if scrapedtitle.lower() in thumbletras:
thumbnail = thumbletras[scrapedtitle.lower()]
fanart = ''
title = scrapedtitle
-url = scrapedurl
+id = scrapedid
itemlist.append(
Item(channel=item.channel, action="lista", title=title, fulltitle=item.title, url=url, thumbnail=thumbnail,
fanart=fanart))
Item(channel=item.channel, action="alpha", title=title, fulltitle=item.title, thumbnail=thumbnail,
fanart=fanart, id = id))
return itemlist
+def alpha(item):
+logger.info()
+itemlist = []
+url = 'https://www.ultrapeliculashd.com/wp-json/dooplay/glossary/?term=%s&nonce=4e850b7d59&type=all' % item.id
+data = httptools.downloadpage(url).data
+dict_data = jsontools.load(data)
+logger.debug(dict_data)
+for elem in dict_data:
+logger.debug(dict_data[elem])
+elem = dict_data[elem]
+itemlist.append(Item(channel=item.channel, action='findvideos', title = elem['title'], url=elem['url'],
+thumbnail=elem['img']))
+return itemlist
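The new alpha() action leans on dooplay's WordPress glossary endpoint, which returns a JSON object keyed by post id. A hedged sketch of the payload shape the loop expects (canned, invented data; only the field names match the hunk; the hard-coded nonce in the real URL is presumably a WordPress nonce and may expire):

    # Canned illustration of the JSON shape alpha() iterates over.
    import json

    payload = '''{"101": {"title": "Example Movie",
                          "url": "https://example.org/pelicula/example",
                          "img": "https://example.org/thumb.jpg"}}'''
    dict_data = json.loads(payload)
    for key in dict_data:
        elem = dict_data[key]
        print(elem['title'] + ' | ' + elem['url'] + ' | ' + elem['img'])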
@@ -191,23 +216,32 @@ def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-patron = '<iframe class=metaframe rptss src=(.*?) (?:width=.*?|frameborder=0) allowfullscreen><\/iframe>'
+#logger.debug(data)
+patron = '<iframe.*?rptss src=(.*?) (?:width.*?|frameborder.*?) allowfullscreen><\/iframe>'
matches = re.compile(patron, re.DOTALL).findall(data)
for video_url in matches:
+logger.debug('video_url: %s' % video_url)
if 'stream' in video_url:
data = httptools.downloadpage('https:'+video_url).data
-new_url=scrapertools.find_single_match(data, 'iframe src="(.*?)"')
-new_data = httptools.downloadpage(new_url).data
+logger.debug(data)
+if not 'iframe' in video_url:
+new_url=scrapertools.find_single_match(data, 'iframe src="(.*?)"')
+new_data = httptools.downloadpage(new_url).data
+logger.debug('new_data %s' % new_data)
+url= ''
+try:
+url, quality = scrapertools.find_single_match(new_data, 'file:.*?(?:\"|\')(https.*?)(?:\"|\'),'
+'label:.*?(?:\"|\')(.*?)(?:\"|\'),')
+except:
+pass
+if url != '':
+headers_string = '|Referer=%s' % url
+url = url.replace('download', 'preview')+headers_string
-url, quality = scrapertools.find_single_match(new_data, 'file:.*?"(.*?)",label:.*?"(.*?)"')
-headers_string = '|Referer=%s' % url
-url = url.replace('download', 'preview')+headers_string
+sub = scrapertools.find_single_match(new_data, 'file:.*?"(.*?srt)"')
+new_item = (Item(title=item.title, url=url, quality=quality, server='directo',
+subtitle=sub))
+itemlist.append(new_item)
-sub = scrapertools.find_single_match(new_data, 'file:.*?"(.*?srt)"')
-new_item = (Item(title=item.title, url=url, quality=quality, subtitle=sub, server='directo'))
-itemlist.append(new_item)
else:
itemlist.extend(servertools.find_video_items(data=video_url))
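The '|Referer=%s' suffix uses Kodi's inline-header convention: anything after the pipe in a media URL is parsed as HTTP headers when the player opens it. Note the order in the hunk: the Referer keeps the original download URL, while the played URL is rewritten to preview. A walk-through with an invented URL:

    # Hedged walk-through of the URL rewrite (example.org URL is invented).
    url = 'https://example.org/download/abc.mp4'
    headers_string = '|Referer=%s' % url        # Referer = original download URL
    url = url.replace('download', 'preview') + headers_string
    print(url)
    # https://example.org/preview/abc.mp4|Referer=https://example.org/download/abc.mp4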
@@ -220,6 +254,7 @@ def findvideos(item):
if 'youtube' in videoitem.url:
videoitem.title = '[COLOR orange]Trailer en Youtube[/COLOR]'
+itemlist = servertools.get_servers_itemlist(itemlist)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(