From 7ed0102ca483e938daea1e0fc7f689d2242561d8 Mon Sep 17 00:00:00 2001
From: alaquepasa <39385022+alaquepasa@users.noreply.github.com>
Date: Mon, 21 May 2018 17:35:09 +0200
Subject: [PATCH] PepeCine changes:
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
- New domain
- Series section rebuilt and re-enabled
---
plugin.video.alfa/channels/pepecine.json | 10 +-
plugin.video.alfa/channels/pepecine.py | 380 ++++++++++++++---------
2 files changed, 234 insertions(+), 156 deletions(-)
diff --git a/plugin.video.alfa/channels/pepecine.json b/plugin.video.alfa/channels/pepecine.json
index aa3d4301..a29e490f 100755
--- a/plugin.video.alfa/channels/pepecine.json
+++ b/plugin.video.alfa/channels/pepecine.json
@@ -28,14 +28,6 @@
"enabled": true,
"visible": true
},
- {
- "id": "include_in_newest_infantiles",
- "type": "bool",
- "label": "Incluir en Novedades - Infantiles",
- "default": true,
- "enabled": true,
- "visible": true
- },
{
"id": "include_in_newest_series",
"type": "bool",
@@ -45,4 +37,4 @@
"visible": true
}
]
-}
\ No newline at end of file
+}
diff --git a/plugin.video.alfa/channels/pepecine.py b/plugin.video.alfa/channels/pepecine.py
index e5025687..c59df19d 100755
--- a/plugin.video.alfa/channels/pepecine.py
+++ b/plugin.video.alfa/channels/pepecine.py
@@ -14,44 +14,60 @@ from core import tmdb
from core.item import Item, InfoLabels
from platformcode import config, logger
-host = "https://pepecinehd.tv"
-perpage = 20
+host = "https://pepecine.io"
-def mainlist1(item):
- logger.info()
- itemlist = []
- itemlist.append(Item(channel=item.channel, title="Películas", action='movies_menu'))
- #itemlist.append(item.clone(title="Series", action='tvshows_menu'))
- return itemlist
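+# The site's two-letter language codes mapped to the names shown in link titles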
+IDIOMAS = {'es': 'Español', 'en': 'Inglés', 'la': 'Latino', 'su': 'VOSE', 'vo': 'VO', 'otro': 'OVOS'}
+list_idiomas = IDIOMAS.values()
+list_language = ['default']
+
+CALIDADES = ['SD', 'HDiTunes', 'Micro-HD-720p', 'Micro-HD-1080p', '1080p', '720p']
+list_quality = CALIDADES
+
+
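+# Number of titles listed per page in the paginated menus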
+perpage = 20
def mainlist(item):
logger.info()
itemlist = []
- itemlist.append(Item(channel=item.channel,
- title="Ultimas",
- url=host+'/tv-peliculas-online',
- action='list_latest',
- indexp=1,
- type='movie'))
- itemlist.append(Item(channel=item.channel,
- title="Todas",
- url= host+'/ver-online',
- action='list_all',
- page='1',
- type='movie'))
- itemlist.append(Item(channel=item.channel,
- title="Género",
- url= host,
- action='genero',
- page='1',
- type='movie'))
- itemlist.append(Item(channel=item.channel, title = "", action =""))
- itemlist.append(Item(channel=item.channel,
- title="Buscar",
- url= host+'/esta-online?q=',
- action='search',
- page='1',
- type='movie'))
+ itemlist.append(Item(title = "Películas"))
+
+ itemlist.append(item.clone(
+ title = " Últimas películas",
+ url = host + '/las-peliculas-online',
+ action = 'list_latest',
+ type = 'movie'))
+
+ itemlist.append(item.clone(title = " Películas por género",
+ url = host + '/ver-pelicula',
+ action = 'genero',
+ type = 'movie'))
+
+ itemlist.append(item.clone(title = " Todas las películas",
+ url = host + '/ver-pelicula',
+ action = 'list_all',
+ type = 'movie'))
+
+ itemlist.append(Item(title = "Series"))
+
+ itemlist.append(item.clone(title = " Últimas series",
+ url = host + '/las-series-online',
+ action = 'list_latest',
+ type = 'series'))
+
+ itemlist.append(item.clone(title = " Series por género",
+ url = host + '/ver-serie-tv',
+ action = 'genero',
+ type = 'series'))
+
+ itemlist.append(item.clone(title = " Todas las series",
+ url = host + '/ver-serie-tv',
+ action ='list_all',
+ type = 'series'))
+
+ itemlist.append(item.clone(title = "Buscar",
+ url = host + '/donde-ver?q=',
+ action ='search',
+ type = 'movie'))
return itemlist
@@ -59,40 +75,25 @@ def genero(item):
logger.info()
itemlist=[]
data = httptools.downloadpage(item.url).data
-    data = data.replace("\n","")
-    bloque = scrapertools.find_single_match(data, 'Peliculas(.*?)</ul>')
+    searchSections = re.findall("<ul[^>]*>(.*?)</ul>", data, re.MULTILINE | re.DOTALL)
+
+ logger.info("Search sections = {0}".format(len(searchSections)))
+ itemlist.extend(search_section(item, searchSections[0], "movies"))
+ itemlist.extend(search_section(item, searchSections[1], "series"))
+
+ tmdb.set_infoLabels(itemlist)
+ for i in itemlist:
+ logger.info(i.tojson())
+ return itemlist
def get_source(url):
logger.info()
@@ -147,6 +144,10 @@ def get_source(url):
def list_latest(item):
logger.info()
+
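+    # indexp is the 1-based index of the first title shown on the current page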
+ if not item.indexp:
+ item.indexp = 1
+
itemlist = []
data = get_source(item.url)
     data_url= scrapertools.find_single_match(data,'<iframe[^>]+src="([^"]+)"')
     count = 0
     for url, thumbnail, title, language in matches:
-        if count >= item.indexp and count < item.indexp + perpage:
- path = scrapertools.find_single_match(thumbnail, "w\w+(/\w+.....)")
- filtro_list = {"poster_path": path}
- filtro_list = filtro_list.items()
- itemlist.append(Item(channel=item.channel,
- title=title,
- fulltitle=title,
- contentTitle=title,
- url=host+url,
- thumbnail=thumbnail,
- language=language,
- infoLabels={'filtro': filtro_list},
- extra="one",
- action='findvideos'))
- tmdb.set_infoLabels(itemlist)
- item.indexp += perpage
- itemlist.append(Item(channel=item.channel,
- title="Siguiente >>",
- url=item.url,
- extra="one",
- indexp=item.indexp,
- action='list_latest'))
- return itemlist
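+            # Only emit the titles that fall inside the current page window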
+ count += 1
+ if count < item.indexp:
+ continue
+
+ if count >= item.indexp + perpage:
+            break
+
+ path = scrapertools.find_single_match(thumbnail, "w\w+(/\w+.....)")
+ filtro_list = {"poster_path": path}
+ filtro_list = filtro_list.items()
+ itemlist.append(item.clone(action = 'findvideos',
+ title = title,
+ url = host + url,
+ thumbnail = thumbnail,
+ language = language,
+ infoLabels = {'filtro': filtro_list},
+ )
+ )
+ tmdb.set_infoLabels(itemlist)
+
+    # When listing from Novedades we do not get item.channel
+ if item.channel:
+ itemlist.append(item.clone(title = "Página siguiente >>>",
+ indexp = item.indexp + perpage
+ )
+ )
+ if item.indexp > 1:
+ itemlist.append(item.clone(title = "<<< Página anterior",
+ indexp = item.indexp - perpage
+ )
+ )
+
+ return itemlist
def list_all(item):
logger.info()
itemlist=[]
+
+ if not item.page:
+ item.page = 1
+
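+    # When coming from the genre menu, the genre id travels in the url as "genre=..."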
genero = scrapertools.find_single_match(item.url, "genre=(\w+)")
data= get_source(item.url)
token = scrapertools.find_single_match(data, "token:.*?'(.*?)'")
- url = host+'/titles/paginate?_token=%s&perPage=24&page=%s&order=mc_num_of_votesDesc&type=%s&minRating=&maxRating=&availToStream=1&genres[]=%s' % (token, item.page, item.type, genero)
+ url = host+'/titles/paginate?_token=%s&perPage=%d&page=%d&order=mc_num_of_votesDesc&type=%s&minRating=&maxRating=&availToStream=1&genres[]=%s' % (token, perpage, item.page, item.type, genero)
data = httptools.downloadpage(url).data
+
+ if item.type == "series":
+        # Strip the "link" arrays before parsing; it speeds up the JSON load a lot
+ data = re.sub(",? *[\"']link[\"'] *: *\[.+?\] *([,}])", "\g<1>", data)
+
dict_data = jsontools.load(data)
items = dict_data['items']
- for dict in items:
- new_item = Item(channel=item.channel,
- title=dict['title']+' [%s]' % dict['year'],
- plot = dict['plot'],
- thumbnail=dict['poster'],
- url=dict['link'],
- infoLabels={'year':dict['year']})
+
+ for element in items:
+ new_item = item.clone(
+ title = element['title']+' [%s]' % element['year'],
+ plot = element['plot'],
+ thumbnail = element['poster'],
+ infoLabels = {'year':element['year']})
+
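+        # Some listings already embed their links; keep them and mark the item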
+ if "link" in element:
+ new_item.url = element["link"]
+ new_item.extra = "links_encoded"
+
if item.type == 'movie':
- new_item.contentTitle=dict['title']
- new_item.fulltitle=dict['title']
new_item.action = 'findvideos'
+ new_item.contentTitle = element['title']
+ new_item.fulltitle = element['title']
+ if new_item.extra != "links_encoded":
+ new_item.url = host + "/ver-pelicula/" + str(element['id'])
+
elif item.type == 'series':
- new_item.contentSerieName = dict['title']
- new_item.action = ''
+ new_item.action = 'seasons'
+ new_item.url = host + "/ver-serie-tv/" + str(element['id'])
+ new_item.show = element['title']
+
itemlist.append(new_item)
+
tmdb.set_infoLabels(itemlist)
- itemlist.append(item.clone(title='Siguiente>>>',
- url=item.url,
- action='list_all',
- type= item.type,
- page=str(int(item.page) + 1)))
+
+ itemlist.append(item.clone(title = 'Página siguiente >>>',
+ page = item.page + 1))
+
+    if int(item.page) > 1:
+        itemlist.append(item.clone(title = '<<< Página anterior',
+                                   page = item.page - 1))
+
return itemlist
+def seasons(item):
+ logger.info()
+ data = httptools.downloadpage(item.url).data
+# Season links look like: <a href="..." class="sezon...">Temporada 1</a>
+
+    reSeasons = re.findall("href *= *[\"']([^\"']+)[\"'][^\"']+[\"']sezon[^>]+>([^<]+)", data)
+
+ result = [item.clone(action = "seasons_episodes",
+ title = title,
+ url = url) for url, title in reSeasons]
+
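+    # A show with a single season jumps straight to its episode list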
+ if len(result) == 1:
+ result = seasons_episodes(result[0])
+
+ return result
+
+def seasons_episodes(item):
+ logger.info()
+
+ data = httptools.downloadpage(item.url).data
+    reEpisodes = re.findall("<a[^>]+col-sm-3[^>]+href *= *[\"'](?P<url>[^\"']+).*?<img[^>]+src *= *[\"'](?P<thumbnail>[^\"']+).*?<div[^>]+>(?P<title>.*?)</div>", data, re.MULTILINE | re.DOTALL)
+
+    episodes = [item.clone(action = "findvideos",
+                           title = title,
+                           thumbnail = thumbnail,
+                           url = url) for url, thumbnail, title in reEpisodes]
+
+    return episodes
+
def findvideos(item):
logger.info()
itemlist=[]
- if item.extra == "one":
+
+ if item.extra != "links_encoded":
+
+ data = httptools.downloadpage(item.url).data
+        linksRE = re.findall("getFavicon\('(?P<url>[^']+)[^>]+>[^>]+>(?P<language>[^<]+).+?<td[^>]+>(?P<quality>[^<]*).+?<td[^>]+>(?P<antiquity>[^<]*)", data, re.MULTILINE | re.DOTALL)
+ for url, language, quality, antiquity in linksRE:
+            logger.info("URL = " + url)
+
+
data = httptools.downloadpage(item.url).data
patron = "renderTab.bind.*?'([^']+).*?"
-    patron += "app.utils.getFavicon.*?<img src=[\"']/(.*?)[\"'] .*?"
+    patron += "app.utils.getFavicon.*?<img[^>]*src *= *[\"']/([^\.]+).*?"
patron += 'color:#B1FFC5;">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
- for scrapedurl, scrapedlanguage, scrapedquality in matches:
- title = "Ver enlace en %s " + "[" + scrapedlanguage + "]" + "[" + scrapedquality + "]"
- if scrapedlanguage != 'zc':
- itemlist.append(item.clone(action='play',
- title=title,
- url=scrapedurl,
- language=scrapedlanguage
- ))
+ for scrapedurl, language, scrapedquality in matches:
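+        # A leading "z" on the code marks a download (Descargar) row rather than a stream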
+ isDD = language.startswith("z")
+ if isDD:
+ language = language[1:]
+
+ language = language[0:2]
+ language = IDIOMAS.get(language, language)
+
+ title = ("Ver" if not isDD else "Descargar") + " enlace en %s [" + language + "] [" + scrapedquality + "]"
+ if not isDD:
+ itemlist.append(item.clone(action = 'play',
+ title = title,
+ url = scrapedurl,
+ language = language
+ )
+ )
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
else:
for link in item.url:
-            language = scrapertools.find_single_match(link['label'], '(.*?)<')