fix: channels restored

commit 3b8a20fbd7
parent 76130a91f6
Author: greko17
Date:   2019-08-05 10:43:56 +02:00

4 changed files with 370 additions and 14 deletions

channels/altadefinizione01.py

@@ -3,6 +3,12 @@
 # Channel for altadefinizione01
 # ------------------------------------------------------------
+"""
+TO FINISH - CHECK
+"""
 from specials import autoplay
 from core import servertools, support, jsontools
 from core.item import Item
@@ -38,10 +44,10 @@ def peliculas(item):
     support.log('peliculas', item)
     action = "findvideos"
-##    if item.args == "search":
-##        patronBlock = r'</script> <div class="boxgrid caption">(.*?)<div id="right_bar">'
-##    else:
-##        patronBlock = r'<div class="cover_kapsul ml-mask">(.*?)<div class="page_nav">'
+    if item.args == "search":
+        patronBlock = r'</script> <div class="boxgrid caption">(.*?)<div id="right_bar">'
+    else:
+        patronBlock = r'<div class="cover_kapsul ml-mask">(.*?)<div class="page_nav">'
     patron = r'<div class="cover boxcaption"> <h2>.<a href="(?P<url>[^"]+)">.*?<.*?src="(?P<thumb>[^"]+)"'\
             '.+?[^>]+>[^>]+<div class="trdublaj"> (?P<quality>[A-Z]+)<[^>]+>(?:.[^>]+>(?P<lang>.*?)<[^>]+>).*?'\
             '<p class="h4">(?P<title>.*?)</p>[^>]+> [^>]+> [^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+> [^>]+> '\
@@ -60,7 +66,7 @@ def categorie(item):
     if item.args != 'orderalf': action = "peliculas"
     else: action = 'orderalf'
-    blacklist = 'Altadefinizione01'
+    blacklist = 'altadefinizione01'
     if item.args == 'genres':
         patronBlock = r'<ul class="kategori_list">(.*?)</ul>'

channels/animeleggendari.py

@@ -2,7 +2,9 @@
 # ------------------------------------------------------------
 # Channel for animeleggendari
 # ------------------------------------------------------------
+"""
+TO COMPLETE - CHECK
+"""
 import re
 from core import servertools, httptools, scrapertoolsV2, tmdb, support

channels/serietvsubita.json

@@ -8,14 +8,6 @@
     "banner": "serietvsubita.png",
     "categories": ["tvshow"],
     "settings": [
-        {
-            "id": "channel_host",
-            "type": "text",
-            "label": "Host del canale",
-            "default": "http://serietvsubita.xyz",
-            "enabled": true,
-            "visible": true
-        },
         {
             "id": "include_in_global_search",
             "type": "bool",

channels/serietvsubita.py (new file, +356 lines)

@@ -0,0 +1,356 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for Serietvsubita
# Thanks to Icarus crew & Alfa addon & 4l3x87
# ------------------------------------------------------------
import re
import time

from core import httptools, tmdb, scrapertools, support
from core.item import Item
from core.support import log
from platformcode import logger, config

__channel__ = "serietvsubita"
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]

IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['gounlimited', 'verystream', 'streamango', 'openload']
list_quality = ['default']
@support.menu
def mainlist(item):
    log()
    itemlist = []
    tvshowSub = [
        ('Novità bold', ['', 'peliculas_tv', '', 'tvshow']),
        ('Serie TV bold', ['', 'lista_serie', '', 'tvshow']),
        ('Per Lettera', ['', 'list_az', 'serie', 'tvshow'])
    ]
    ## support.aplay(item, itemlist, list_servers, list_quality)
    ## support.channel_config(item, itemlist)
    return locals()
# ----------------------------------------------------------------------------------------------------------------
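# Normalize a scraped title: strip HTML entities and site-specific noise, and
# drop a trailing "(YYYY)" year so TMDB lookups match more reliably.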
def cleantitle(scrapedtitle):
    scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
    # the typographic apostrophe is replaced with a plain quote (assumption: the
    # original character was a curly quote, lost in transit)
    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', "'").replace('×', 'x').replace('Game of Thrones ', '')\
        .replace('In The Dark 2019', 'In The Dark (2019)').replace('"', "'").strip()
    year = scrapertools.find_single_match(scrapedtitle, r'\((\d{4})\)')
    if year:
        scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
    return scrapedtitle.strip()
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
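# Resolve the video links for one episode: the post body lists every episode of
# the season, so the 'SxxEyy' markers are first rewritten to a uniform
# '..:: Episodio N' format, then only the block for the requested episode is
# kept before handing the remaining links to support.server().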
def findvideos(item):
    log()
    data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
    data = re.sub(r'\n|\t|\s+', ' ', data)
    # retrieve the block containing the links
    blocco = scrapertools.find_single_match(data, r'<div class="entry">([\s\S.]*?)<div class="post').replace('..:: Episodio ', 'Episodio ').strip()
    matches = scrapertools.find_multiple_matches(blocco, r'(S(\d*)E(\d*))\s')
    if len(matches) > 0:
        for fullseasonepisode, season, episode in matches:
            blocco = blocco.replace(fullseasonepisode + ' ', 'Episodio ' + episode + ' ')
    blocco = blocco.replace('Episodio ', '..:: Episodio ')
    episodio = item.infoLabels['episode']
    patron = r'\.\.:: Episodio %s([\s\S]*?)(<div class="post|\.\.:: Episodio)' % episodio
    log(patron)
    log(blocco)
    matches = scrapertools.find_multiple_matches(blocco, patron)
    if len(matches):
        data = matches[0][0]
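    # keeplinks.co/eu is a link protector: sending the per-link cookie
    # flag[<id>]=1 together with 'nopopatall' appears to skip the interstitial
    # page, so the response already contains the final hoster links
    # ('</lable>' below matches a misspelling in the site's own HTML).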
patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
matches = re.compile(patron, re.DOTALL).findall(data)
for keeplinks, id in matches:
headers2 = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
['Referer', keeplinks]]
html = httptools.downloadpage(keeplinks, headers=headers2).data
data += str(scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"'))
return support.server(item, data=data)
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
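# List the series, either from the site's category index or from a
# '||'-packed list prepared by list_az(); results are paged client-side,
# PERPAGE at a time, with the page number appended to the URL after '{}'.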
def lista_serie(item):
    log()
    itemlist = []
    PERPAGE = 15

    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    if '||' in item.url:
        series = item.url.split('\n\n')
        matches = []
        for i, serie in enumerate(series):
            matches.append(serie.split('||'))
    else:
        # extract the entries
        patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
        matches = support.match(item, patron, headers=headers)[0]

    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        scrapedplot = ""
        scrapedthumbnail = ""
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break
        title = cleantitle(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodios",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 contentType='episode',
                 originalUrl=scrapedurl,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # pagination
    if len(matches) >= p * PERPAGE:
        support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1)))
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
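# Build the episode list for a series. Posts marked "(Completa)" carry a whole
# season, so the post page is fetched and its 'Episodio N' markers are rewritten
# to SxxEyy; pagination is followed recursively, accumulating into itemlist.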
def episodios(item, itemlist=None):
    log()
    # avoid a mutable default argument: the list is created per call and only
    # shared explicitly through the pagination recursion below
    if itemlist is None:
        itemlist = []
    patron = r'<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>.*?'
    patron += r'<p><a href="([^"]+)">'
    matches, data = support.match(item, patron, headers=headers)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedtitle = cleantitle(scrapedtitle)
        if "(Completa)" in scrapedtitle:
            data = httptools.downloadpage(scrapedurl, headers=headers).data
            scrapedtitle = scrapedtitle.replace(" Miniserie", " Stagione 1")
            title = scrapedtitle.split(" Stagione")[0].strip()
            # retrieve the season number
            season = scrapertools.find_single_match(scrapedtitle, 'Stagione ([0-9]*)')
            blocco = scrapertools.find_single_match(data, r'<div class="entry">[\s\S.]*?<div class="post')
            # normalize the spacing around the episode markers (assumption: the
            # original replaced non-breaking spaces, lost in transit as a no-op)
            blocco = blocco.replace('<strong>Episodio\xa0', '<strong>Episodio ').replace('\xa0</strong>', ' </strong>')
            blocco = blocco.replace('<strong>Episodio ', '<strong>S' + season.zfill(2) + 'E')
            matches = scrapertools.find_multiple_matches(blocco, r'(S(\d*)E(\d*))\s')
            episodes = []
            if len(matches) > 0:
                for fullepisode_s, season, episode in matches:
                    season = season.lstrip("0")
                    episodes.append([
                        "".join([season, "x", episode]),
                        season,
                        episode
                    ])
        else:
            title = scrapedtitle.split(" S0")[0].strip()
            title = title.split(" S1")[0].strip()
            title = title.split(" S2")[0].strip()
            episodes = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')
        for fullepisode, season, episode in episodes:
            infoLabels = {}
            infoLabels['season'] = season
            infoLabels['episode'] = episode
            fullepisode += ' ' + support.typo("Sub-ITA", '_ [] color kod')
            itemlist.append(
                Item(channel=item.channel,
                     extra=item.extra,
                     action="findvideos",
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     title=fullepisode,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     plot=scrapedplot,
                     contentSerieName=title,
                     infoLabels=infoLabels,
                     folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # pagination: recurse over the next page, then restore the original URL
    patron = r'<strong class="on">\d+</strong>\s*<a href="([^<]+)">\d+</a>'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        item.url = next_page
        itemlist = episodios(item, itemlist)
    else:
        item.url = item.originalUrl
        support.videolibrary(itemlist, item, 'bold color kod')
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
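# List the most recent episode posts from the home page, skipping the
# non-episode entries (FACEBOOK, RAPIDGATOR, ...) and parsing 'NxM'
# season/episode markers from the titles.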
def peliculas_tv(item):
    log()
    itemlist = []
    patron = r'<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>'
    matches, data = support.match(item, patron, headers=headers)
    for scrapedurl, scrapedtitle in matches:
        if scrapedtitle in ["FACEBOOK", "RAPIDGATOR", "WELCOME!"]:
            continue
        scrapedthumbnail = ""
        scrapedplot = ""
        scrapedtitle = cleantitle(scrapedtitle)
        infoLabels = {}
        episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')
        if episode:  # workaround for posts with whole seasons or other content; those should be intercepted TODO
            episode = episode[0]
            title = scrapedtitle.split(" S0")[0].strip()
            title = title.split(" S1")[0].strip()
            title = title.split(" S2")[0].strip()
            infoLabels['season'] = episode[1]
            infoLabels['episode'] = episode[2].zfill(2)
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     title=title + " - " + episode[0] + " " + support.typo("Sub-ITA", '_ [] color kod'),
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     contentSerieName=title,
                     contentLanguage='Sub-ITA',
                     plot=scrapedplot,
                     infoLabels=infoLabels,
                     folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # pagination
    patron = r'<strong class="on">\d+</strong>\s<a href="([^<]+)">\d+</a>'
    support.nextPage(itemlist, item, data, patron)
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
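# Entry point for KOD's global news menu: delegates to peliculas_tv on the
# site's home page, swallowing scrape errors so the menu still renders.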
def newest(categoria):
    log(categoria)
    itemlist = []
    item = Item()
    item.url = host
    item.extra = 'serie'
    try:
        if categoria == "series":
            itemlist = peliculas_tv(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
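# Site search: the full series index is fetched and filtered client-side
# against the query text.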
def search(item, texto):
    log(texto)
    itemlist = []
    patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
    matches = support.match(item, patron, headers=headers)[0]
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        if texto.upper() in scrapedtitle.upper():
            scrapedthumbnail = ""
            scrapedplot = ""
            title = cleantitle(scrapedtitle)
            itemlist.append(
                Item(channel=item.channel,
                     extra=item.extra,
                     action="episodios",
                     title=title,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     fulltitle=title,
                     show=title,
                     plot=scrapedplot,
                     contentType='episode',
                     originalUrl=scrapedurl,
                     folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
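# Group every series by its initial letter; each letter item packs its
# 'url||title' pairs joined by blank lines, which lista_serie() unpacks.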
def list_az(item):
    log()
    itemlist = []
    alphabet = dict()
    patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
    matches = support.match(item, patron, headers=headers)[0]
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        letter = scrapedtitle[0].upper()
        if letter not in alphabet:
            alphabet[letter] = []
        alphabet[letter].append(scrapedurl + '||' + scrapedtitle)
    for letter in sorted(alphabet):
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_serie",
                 url='\n\n'.join(alphabet[letter]),
                 title=letter,
                 fulltitle=letter))
    return itemlist
# ================================================================================================================