* fix next page
* testing new filmontv
* Wstream quick fix, no resolution displayed :(
* new filmontv
* now regex is ok
* fix .po files
* +netlovers
* working on filmontv
* fix debriders
* new updater
* updater
* fix crash
* fix updater and re-add dev mode
* new url eurostreaming
* Delete netlovers.py
* Delete netlovers.json
* -net from menu
* fix eurostreaming: season number and newest (#50)
* fix channel
* fix newest
* fix episode number
* cleanup
* cleanup 2
* fix updater crash on windows
* Fix Animeworld
* New Autorenumber
* initial background downloader support
* oops
* Update channels.json
* Update channels.json
* fix openload
* move json update to coexist with updater
* disable json url updates
* fix typo
* fix typo 2
* Add files via upload
* Add files via upload
* fix autoplay in community channels
* fix toonitalia
* Fix Toonitalia
* workaround serietvsubita
* New Automatic Renumbering
* Fix for Automatic Renumbering
* workaround updater
* Fix on air
* oops
* Customized the "Oggi in TV" (Today on TV) section
* Added air time to the "Oggi in TV" section
* added bit.ly (#56)
* added bit.ly
* Added homepage customization
* Revert "initial background downloader support"
This reverts commit f676ab0f
* KoD 0.4
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for Serietvsubita
# Thanks to Icarus crew & Alfa addon & 4l3x87
# ----------------------------------------------------------

import re
import time

from core import httptools, tmdb, scrapertools, support
from core.item import Item
from core.support import log
from platformcode import logger, config

__channel__ = "serietvsubita"
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]

IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['gounlimited', 'verystream', 'streamango', 'openload']
list_quality = ['default']

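# mainlist builds the channel's root menu: latest episodes, the series
# index, an A-Z archive and search, all wired up through support.menu.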
def mainlist(item):
    log()
    itemlist = []
    support.menu(itemlist, 'Novità bold', 'peliculas_tv', host, 'tvshow')
    support.menu(itemlist, 'Serie TV bold', 'lista_serie', host, 'tvshow')
    support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host, 'tvshow', args=['serie'])
    support.menu(itemlist, 'Cerca', 'search', host, 'tvshow')
    support.aplay(item, itemlist, list_servers, list_quality)
    support.channel_config(item, itemlist)

    return itemlist

# ----------------------------------------------------------------------------------------------------------------
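# cleantitle normalizes a scraped title: HTML entities are decoded, the
# '[HD]' tag, curly quotes and a few site-specific quirks are stripped,
# and a trailing '(year)' is removed so the TMDB lookup matches.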
def cleantitle(scrapedtitle):
    scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×', 'x')\
        .replace('Game of Thrones –', '').replace('In The Dark 2019', 'In The Dark (2019)')\
        .replace('"', "'").strip()
    year = scrapertools.find_single_match(scrapedtitle, r'\((\d{4})\)')
    if year:
        scrapedtitle = scrapedtitle.replace('(' + year + ')', '')

    return scrapedtitle.strip()


# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
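# findvideos isolates the requested episode's link block inside the post
# body: 'SxxEyy' markers are rewritten into the site's '..:: Episodio'
# delimiters, the block for item.infoLabels['episode'] is cut out, and any
# keeplinks.co/.eu protector pages are resolved (with their anti-popup
# cookies) before the links are handed to support.server.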
def findvideos(item):
    log()
    data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
    data = re.sub(r'\n|\t|\s+', ' ', data)

    # retrieve the block containing the links
    blocco = scrapertools.find_single_match(data, r'<div class="entry">([\s\S]*?)<div class="post').replace('..:: Episodio ', 'Episodio ').strip()
    matches = scrapertools.find_multiple_matches(blocco, r'(S(\d*)E(\d*))\s')
    if matches:
        for fullseasonepisode, season, episode in matches:
            blocco = blocco.replace(fullseasonepisode + ' ', 'Episodio ' + episode + ' ')

    blocco = blocco.replace('Episodio ', '..:: Episodio ')

    episodio = item.infoLabels['episode']
    # \b stops 'Episodio 1' from also matching 'Episodio 10'
    patron = r'\.\.:: Episodio %s\b([\s\S]*?)(<div class="post|\.\.:: Episodio)' % episodio
    log(patron)
    log(blocco)

    matches = scrapertools.find_multiple_matches(blocco, patron)
    if matches:
        data = matches[0][0]

    patron = r'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for keeplinks, link_id in matches:
        headers2 = [['Cookie', 'flag[' + link_id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
                    ['Referer', keeplinks]]

        html = httptools.downloadpage(keeplinks, headers=headers2).data
        # 'lable' is not a typo here: it matches the misspelled tag in the site's own markup
        data += str(scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"'))

    return support.server(item, data=data)


# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
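# lista_serie renders the series index 15 entries per page. The page number
# travels inside item.url after a '{}' separator; A-Z sub-lists instead
# arrive as 'url||title' pairs joined by blank lines (built in list_az).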
def lista_serie(item):
    log()
    itemlist = []

    PERPAGE = 15

    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    if '||' in item.url:
        # entries packed by list_az: one 'url||title' pair per blank-line-separated chunk
        series = item.url.split('\n\n')
        matches = []
        for serie in series:
            matches.append(serie.split('||'))
    else:
        # extract the entries from the category list
        patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
        matches = support.match(item, patron, headers=headers)[0]

    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        scrapedplot = ""
        scrapedthumbnail = ""
        # keep only the entries belonging to page p
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break
        title = cleantitle(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodios",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 contentType='episode',
                 originalUrl=scrapedurl,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # pagination
    if len(matches) >= p * PERPAGE:
        support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1)))

    return itemlist


# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
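# episodios lists a series' episodes. Posts marked '(Completa)' bundle a
# whole season, so the post body is fetched and its 'Episodio N' markers are
# rewritten to 'SxxEyy' using the season taken from the title; regular posts
# carry a single 'NxM' marker in the title. Pagination is followed
# recursively, and the videolibrary entry is appended on the last page.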
def episodios(item, itemlist=None):
    log()
    # avoid a shared mutable default; the recursive pagination call below
    # passes itemlist explicitly
    if itemlist is None:
        itemlist = []
    patron = r'<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>.*?'
    patron += r'<p><a href="([^"]+)">'

    matches, data = support.match(item, patron, headers=headers)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedtitle = cleantitle(scrapedtitle)
        if "(Completa)" in scrapedtitle:
            data = httptools.downloadpage(scrapedurl, headers=headers).data
            scrapedtitle = scrapedtitle.replace(" – Miniserie", " – Stagione 1")
            title = scrapedtitle.split(" – Stagione")[0].strip()

            # retrieve the season number from the title
            season = scrapertools.find_single_match(scrapedtitle, 'Stagione ([0-9]*)')
            blocco = scrapertools.find_single_match(data, r'<div class="entry">[\s\S]*?<div class="post')
            blocco = blocco.replace('<strong>Episodio ', '<strong>S' + season.zfill(2) + 'E')
            matches = scrapertools.find_multiple_matches(blocco, r'(S(\d*)E(\d*))\s')
            episodes = []
            if matches:
                for fullepisode_s, season, episode in matches:
                    season = season.lstrip("0")

                    episodes.append([
                        "".join([season, "x", episode]),
                        season,
                        episode
                    ])

        else:
            title = scrapedtitle.split(" S0")[0].strip()
            title = title.split(" S1")[0].strip()
            title = title.split(" S2")[0].strip()
            episodes = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')

        for fullepisode, season, episode in episodes:
            infoLabels = {}
            infoLabels['season'] = season
            infoLabels['episode'] = episode
            fullepisode += ' ' + support.typo("Sub-ITA", '_ [] color kod')
            itemlist.append(
                Item(channel=item.channel,
                     extra=item.extra,
                     action="findvideos",
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     title=fullepisode,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     plot=scrapedplot,
                     contentSerieName=title,
                     infoLabels=infoLabels,
                     folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # pagination: recurse while the site exposes a next page
    patron = r'<strong class="on">\d+</strong>\s*<a href="([^<]+)">\d+</a>'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        item.url = next_page
        itemlist = episodios(item, itemlist)
    else:
        item.url = item.originalUrl
        support.videolibrary(itemlist, item, 'bold color kod')

    return itemlist


# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
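# peliculas_tv lists the newest episode posts from the front page, skipping
# the site's non-episode posts and deriving season/episode numbers from the
# 'NxM' marker in each title.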
def peliculas_tv(item):
    log()
    itemlist = []

    patron = r'<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>'

    matches, data = support.match(item, patron, headers=headers)

    for scrapedurl, scrapedtitle in matches:
        if scrapedtitle in ["FACEBOOK", "RAPIDGATOR", "WELCOME!"]:
            continue

        scrapedthumbnail = ""
        scrapedplot = ""
        scrapedtitle = cleantitle(scrapedtitle)
        infoLabels = {}
        episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')
        if episode:  # workaround for posts that bundle whole seasons or other content; those should be handled properly TODO
            episode = episode[0]
            title = scrapedtitle.split(" S0")[0].strip()
            title = title.split(" S1")[0].strip()
            title = title.split(" S2")[0].strip()

            infoLabels['season'] = episode[1]
            infoLabels['episode'] = episode[2].zfill(2)

            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     title=title + " - " + episode[0] + " " + support.typo("Sub-ITA", '_ [] color kod'),
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     contentSerieName=title,
                     contentLanguage='Sub-ITA',
                     plot=scrapedplot,
                     infoLabels=infoLabels,
                     folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # pagination
    patron = r'<strong class="on">\d+</strong>\s<a href="([^<]+)">\d+</a>'
    support.nextPage(itemlist, item, data, patron)

    return itemlist


# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
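# newest feeds the global "news" section: for the 'series' category it
# simply runs peliculas_tv against the site's front page.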
def newest(categoria):
    log(categoria)
    itemlist = []
    item = Item()
    item.url = host
    item.extra = 'serie'
    try:
        if categoria == "series":
            itemlist = peliculas_tv(item)

    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
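# search filters the full category list by case-insensitive substring match.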
def search(item, texto):
    log(texto)
    itemlist = []

    patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
    matches = support.match(item, patron, headers=headers)[0]

    for scrapedurl, scrapedtitle in matches:
        if texto.upper() in scrapedtitle.upper():
            scrapedthumbnail = ""
            scrapedplot = ""
            title = cleantitle(scrapedtitle)
            itemlist.append(
                Item(channel=item.channel,
                     extra=item.extra,
                     action="episodios",
                     title=title,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     fulltitle=title,
                     show=title,
                     plot=scrapedplot,
                     contentType='episode',
                     originalUrl=scrapedurl,
                     folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist


# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
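# list_az groups every series under its initial letter and packs each
# letter's entries into a single pseudo-URL that lista_serie unpacks again,
# e.g. (hypothetical values):
#   'https://host/cat/a||Alpha Show\n\nhttps://host/cat/b||Beta Show'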
def list_az(item):
    log()
    itemlist = []

    alphabet = dict()
    patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
    matches = support.match(item, patron, headers=headers)[0]

    for scrapedurl, scrapedtitle in matches:
        letter = scrapedtitle[0].upper()
        if letter not in alphabet:
            alphabet[letter] = []
        alphabet[letter].append(scrapedurl + '||' + scrapedtitle)

    for letter in sorted(alphabet):
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_serie",
                 url='\n\n'.join(alphabet[letter]),
                 title=letter,
                 fulltitle=letter))

    return itemlist

# ================================================================================================================