Merge branch 'master' into master

mac12m99, committed by GitHub
2019-10-21 18:32:49 +02:00
58 changed files with 1551 additions and 921 deletions

@@ -45,7 +45,7 @@ Per aprirli non servono programmi particolari un semplice editor di testo è suf
Occorrente: file .json
**1. Indica la coerenza delle voci presenti in "language" con i contenuti presenti sul sito:**
valori: ita, vosi (sub-ita)
valori: ita, sub-ita (sub-ita)
- [ ] coerenti
- [ ] non coerenti
@@ -93,9 +93,9 @@ Cerca un titolo a caso in KOD e lo stesso titolo sul sito. Confronta i risultati
- [ ] OK
- indica il tipo di problema
**Sezione FILM (se il sito non ha film elimina questa parte)
**Sezione FILM (se il sito non ha film elimina questa parte)**
**TestN.3: Pagina dei Titoli
**TestN.3: Pagina dei Titoli**
*Test da effettuare mentre sei dentro un menu del canale (film, serietv, in corso ecc..)*.
Voci nel menu contestuale di KOD. Posizionati su di un titolo e controlla se hai le seguenti voci, nel menu contestuale (tasto c o tenendo enter premuto):
@@ -109,9 +109,9 @@ Voci nel menu contestuale di KOD. Posizionati su di un titolo e controlla se hai
- [ ] Si
- [ ] No
**Fine test menu contestuale
**Fine test menu contestuale**
**Fondo pagina dei titoli
**Fondo pagina dei titoli**
**3. Paginazione, controlla ci sia la voce "Successivo" (se non c'è controlla sul sito se è presente)**
@@ -128,7 +128,7 @@ Voci nel menu contestuale di KOD. Posizionati su di un titolo e controlla se hai
**5. Eventuali problemi riscontrati**
- scrivi qui il problema/i
**Sezione Serie TV (se il sito non ha serietv elimina questa parte)
**Sezione Serie TV (se il sito non ha serietv elimina questa parte)**
Test da effettuare mentre sei nella pagina dei titoli.
Per ogni titolo verifica ci siano le voci nel menu contestuale.
@@ -167,7 +167,7 @@ Cerca un titolo a caso in KOD e lo stesso titolo sul sito. Confronta i risultati
**7. Eventuali problemi riscontrati**
- scrivi qui il problema/i
**Sezione Anime (se il sito non ha anime elimina questa parte)
**Sezione Anime (se il sito non ha anime elimina questa parte)**
Test da effettuare mentre sei nella pagina dei titoli. Per ogni titolo verifica ci siano le voci nel menu contestuale.

@@ -3,9 +3,9 @@
"altadefinizione01_club": "https://www.altadefinizione01.cc",
"altadefinizione01_link": "http://altadefinizione01.town",
"altadefinizioneclick": "https://altadefinizione.cloud",
"altadefinizionehd": "https://altadefinizionetv.best",
"altadefinizionehd": "https://altadefinizione.wtf",
"animeforce": "https://ww1.animeforce.org",
"animeleggendari": "https://animepertutti.net",
"animeleggendari": "https://animepertutti.com",
"animespace": "http://www.animespace.tv",
"animestream": "https://www.animeworld.it",
"animesubita": "http://www.animesubita.org",
@@ -46,10 +46,10 @@
"serietvonline": "https://serietvonline.tech",
"serietvsubita": "http://serietvsubita.xyz",
"serietvu": "https://www.serietvu.club",
"streamingaltadefinizione": "https://www.streamingaltadefinizione.me",
"streamingaltadefinizione": "https://www.popcornstream.best",
"streamtime": "https://t.me/s/StreamTime",
"tantifilm": "https://www.tantifilm.eu",
"toonitalia": "https://toonitalia.org",
"vedohd": "https://vedohd.icu/",
"vedohd": "https://vedohd.video",
"vvvvid": "https://www.vvvvid.it"
}
}

@@ -1,5 +1,5 @@
Rev:0.1
Update: 18-9-2019
Rev:0.2
Update: 03-10-2019
#####################
Promemoria da cancellare pena la non visibilità del canale in KOD!!
@@ -11,8 +11,7 @@ le voci in settings sono state inserite per l'unico scopo
di velocizzare la scrittura del file
Vanno lasciate solo quelle voci il cui funzionamento sul
canale non vanno attivate.
Per esempio se il canale non ha: newest()
lasciare le voci dove c'è newest nell'id. Es: include_in_newest_series
"not_active": ["include_in_newest"], VA INSERITO nei canali che NON hanno nessuna voce newest.
Ovviamente va mantenuto tutto il codice di quell'id tra le {}
se vanno cancellati tutti deve rimanere la voce:
"settings": []
@@ -20,12 +19,13 @@ se vanno cancellati tutti deve rimanere la voce:
{
"id": "nome del file .json",
"name": "Nome del canale visualizzato in KOD",
"language": ["ita", "vos"],
"language": ["ita", "sub-ita"],
"active": false,
"adult": false,
"thumbnail": "",
"banner": "",
"categories": ["movie", "tvshow", "anime", "vos", "documentary", "adult"],
"not_active": ["include_in_newest"],
"settings": [
{
"id": "include_in_global_search",

@@ -1,30 +1,42 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per 'idcanale nel json'
# By: pincopallo!
# Eventuali crediti se vuoi aggiungerli
# ------------------------------------------------------------
# Rev: 0.2
# Update 18-09-2019
# Update 12-10-2019
# fix:
# 1. aggiunto pagination e sistemate alcune voci
# 2. modificato problemi in eccezioni
# 3. aggiunta la def select
# 4. modifica alla legenda e altre aggiunte
# Questo vuole solo essere uno scheletro per velocizzare la scrittura di un canale.
# La maggior parte dei canali può essere scritta con il decoratore.
# I commenti sono più un promemoria... che una vera e propria spiegazione!
# Niente di più.
# Ulteriori informazioni sono reperibili nel wiki:
# https://github.com/kodiondemand/addon/wiki/decoratori
"""
Questi sono commenti per i beta-tester.
Problemi noti che non superano il test del canale:
- indicare i problemi
Su questo canale, nella categoria 'Ricerca Globale'
non saranno presenti le voci 'Aggiungi alla Videoteca'
e 'Scarica Film'/'Scarica Serie', dunque,
la loro assenza, nel Test, NON dovrà essere segnalata come ERRORE.
Novità. Indicare in quale/i sezione/i è presente il canale:
- Nessuna, film, serie, anime...
Avvisi:
- Eventuali avvisi per i tester
Ulteriori info:
"""
# CANCELLARE Ciò CHE NON SERVE per il canale, lascia il codice commentato
# CANCELLARE Ciò CHE NON SERVE per il canale, lascia il codice commentato ove occorre,
# ma fare PULIZIA quando si è finito di testarlo
# Qui gli import
@@ -37,7 +49,7 @@ from platformcode import config
# in caso di necessità
#from core import scrapertoolsV2, httptools, servertools, tmdb
#from core.item import Item
from core.item import Item # per newest
#from lib import unshortenit
##### fine import
@@ -48,7 +60,7 @@ from platformcode import config
# da cancellare se non utilizzata
__channel__ = "id nel json"
# da cancellare se si utilizza findhost()
host = config.get_channel_url('id nel json OR '__channel__) # <-- ATTENZIONE
host = config.get_channel_url('id nel json' OR __channel__) # <-- ATTENZIONE
headers = [['Referer', host]]
# Inizio findhost() - da cancellare se usato l'altro metodo
@@ -62,7 +74,7 @@ def findhost():
permUrl = httptools.downloadpage('INSERIRE-URL-QUI', follow_redirects=False).headers
host = 'https://www.'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
# cancellare host non utilizzato
host = scrapertoolsV2.find_single_match(data, r'<div class="elementor-button-wrapper"> <a href="([^"]+)"')
host = scrapertoolsV2.find_single_match(permUrl, r'<div class="elementor-button-wrapper"> <a href="([^"]+)"')
headers = [['Referer', host]]
findhost() # così le imposta una volta per tutte
@@ -83,8 +95,7 @@ def mainlist(item):
# Ordine delle voci
# Voce FILM, puoi solo impostare l'url
film = ['',
#'url', # url per la voce FILM, se possibile la pagina principale con le ultime novità
film = ['', # url per la voce FILM, se possibile la pagina principale con le ultime novità
#Voce Menu,['url','action','args',contentType]
('Al Cinema', ['', 'peliculas', '']),
('Generi', ['', 'genres', 'genres']),
@@ -97,19 +108,17 @@ def mainlist(item):
]
# Voce SERIE, puoi solo impostare l'url
tvshow = ['',
#'url', # url per la voce Serie, se possibile la pagina con titoli di serie
tvshow = ['', # url per la voce Serie, se possibile la pagina con titoli di serie
#Voce Menu,['url','action','args',contentType]
('Novità', ['', '', ''])
('Novità', ['', '', '']),
('Per Lettera', ['', 'genres', 'letters']),
('Per Genere', ['', 'genres', 'genres']),
('Per anno', ['', 'genres', 'years'])
]
# Voce ANIME, puoi solo impostare l'url
anime = ['',
#'url', # url per la voce Anime, se possibile la pagina con titoli di anime
anime = ['', # url per la voce Anime, se possibile la pagina con titoli di anime
#Voce Menu,['url','action','args',contentType]
('Novità', ['', '', ''])
('Novità', ['', '', '']),
('In Corso',['', '', '', '']),
('Ultimi Episodi',['', '', '', '']),
('Ultime Serie',['', '', '', ''])
@@ -133,52 +142,53 @@ def mainlist(item):
nome = [( '' ['', '', '', ''])
return locals()
# riepilogo key per il match nei patron
# known_keys = ['url', 'title', 'title2', 'season', 'episode', 'thumb', 'quality',
# 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang']
# url = link relativo o assoluto
# title = titolo Film/Serie/Anime/Altro
# title2 = titolo dell'episodio Serie/Anime/Altro
# season = stagione in formato numerico
# episode = numero episodio, in formato numerico. Se il sito ha stagionexepisodio potete omettere season
# thumb = locandina Film/Serie/Anime/Altro
# quality = qualità indicata del video
# year = anno in formato numerico (4 cifre)
# duration = durata del Film/Serie/Anime/Altro
# genere = genere del Film/Serie/Anime/Altro. Es: avventura, commedia
# rating = punteggio/voto in formato numerico
# type = tipo del video. Es. movie per film o tvshow per le serie. Di solito sono discrimanti usati dal sito
# lang = lingua del video. Es: ITA, Sub-ITA, Sub, SUB ITA. Se non appare 'ITA' è di default
# Legenda known_keys per i groups nei patron
# known_keys = ['url', 'title', 'title2', 'season', 'episode', 'thumb', 'quality',
# 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang']
# url = link relativo o assoluto alla pagina titolo film/serie
# title = titolo Film/Serie/Anime/Altro
# title2 = titolo dell'episodio Serie/Anime/Altro
# season = stagione in formato numerico
# episode = numero episodio, in formato numerico.
# thumb = link relativo o assoluto alla locandina Film/Serie/Anime/Altro
# quality = qualità indicata del video
# year = anno in formato numerico (4 cifre)
# duration = durata del Film/Serie/Anime/Altro
# genere = genere del Film/Serie/Anime/Altro. Es: avventura, commedia
# rating = punteggio/voto in formato numerico
# type = tipo del video. Es. movie per film o tvshow per le serie. Di solito sono discriminanti usati dal sito
# lang = lingua del video. Es: ITA, Sub-ITA, Sub, SUB ITA.
# AVVERTENZE: Se il titolo è trovato nella ricerca TMDB/TVDB/Altro allora le locandine e altre info non saranno quelle recuperate nel sito!
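# --- Illustrative example (not part of the template) ---
# A minimal, hypothetical sketch of how named groups in a patron map onto the
# known_keys legend above; the HTML snippet and the regex are invented for the
# example and do not come from any real channel.
import re

sample_html = ('<a href="/film/il-titolo-2019/" title="Il Titolo">'
               '<img src="/locandine/il-titolo.jpg"></a>'
               '<span class="year">2019</span><span class="quality">HD</span>')

sample_patron = (r'<a href="(?P<url>[^"]+)" title="(?P<title>[^"]+)">'
                 r'<img src="(?P<thumb>[^"]+)">.*?class="year">(?P<year>\d{4})<'
                 r'.*?class="quality">(?P<quality>[^<]+)<')

for m in re.finditer(sample_patron, sample_html):
    scraped = m.groupdict()  # keys match the legend: url, title, thumb, year, quality
    print(scraped['title'], scraped['year'], scraped['quality'])
# --- End of example ---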
@support.scrape
def peliculas(item):
support.log(item)
#dbg # decommentare per attivare web_pdb
#support.dbg() # decommentare per attivare web_pdb
action = ''
blacklist = ['']
patron = r''
patronBlock = r''
patronNext = ''
pagination = 0
pagination = ''
debug = False # True per testare le regex sul sito
#debug = True # True per testare le regex sul sito
return locals()
@support.scrape
def episodios(item):
support.log(item)
#dbg
#support.dbg()
action = ''
blacklist = ['']
patron = r''
patronBlock = r''
patronNext = ''
pagination = 0
pagination = ''
debug = False
#debug = True
return locals()
# Questa def è utilizzata per generare i menu del canale
@@ -186,28 +196,50 @@ def episodios(item):
@support.scrape
def genres(item):
support.log(item)
#dbg
#support.dbg()
action = ''
blacklist = ['']
patron = r''
patronBlock = r''
patronNext = ''
pagination = 0
pagination = ''
debug = False
#debug = True
return locals()
############## Fine ordine obbligato
## Def ulteriori
# per quei casi dove il sito non differenzia film e/o serie e/o anime
# e la ricerca porta i titoli mischiati senza poterli distinguere tra loro
# andranno modificate anche le def peliculas e episodios ove occorre
def select(item):
support.log('select --->', item)
#support.dbg()
data = httptools.downloadpage(item.url, headers=headers).data
# pulizia di data, in caso commentare le prossime 2 righe
data = re.sub('\n|\t', ' ', data)
data = re.sub(r'>\s+<', '> <', data)
block = scrapertoolsV2.find_single_match(data, r'')
if re.findall('', data, re.IGNORECASE):
support.log('select = ### è una serie ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
args='serie',
contentType='tvshow',
#data1 = data decommentando portiamo data nella def senza doverla riscaricare
))
############## Fondo Pagina
# da adattare al canale
def search(item, text):
support.log('search', item)
itemlist = []
text = text.replace(' ', '+')
item.url = '/index.php?do=search&story=%s&subaction=search' % (text)
item.url = host + '/index.php?do=search&story=%s&subaction=search' % (text)
# bisogna inserire item.contentType per la ricerca globale
# se il canale è solo film, si può omettere, altrimenti bisogna aggiungerlo e discriminare.
item.contentType = item.contentType
@@ -240,14 +272,15 @@ def newest(categoria):
except:
import sys
for line in sys.exc_info():
log('newest log: ', {0}.format(line))
support.log('newest log: ', {0}.format(line))
return []
return itemlist
# da adattare... ( support.server ha vari parametri )
# da adattare...
# consultare il wiki sia per support.server che ha vari parametri,
# sia per i siti con hdpass
#support.server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True)
def findvideos(item):
support.log('findvideos ->', item)
return support.server(item, headers=headers)

@@ -1,7 +1,7 @@
{
"id": "altadefinizione01",
"name": "Altadefinizione01",
"language": ["ita", "vosi"],
"language": ["ita", "sub-ita"],
"active": true,
"adult": false,
"thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/altadefinizione01.png",

@@ -3,7 +3,7 @@
"name": "Altadefinizione01 L",
"active": true,
"adult": false,
"language": ["ita","vosi"],
"language": ["ita","sub-ita"],
"thumbnail": "altadefinizione01_L.png",
"banner": "altadefinizione01_L.png",
"categories": ["movie","vos"],

@@ -3,7 +3,7 @@
"name": "AltadefinizioneClick",
"active": true,
"adult": false,
"language": ["ita","vosi"],
"language": ["ita","sub-ita"],
"thumbnail": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/altadefinizioneclick.png",
"bannermenu": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/altadefinizioneciclk.png",
"categories": ["movie","vos"],

@@ -24,25 +24,22 @@ headers = [['Referer', host]]
def mainlist(item):
anime = ['/lista-anime/',
('In Corso',['/lista-anime-in-corso/']),
('Ultimi Episodi',['','peliculas','update']),
('Ultime Serie',['/category/anime/articoli-principali/','peliculas','last'])
]
return locals()
def newest(categoria):
support.log(categoria)
itemlist = []
item = support.Item()
try:
if categoria == "anime":
item.contentType = 'tvshow'
item.url = host
item.args = 'update'
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Continua la ricerca in caso di errore
item.args = 'newest'
itemlist = peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
@@ -56,33 +53,33 @@ def search(item, texto):
search = texto
item.contentType = 'tvshow'
patron = '<strong><a href="(?P<url>[^"]+)">(?P<title>.*?) [Ss][Uu][Bb]'
action = 'episodios'
action = 'episodios'
return locals()
@support.scrape
def peliculas(item):
anime = True
if item.args == 'update':
action = 'episodios'
if item.args == 'newest':
patron = r'src="(?P<thumb>[^"]+)" class="attachment-grid-post[^"]+" alt="[^"]*" title="(?P<title>[^"]+").*?<h2><a href="(?P<url>[^"]+)"'
def itemHook(item):
delete = support.scrapertoolsV2.find_single_match(item.fulltitle, r'( Episodio.*)')
number = support.scrapertoolsV2.find_single_match(item.title, r'Episodio (\d+)')
item.url = support.match(item, '<a href="([^"]+)" class="btn', headers=headers)[0][0]
delete = support.scrapertoolsV2.find_single_match(item.fulltitle, r'( Episodi.*)')
number = support.scrapertoolsV2.find_single_match(item.title, r'Episodi(?:o)? (?:\d+÷)?(\d+)')
item.title = support.typo(number + ' - ','bold') + item.title.replace(delete,'')
item.fulltitle = item.show = item.fulltitle.replace(delete,'')
item.url = item.url.replace('-episodio-'+ number,'')
item.fulltitle = item.show = item.title.replace(delete,'')
item.number = number
return item
action = 'findvideos'
elif item.args == 'last':
patron = r'src="(?P<thumb>[^"]+)" class="attachment-grid-post[^"]+" alt="[^"]*" title="(?P<title>.*?)(?: Sub| sub| SUB|").*?<h2><a href="(?P<url>[^"]+)"'
action = 'episodios'
else:
pagination = ''
patron = '<strong><a href="(?P<url>[^"]+)">(?P<title>.*?) [Ss][Uu][Bb]'
action = 'episodios'
patron = r'<strong><a href="(?P<url>[^"]+)">(?P<title>.*?) [Ss][Uu][Bb]'
return locals()
@@ -102,16 +99,16 @@ def findvideos(item):
support.log(item)
itemlist = []
if item.number:
item.url = support.match(item, r'<a href="([^"]+)"[^>]*>', patronBlock=r'Episodio %s(.*?)</tr>' % item.number)[0][0]
if item.number:
item.url = support.match(item, r'<a href="([^"]+)"[^>]*>', patronBlock=r'Episodio %s(.*?)</tr>' % item.number)[0][0]
if 'http' not in item.url:
if '//' in item.url[:2]:
item.url = 'http:' + item.url
elif host not in item.url:
elif host not in item.url:
item.url = host + item.url
if 'adf.ly' in item.url:
item.url = adfly.get_long_url(item.url)
elif 'bit.ly' in item.url:
@@ -129,4 +126,4 @@ def findvideos(item):
support.server(item, itemlist=itemlist)
return itemlist
return itemlist

@@ -3,7 +3,7 @@
"name": "AnimePerTutti",
"active": true,
"adult": false,
"language": ["ita", "vosi"],
"language": ["ita", "sub-ita"],
"thumbnail": "animepertutti.png",
"bannermenu": "animepertutti.png",
"categories": ["anime", "vos"],

@@ -3,10 +3,10 @@
"name": "AnimeSubIta",
"active": true,
"adult": false,
"language": ["vosi"],
"language": ["sub-ita"],
"thumbnail": "animesubita.png",
"bannermenu": "animesubita.png",
"categories": ["anime", "vosi", "movie"],
"categories": ["anime", "vos", "movie"],
"settings": []
}

@@ -3,7 +3,7 @@
"name": "AnimeWorld",
"active": true,
"adult": false,
"language": ["ita", "vosi"],
"language": ["ita", "sub-ita"],
"thumbnail": "animeworld.png",
"banner": "animeworld.png",
"categories": ["anime", "vos"],

@@ -1,7 +1,7 @@
{
"id": "casacinema",
"name": "Casacinema",
"language": ["ita", "vosi"],
"language": ["ita", "sub-ita"],
"active": true,
"adult": false,
"thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/casacinema.png",

@@ -1,7 +1,7 @@
{
"id": "casacinemaInfo",
"name": "La Casa del Cinema",
"language": ["ita", "vosi"],
"language": ["ita", "sub-ita"],
"active": true,
"adult": false,
"thumbnail": "",

@@ -1,7 +1,7 @@
{
"id": "cb01anime",
"name": "Cb01anime",
"language": ["ita", "vos", "vosi"],
"language": ["ita", "vos", "sub-ita"],
"active": true,
"adult": false,
"thumbnail": "cb01anime.png",

@@ -21,7 +21,8 @@ headers = [['Referer', host]]
def mainlist(item):
anime = [('Genere',['','menu', '2']),
('Per Lettera',['','menu', '1']),
('Per Anno',['','menu', '3'])]
('Per Anno',['','menu', '3']),
('Ultimi Anime Aggiornati',['','peliculas', 'newest'])]
return locals()
@@ -39,12 +40,34 @@ def search(item, texto):
item.url = host + "/?s=" + texto
return peliculas(item)
def newest(categoria):
support.log(categoria)
itemlist = []
item = support.Item()
try:
if categoria == "anime":
item.url = host
item.args = 'newest'
itemlist = peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.logger.error("{0}".format(line))
return []
return itemlist
@support.scrape
def peliculas(item):
blacklist = Blacklist
patron = r'<div class="span4">\s*<a href="(?P<url>[^"]+)"><img src="(?P<thumb>[^"]+)"[^>]+><\/a>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+> <h1>(?P<title>[^<\[]+)(?:\[(?P<lang>[^\]]+)\])?</h1></a>.*?-->(?:.*?<br />)?\s*(?P<plot>[^<]+)'
patronNext = r'<link rel="next" href="([^"]+)"'
item.contentType = 'tvshow'
if item.args == 'newest':
data = support.match(item)[1]
patron = r'<div id="blockvids"><ul><li><a href="(?P<url>[^"]+)"[^>]+><img src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^\[]+)\[(?P<lang>[^\]]+)\]'
else:
patron = r'<div class="span4">\s*<a href="(?P<url>[^"]+)"><img src="(?P<thumb>[^"]+)"[^>]+><\/a>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+> <h1>(?P<title>[^<\[]+)(?:\[(?P<lang>[^\]]+)\])?</h1></a>.*?-->(?:.*?<br />)?\s*(?P<plot>[^<]+)'
patronNext = r'<link rel="next" href="([^"]+)"'
action = 'check'
return locals()
@@ -65,21 +88,38 @@ def episodios(item):
season = 1
s = 1
e = 0
sp = 0
for match in item.url:
if 'stagione' in match.lower():
find_season = support.match(match, r'Stagione\s*(\d+)')[0]
season = int(find_season[0]) if find_season else season + 1 if 'prima' not in match.lower() else season
else:
title = support.match(match,'<a[^>]+>([^<]+)</a>')[0][0]
if 'episodio' in title.lower():
ep = int(support.match(match, r'Episodio (\d+)')[0][0])
if season > s and ep > 1:
s += 1
e = ep - 1
title = str(season) + 'x' + str(ep-e).zfill(2) + ' - ' + title
data += title + '|' + match + '\n'
else:
try: title = support.match(match,'<a[^>]+>([^<]+)</a>')[0][0]
except: title = ''
if title:
if 'episodio' in title.lower():
ep = support.match(match, r'Episodio ((?:\d+.\d|\d+|\D+))')[0][0]
check = ep.isdigit()
if check or '.' in ep:
if '.' in ep:
sp += 1
title = '0' + 'x' + str(sp).zfill(2) + ' - ' + title
else:
ep = int(ep)
if season > s and ep > 1:
s += 1
e = ep - 1
title = str(season) + 'x' + str(ep-e).zfill(2) + ' - ' + title
data += title + '|' + match + '\n'
else:
title += ' #movie'
data += title + '|' + match + '\n'
def itemHook(item):
if '#movie' in item.title:
item.contentType='movie'
item.title = item.title.replace(' #movie','')
return item
patron = r'(?P<title>[^\|]+)\|(?P<url>[^\n]+)\n'
action = 'findvideos'
return locals()

@@ -1,11 +1,11 @@
{
"id": "cineblog01",
"name": "CB01",
"language": ["ita"],
"language": ["ita", "sub-ita"],
"active": true,
"adult": false,
"thumbnail": "cb01.png",
"banner": "cb01.png",
"categories": ["tvshow", "movie", "vosi"],
"categories": ["tvshow", "movie", "vos"],
"settings": []
}

@@ -18,7 +18,13 @@ headers = ""
def findhost():
global host, headers
permUrl = httptools.downloadpage('https://www.cb01.uno/', follow_redirects=False).headers
host = 'https://www.'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
if 'google' in permUrl['location']:
if host[:4] != 'http':
host = 'https://'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
else:
host = permUrl['location'].replace('https://www.google.it/search?q=site:', '')
else:
host = permUrl['location']
headers = [['Referer', host]]
list_servers = ['verystream', 'openload', 'streamango', 'wstream']
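# --- Illustrative example (not part of the channel) ---
# Simplified, hypothetical sketch of how the new findhost() branch above resolves
# the host from the redirect's Location header; the URLs are placeholders and no
# network request is made (the real code reads permUrl['location']).
def _resolve_host_example(location):
    if 'google' in location:
        stripped = location.replace('https://www.google.it/search?q=site:', '')
        return stripped if stripped[:4] == 'http' else 'https://' + stripped
    return location

print(_resolve_host_example('https://www.google.it/search?q=site:cb01.example'))  # https://cb01.example
print(_resolve_host_example('https://www.cb01.example/'))                          # https://www.cb01.example/
# --- End of example ---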
@@ -72,7 +78,7 @@ def newest(categoria):
else:
patronBlock = r'Ultimi 100 film Aggiornati:(?P<block>.*?)<\/td>'
item = categoria
patron = "<a href=(?P<url>[^>]+)>(?P<title>[^<([]+)(?:\[(?P<lang>Sub-ITA|B/N)\])?\s?(?:\[(?P<quality>HD|SD|HD/3D)\])?\s?\((?P<year>[0-9]{4})\)<\/a>"
patron = r'<a href="(?P<url>[^"]+)"\s*>(?P<title>[^<([]+)(?:\[(?P<lang>Sub-ITA|B/N)\])?\s?(?:\[(?P<quality>HD|SD|HD/3D)\])?\s?\((?P<year>[0-9]{4})\)<\/a>'
pagination = 20
return locals()
@@ -160,13 +166,13 @@ def findvideos(item):
QualityStr = scrapertoolsV2.decodeHtmlentities(match.group(1))[6:]
# Estrae i contenuti - Streaming
load_links(itemlist, '<strong>Streaming:</strong>(.*?)<tableclass=cbtable height=30>', "orange", "Streaming", "SD")
load_links(itemlist, '<strong>Streaming:</strong>(.*?)cbtable', "orange", "Streaming", "SD")
# Estrae i contenuti - Streaming HD
load_links(itemlist, '<strong>Streaming HD[^<]+</strong>(.*?)<tableclass=cbtable height=30>', "yellow", "Streaming HD", "HD")
load_links(itemlist, '<strong>Streaming HD[^<]+</strong>(.*?)cbtable', "yellow", "Streaming HD", "HD")
# Estrae i contenuti - Streaming 3D
load_links(itemlist, '<strong>Streaming 3D[^<]+</strong>(.*?)<tableclass=cbtable height=30>', "pink", "Streaming 3D")
load_links(itemlist, '<strong>Streaming 3D[^<]+</strong>(.*?)cbtable', "pink", "Streaming 3D")
return support.server(item, itemlist=itemlist)

@@ -8,79 +8,13 @@
"banner": "https://www.cinemalibero.center/wp-content/themes/Cinemalibero%202.0/images/logo02.png",
"categories": ["tvshow", "movie","anime"],
"settings": [
{
"id": "channel_host",
"type": "text",
"label": "Host del canale",
"default": "https://www.cinemalibero.fun/",
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
{
"id": "include_in_newest_series",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
}
]
}

@@ -1,320 +1,240 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per CinemaLibero - First Version
# Canale per cinemalibero
# ------------------------------------------------------------
"""
Trasformate le sole def per support.menu e support.scrape
da non inviare nel test.
Test solo a trasformazione completa
Il canale non permette di filtrare film, serie e altro nella ricerca.
Quindi vengono disabilitate le voci:
- "Aggiungi in videoteca"
- "Scarica film/serie"
per le sole ricerche: nel canale e globale.
Problemi noti che non superano il test del canale:
-
Avvisi:
-
Ulteriori info:
"""
import re
from core import scrapertools, servertools, httptools, support
from core import tmdb
from core.item import Item
from lib import unshortenit
# per l'uso dei decoratori, per i log, e funzioni per siti particolari
from core import support
# se non si fa uso di findhost()
from platformcode import config
from platformcode import logger
from specials import autoplay
import channelselector
# Necessario per Autoplay
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['akstream', 'wstream', 'openload', 'streamango']
list_quality = ['default']
# Necessario per Verifica Link
checklinks = config.get_setting('checklinks', 'cinemalibero')
checklinks_number = config.get_setting('checklinks_number', 'cinemalibero')
# in caso di necessità
from core import scrapertoolsV2, httptools#, servertools
from core.item import Item # per newest
#from lib import unshortenit
__channel__ = "cinemalibero"
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
##headers = [
## ['Host', host.split("//")[-1].split("/")[0]],
## ['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'],
## ['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'],
## ['Accept-Language', 'en-US,en;q=0.5'],
## ['Accept-Encoding', 'gzip, deflate'],
## ['Referer', host],
## ['DNT', '1'],
## ['Connection', 'keep-alive'],
## ['Upgrade-Insecure-Requests', '1'],
## ['Cache-Control', 'max-age=0']
## ]
list_servers = ['akstream', 'wstream', 'openload', 'streamango']
list_quality = ['default']
@support.menu
def mainlist(item):
support.log()
film = '/category/film/'
filmSub = [
('Generi', ['', 'genres']),
('Sport', ['/category/sport/', 'peliculas']),
]
tvshow = '/category/serie-tv/'
tvshowSub = [
('Anime ', ['/category/anime-giapponesi/', 'video'])
]
support.log(item)
film = ['/category/film/',
('Generi', ['', 'genres', 'genres']),
]
tvshow = ['/category/serie-tv/',
## ('Novità', ['/aggiornamenti-serie-tv/', 'peliculas', 'update']),
]
anime = ['/category/anime-giapponesi/',
]
search = ''
return locals()
@support.scrape
def genres(item):
support.log()
action='video'
patron=r'<a class="dropdown-item" href="(?P<url>[^"]+)" title="(?P<title>[A-z]+)"'
## return support.scrape2(item, patronBlock=r'<div id="bordobar" class="dropdown-menu(?P<block>.*)</li>',
## patron=r'<a class="dropdown-item" href="([^"]+)" title="([A-z]+)"',
## listGroups=['url', 'title'], action='video')
def peliculas(item):
support.log(item)
#support.dbg() # decommentare per attivare web_pdb
debug = True
blacklist = ['']
if item.args == 'search':
patron = r'href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">.+?class="titolo">(?P<title>[^<]+)<'
patronBlock = r'style="color: #2C3549 !important;" class="fon my-3"><small>.+?</small></h1>(?P<block>.*?)<div class="bg-dark ">'
action = 'select'
else:
if item.contentType == 'tvshow':
# da sistemare per matchare le ultime serie inserite
if item.args == 'update':
patron = r'<div class="card-body p-0"> <a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^)]+)\)">[^>]+>(?P<title>.+?)(?:[ ]\((?P<lang>SubITA)\))?(?:[ ](?P<year>\d{4}))?</div> <div class="genere">(?:|Ep.)(?:|.+?)?</div>'
action = 'select'
else:
## #patron = r'href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)<[^>]+>[^>]+>(?:.+?) (?:\()?(?P<lang>ITA|iTA|Sub)(?:\))?'
## #patron = r'<div class="card-body p-0"> <a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)(?: \(.+?\))?(?: \(\d+\)| \d+)?<[^>]+>(?:<div class="genere">.+?(?:\()?(?P<lang>ITA|iTA|Sub)(?:\))?)?'
patron = r'<div class="card-body p-0"> <a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)(?: \(.+?\))?(?: \(\d+\)| \d+)?</div><div class="genere">(?:.?(?P<episode>\d+x\d+-\d+|\d+-\d+|\d+x\d+|\d+)[ ]?(?:\()?(?:(?P<lang>ITA|iTA|Sub ITA|Sub iTA|Sub))?[ ]?(?:(?P<quality>HD))?.+?)</div>'
action = 'episodios'
if 'anime' in item.url:
patron = r'<div class="card-body p-0"> <a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)(?: \(.+?\))?(?: \(\d+\)| \d+)?<[^>]+>(?:<div class="genere">.+?(?:\()?(?P<lang>ITA|iTA|Sub)(?:\))?)?'
action = 'select'
elif item.contentType == 'movie':
action = 'findvideos'
patron = r'href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)(?:\[(?P<lang>Sub-iTA|Sub-ITA|Sub)\])?[ ]\((?P<year>\d+)\)</div>(?:<div class="genere">(?P<quality>[^<]+)<)?'
patronBlock = r'<h1(?: style="color: #2C3549 !important; text-transform: uppercase;"| style="text-transform: uppercase; color: #2C3549 !important;"| style="color: #2C3549 !important; text-transform: uppercase;" style="text-shadow: 1px 1px 1px #FF8C00; color:#FF8C00;"| style="text-shadow: 1px 1px 1px #0f0f0f;" class="darkorange"| style="color:#2C3549 !important;")>.+?</h1>(?P<block>.*?)<div class=(?:"container"|"bg-dark ")>'
patronNext = '<a class="next page-numbers".*?href="([^"]+)">'
return locals()
def peliculas(item):
logger.info('[cinemalibero.py] video')
itemlist = []
if host not in item.url:
item.url = host + item.url
@support.scrape
def episodios(item):
support.log(item)
# Carica la pagina
data = httptools.downloadpage(item.url).data.replace('\n','').replace('\t','')
block = scrapertools.find_single_match(data, '<div class="container">.*?class="col-md-12[^"]*?">(.*?)<div class=(?:"container"|"bg-dark ")>')
#dbg
## if item.args == '':
## patron = r'<a target=.+?href="(?P<url>[^"]+)"[^>]+>(?P<title>Epis.+?(\d+)?)(?:\((?P<lang>Sub ITA)\))?</a><br />'
## patronBlock = r'(?:class="txt_dow">Streaming:(?P<block>.*?)at-below-post)'
if item.data1 and 'stagione' not in item.data1.lower():
# è un movie
item.contentType = 'tvshow'
#patron = r'(?:href="[ ]?(?P<url>[^"]+)"[^>]+>(?P<title>[^<]+)<|(?P<episode>\d+(?:&#215;|×)?\d+\-\d+|\d+(?:&#215;|×)\d+)[;]?(?:(\4[^<]+)(\2.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br />|</a></p>))'
patron = r'<a target=.+?href="(?P<url>[^"]+)"[^>]+>(?P<title>Epis.+?(?P<episode>\d+)?)(?:\((?P<lang>Sub ITA)\))?</a>(?:<br />)?'
patronBlock = r'(?:class="txt_dow">Streaming:(?P<block>.*?)at-below-post)'
else:
patron = r'(?P<episode>\d+(?:&#215;|×)?\d+\-\d+|\d+(?:&#215;|×)\d+)[;]?[ ]?(?:(?P<title>[^<]+)(?P<url>.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br />|</a></p>)'
## patron = r'<a target=.+?href="(?P<url>[^"]+)"[^>]+>(?P<title>Epis.+?(\d+)?)(?:\((?P<lang>Sub ITA)\))?</a><br />'
patronBlock = r'<p><strong>(?P<block>(?:.+?[Ss]tagione.+?(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?(?:|.+?|</strong>)(/?:</span>)?</p>.*?</p>)'
item.contentType = 'tvshow'
# Estrae i contenuti
matches = re.compile(r'<div class="col-lg-3">(.*?)<\/a><\/div>', re.DOTALL).findall(block)
action = 'findvideos'
blacklist = ['']
for match in matches:
url = scrapertools.find_single_match(match, r'href="([^"]+)"')
long_title = scrapertools.find_single_match(match, r'<div class="titolo">([^<]+)<\/div>')
thumb = scrapertools.find_single_match(match, r'url=\((.*?)\)')
quality = scrapertools.find_single_match(match, r'<div class="voto">([^<]+)<\/div>')
genere = scrapertools.find_single_match(match, r'<div class="genere">([^<]+)<\/div>')
## pagination = ''
year = scrapertools.find_single_match(long_title, r'\(([0-9)]+)') or scrapertools.find_single_match(long_title, r'\) ([0-9)]+)')
lang = scrapertools.find_single_match(long_title, r'\(([a-zA-Z)]+)')
title = re.sub(r'\(.*','',long_title)
title = re.sub(r'(?:\(|\))','',title)
if genere:
genere = ' - [' + genere + ']'
if year:
long_title = title + ' - ('+ year + ')' + genere
if lang:
long_title = '[B]' + title + '[/B]' + ' - ('+ lang + ')' + genere
else:
long_title = '[B]' + title + '[/B]'
# Seleziona fra Serie TV e Film
if item.contentType == 'movie':
tipologia = 'movie'
action = 'findvideos'
elif item.contentType == 'episode':
tipologia = 'tv'
action = 'episodios'
else:
tipologia = 'movie'
action = 'select'
## debug = True
return locals()
itemlist.append(
Item(channel=item.channel,
action=action,
contentType=item.contentType,
title=long_title,
fulltitle=title,
quality=quality,
url=url,
thumbnail=thumb,
infoLabels={'year': year},
show=title))
# Next page
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers".*?href="([^"]+)">')
@support.scrape
def genres(item):
support.log(item)
#dbg
if next_page != '':
itemlist.append(
Item(channel=item.channel,
action='video',
title='[B]' + config.get_localized_string(30992) + ' &raquo;[/B]',
url=next_page,
contentType=item.contentType,
thumbnail='http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png'))
action = 'peliculas'
blacklist = ['']
patron = r'<a class="dropdown-item" href="(?P<url>[^"]+)" title="(?P<title>[A-z]+)"'
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
return locals()
def select(item):
support.log('select --->', item)
#support.dbg()
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertools.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)<\/div>')
block = scrapertoolsV2.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)<div style="margin-left: 0.5%; color: #FFF;">')
if re.findall('rel="category tag">serie', data, re.IGNORECASE):
logger.info('select = ### è una serie ###')
support.log('select = ### è una serie ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
extra='serie',
contentType='episode'))
args='serie',
contentType='tvshow',
data1 = data
))
elif re.findall('rel="category tag">anime', data, re.IGNORECASE):
if re.findall('episodio', block, re.IGNORECASE):
logger.info('select = ### è un anime ###')
if re.findall('episodio', block, re.IGNORECASE) or re.findall('stagione', data, re.IGNORECASE):
support.log('select = ### è un anime ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
extra='anime',
contentType='episode'))
args='anime',
contentType='tvshow',
data1 = data
))
else:
logger.info('select = ### è un film ###')
support.log('select = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
contentType='movie'))
args = '',
contentType='movie',
#data = data
))
else:
logger.info('select = ### è un film ###')
support.log('select = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
contentType='movie'))
contentType='movie',
#data = data
))
def search(item, texto):
support.log("[cinemalibero.py] " + item.url + " search " + texto)
item.url = host + "/?s=" + texto
try:
item.args = 'search'
item.contentType = 'episode' # non fa uscire le voci nel context menu
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.log("%s" % line)
return []
def findvideos(item): # Questa def. deve sempre essere nominata findvideos
logger.info('[cinemalibero.py] findvideos')
def newest(categoria):
support.log('newest ->', categoria)
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.args = 'update'
item.url = host+'/aggiornamenti-serie-tv/'
item.contentType = 'tvshow'
item.action = 'peliculas'
itemlist = peliculas(item)
if item.args == 'direct':
return servertools.find_video_items(item)
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
log('newest log: ', {0}.format(line))
return []
if item.contentType == 'episode':
data = item.url.lower()
block = scrapertools.find_single_match(data,r'>streaming.*?<\/strong>*?<\/h2>(.*?)<\/div>')
urls = re.findall('<a.*?href="([^"]+)"', block, re.DOTALL)
else:
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub(r'\n|\t','',data).lower()
block = scrapertools.find_single_match(data,r'>streaming.*?<\/strong>(.*?)<strong>')
urls = re.findall('<a href="([^"]+)".*?class="external"', block, re.DOTALL)
logger.info('URLS'+ str(urls))
if urls:
data =''
for url in urls:
url, c = unshortenit.unshorten(url)
data += url + '\n'
logger.info('DATA'+ data)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.fulltitle + ' - [COLOR limegreen][[/COLOR]'+videoitem.title+' [COLOR limegreen]][/COLOR]'
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = item.channel
videoitem.contentType = item.contentType
# Link Aggiungi alla Libreria
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findservers':
itemlist.append(
Item(channel=item.channel, title='[COLOR lightblue][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
action='add_pelicula_to_library', extra='findservers', contentTitle=item.contentTitle))
# Necessario per filtrare i Link
if checklinks:
itemlist = servertools.check_list_links(itemlist, checklinks_number)
# Necessario per FilterTools
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Necessario per AutoPlay
autoplay.start(itemlist, item)
return itemlist
def episodios(item): # Questa def. deve sempre essere nominata episodios
logger.info('[cinemalibero.py] episodios')
itemlist = []
extra = ''
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertools.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)at-below-post')
if re.findall('rel="category tag">serie', data, re.IGNORECASE):
# logger.info('select = ### è una serie ###')
extra='serie'
elif re.findall('rel="category tag">anime', data, re.IGNORECASE):
if re.findall('episodi', block, re.IGNORECASE):
# logger.info('select = ### è un anime ###')
extra='anime'
block = re.sub(r'<h2>.*?<\/h2>','',block)
block = block.replace('<p>','').replace('<p style="text-align: left;">','').replace('<','<').replace('-<','<').replace('&#8211;<','<').replace('&#8211; <','<').replace('<strong>','<stop><start><strong>')+'<stop>'
block = re.sub(r'stagione completa.*?<\/p>','',block,flags=re.IGNORECASE)
if extra == 'serie':
block = block.replace('<br /> <a','<a')
matches = re.compile(r'<start>.*?(?:stagione|Stagione)(.*?)<\/(?:strong|span)><\/p>(.*?)<stop>', re.DOTALL).findall(block)
if not matches:
matches = scrapertools.find_multiple_matches(block, r'<a href="([^"]+)"[^>]+>(Episodio [0-9]+)</a>')
for scrapedurl, scrapedtitle in matches:
scrapedtitle = re.sub(r'Episodio ([0-9]+)', r'Episodio 1x\1', scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType='episode',
title=scrapedtitle,
fulltitle=scrapedtitle,
show=item.fulltitle,
url=scrapedurl,
args='direct'))
else:
for lang, html in matches:
lang = re.sub('<.*?>','',lang)
html = html.replace('<br />','\n').replace('</p>', '\n')
matches = re.compile(r'([^<]+)([^\n]+)\n', re.DOTALL).findall(html)
for scrapedtitle, html in matches:
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType='episode',
title=scrapedtitle + ' - (' + lang + ')',
fulltitle=scrapedtitle,
show=item.fulltitle,
url=html))
elif extra == 'anime':
block = re.sub(r'<start.*?(?:download:|Download:).*?<stop>','',block)
block = re.sub(r'(?:mirror|Mirror)[^<]+<','',block)
block = block.replace('<br />','\n').replace('/a></p>','\n')
block = re.sub(r'<start.*?(?:download|Download).*?\n','',block)
matches = re.compile('<a(.*?)\n', re.DOTALL).findall(block)
for html in matches:
scrapedtitle = scrapertools.find_single_match(html, r'>(.*?)<\/a>')
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType='episode',
title=scrapedtitle,
fulltitle=scrapedtitle,
show=item.fulltitle,
url=html))
def findvideos(item):
support.log(item)
if item.contentType == 'movie':
return support.server(item)
else:
logger.info('select = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
show=item.fulltitle,
contentType='movie'))
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
return support.server(item, data= item.url)

@@ -1,7 +1,7 @@
{
"id": "dreamsub",
"name": "DreamSub",
"language": ["ita", "vosi"],
"language": ["ita", "sub-ita"],
"active": true,
"adult": false,
"thumbnail": "dreamsub.png",

@@ -1,7 +1,7 @@
{
"id": "fastsubita",
"name": "Fastsubita",
"language": ["vosi"],
"language": ["sub-ita"],
"active": true,
"adult": false,
"thumbnail": "fastsubita.png",

@@ -3,7 +3,7 @@
"name": "Italia Serie",
"active": true,
"adult": false,
"language": ["ita","vosi"],
"language": ["ita","sub-ita"],
"thumbnail": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/italiaserie.png",
"bannermenu": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/italiaserie.png",
"categories": ["tvshow"],

@@ -6,7 +6,7 @@
"adult": false,
"thumbnail": "mondolunatico2.png",
"banner": "mondolunatico2.png",
"categories": ["tvshow", "movie", "vosi", "anime"],
"categories": ["tvshow", "movie", "vos", "anime"],
"settings": [
{
"id": "include_in_global_search",

@@ -7,6 +7,5 @@
"thumbnail": "seriehd.png",
"banner": "seriehd.png",
"categories": ["tvshow"],
"not_active": ["include_in_newest"],
"settings": []
}

@@ -7,9 +7,6 @@
from core import scrapertoolsV2, httptools, support
from core.item import Item
##__channel__ = 'seriehd'
# host = support.config.get_channel_url(__channel__)
# impostati dinamicamente da findhost()
host = ''
headers = ''
@@ -26,9 +23,6 @@ findhost()
list_servers = ['verystream', 'openload', 'streamango', 'thevideome']
list_quality = ['1080p', '720p', '480p', '360']
##checklinks = support.config.get_setting('checklinks', __channel__)
##checklinks_number = support.config.get_setting('checklinks_number', __channel__)
@support.menu
def mainlist(item):
@@ -82,13 +76,31 @@ def episodios(item):
episodes = support.match(item, r'<a href="([^"]+)">(\d+)<', '<h3>EPISODIO</h3><ul>(.*?)</ul>', headers, season_url)[0]
for episode_url, episode in episodes:
episode_url = support.urlparse.urljoin(url, episode_url)
title = season + "x" + episode.zfill(2)
title = season + "x" + episode.zfill(2) + ' - ' + item.fulltitle
data += title + '|' + episode_url + '\n'
support.log('DaTa= ',data)
patron = r'(?P<title>[^\|]+)\|(?P<url>[^\n]+)\n'
action = 'findvideos'
return locals()
def newest(categoria):
support.log(categoria)
itemlist = []
item = support.Item()
try:
if categoria == "series":
item.url = host
item.contentType = 'tvshow'
itemlist = peliculas(item)
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.logger.error("{0}".format(line))
return []
return itemlist
def findvideos(item):
support.log()

@@ -1,19 +1,11 @@
{
"id": "streamingaltadefinizione",
"name": "Streaming Altadefinizione",
"name": "Popcorn Stream",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "https://www.streamingaltadefinizione.world/wp-content/uploads/2018/09/StreamingAltadefinizioneLogo.png",
"thumbnail": "https://www.popcornstream.best/wp-content/uploads/2019/09/PopLogo40.png",
"banner": "https://www.popcornstream.info/media/PopcornStream820x428.png",
"categories": ["movie","tvshow","anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": true,
"enabled": true,
"visible": true
}
]
"settings": []
}

@@ -1,33 +1,38 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per Streaming Altadefinizione
# Canale per Popcorn Stream
# ------------------------------------------------------------
"""
Trasformate le sole def per support.menu e support.scrape
da non inviare nel test.
Test solo a trasformazione completa
"""
from core import support
from core import support, httptools
from core.item import Item
from specials import autoplay
from platformcode import config
__channel__ = "streamingaltadefinizione"
host = config.get_channel_url(__channel__)
# __channel__ = "streamingaltadefinizione"
# host = config.get_channel_url(__channel__)
host = headers = ''
list_servers = ['verystream', 'openload', 'wstream']
list_quality = ['1080p', 'HD', 'DVDRIP', 'SD', 'CAM']
def findhost():
global host, headers
permUrl = httptools.downloadpage('https://www.popcornstream.info', follow_redirects=False).headers
if 'google' in permUrl['location']:
if host[:4] != 'http':
host = 'https://'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
else:
host = permUrl['location'].replace('https://www.google.it/search?q=site:', '')
else:
host = permUrl['location']
headers = [['Referer', host]]
@support.menu
def mainlist(item):
findhost()
film = ["/film/"]
anime = ["/genere/anime/",
('Film Anime', ["/genere/anime/", 'peliculas']),
('Film Anime per genere', ["/genere/anime/", 'generos'])
]
anime = ["/genere/anime/"]
tvshow = ["/serietv/"]
top = [('Generi',['', 'generos'])]
return locals()
@@ -49,30 +54,29 @@ def generos(item):
def peliculas(item):
return support.dooplay_films(item)
findhost()
return support.dooplay_peliculas(item, True if "/genere/" in item.url else False)
def episodios(item):
findhost()
return support.dooplay_get_episodes(item)
def findvideos(item):
findhost()
itemlist = []
for link in support.dooplay_get_links(item, host):
server = link['server'][:link['server'].find(".")]
itemlist.append(
Item(channel=item.channel,
action="play",
title=server + " [COLOR blue][" + link['title'] + "][/COLOR]",
url=link['url'],
server=server,
fulltitle=item.fulltitle,
thumbnail=item.thumbnail,
show=item.show,
quality=link['title'],
contentType=item.contentType,
folder=False))
if link['title'] != 'Guarda il trailer':
itemlist.append(
Item(channel=item.channel,
action="play",
url=link['url'],
fulltitle=item.fulltitle,
thumbnail=item.thumbnail,
show=item.show,
quality=link['title'],
contentType=item.contentType,
folder=False))
autoplay.start(itemlist, item)
return itemlist
return support.server(item, itemlist=itemlist)

@@ -6,7 +6,7 @@
"adult": false,
"thumbnail": "toonitalia.png",
"banner": "toonitalia.png",
"categories": ["tvshow", "movie", "vosi", "anime"],
"categories": ["tvshow", "movie", "vos", "anime"],
"settings": [
{
"id": "include_in_global_search",

@@ -40,7 +40,7 @@ def search(item, text):
def peliculas(item):
return support.dooplay_films(item, blacklist)
return support.dooplay_peliculas(item, False, blacklist)
def findvideos(item):

@@ -88,7 +88,7 @@ def getchanneltypes(view="thumb_"):
channel_types.append("adult")
# channel_language = config.get_setting("channel_language", default="all")
channel_language = auto_filter()[0]
channel_language = auto_filter()
logger.info("channel_language=%s" % channel_language)
# Ahora construye el itemlist ordenadamente
@@ -138,7 +138,7 @@ def filterchannels(category, view="thumb_"):
logger.info("channel_files encontrados %s" % (len(channel_files)))
# channel_language = config.get_setting("channel_language", default="all")
channel_language, channel_language_list = auto_filter()
channel_language = auto_filter()
logger.info("channel_language=%s" % channel_language)
for channel_path in channel_files:
@@ -193,15 +193,14 @@ def filterchannels(category, view="thumb_"):
# Se muestran sólo los idiomas filtrados, cast o lat
# Los canales de adultos se mostrarán siempre que estén activos
for c in channel_language_list:
if c in channel_parameters["language"]:
L = True
else:
L = False
# for channel_language_list in channel_language_list:
# if c in channel_parameters["language"]:
# L = True
# else:
# L = False
# logger.info('CCLANG= ' + channel_language + ' ' + str(channel_language_list))
if channel_language != "all" and "*" not in channel_parameters["language"] \
and L == False and channel_language not in channel_parameters["language"]:
logger.info('STOP!!!!')
and channel_language not in str(channel_parameters["language"]):
continue
# Se salta el canal si está en una categoria filtrado
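# --- Illustrative example (not part of channelselector) ---
# Hypothetical values showing the effect of the simplified language check above:
# str(...) turns the language list into "['ita', 'sub-ita']", so a plain substring
# test replaces the old per-element L flag.
channel_language_example = 'ita'
channel_parameters_example = {"language": ["ita", "sub-ita"]}
skip_example = (channel_language_example != "all"
                and "*" not in channel_parameters_example["language"]
                and channel_language_example not in str(channel_parameters_example["language"]))
print(skip_example)  # False -> the channel is kept in the list
# --- End of example ---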
@@ -291,12 +290,13 @@ def set_channel_info(parameters):
content = ''
langs = parameters['language']
lang_dict = {'ita':'Italiano',
'vosi':'Sottotitolato in Italiano',
'sub-ita':'Sottotitolato in Italiano',
'*':'Italiano, Sottotitolato in Italiano'}
for lang in langs:
# if 'vos' in parameters['categories']:
# lang = '*'
# if 'vosi' in parameters['categories']:
# if 'sub-ita' in parameters['categories']:
# lang = 'ita'
if lang in lang_dict:
@@ -320,25 +320,33 @@ def set_channel_info(parameters):
def auto_filter(auto_lang=False):
import xbmc, xbmcaddon
addon = xbmcaddon.Addon('metadata.themoviedb.org')
def_lang = addon.getSetting('language')
lang = 'all'
lang_list = ['all']
lang_dict = {'it':'ita'}
lang_list_dict = {'it':['ita','vosi']}
if config.get_setting("channel_language") == 'auto' or auto_lang == True:
lang = lang_dict[def_lang]
lang_list = lang_list_dict[def_lang]
lang = config.get_localized_string(20001)
else:
lang = config.get_setting("channel_language", default="all")
lang_list = lang_list_dict[def_lang]
return lang, lang_list
return lang
# import xbmc, xbmcaddon
# addon = xbmcaddon.Addon('metadata.themoviedb.org')
# def_lang = addon.getSetting('language')
# lang = 'all'
# lang_list = ['all']
# lang_dict = {'it':'ita'}
# lang_list_dict = {'it':['ita','vosi']}
# if config.get_setting("channel_language") == 'auto' or auto_lang == True:
# lang = lang_dict[def_lang]
# lang_list = lang_list_dict[def_lang]
# else:
# lang = config.get_setting("channel_language", default="all")
# lang_list = lang_list_dict[def_lang]
# return lang, lang_list
def thumb(itemlist=[], genre=False):

@@ -258,7 +258,7 @@ def get_default_settings(channel_name):
else:
control['label'] = config.get_localized_string(70727) + ' - ' + label.capitalize()
control['default'] = True if control['id'] not in default_off else False
control['default'] = control['default'] if control['id'] not in default_off else False
channel_controls.append(control)
# elif control['id'] == 'filter_languages':
@@ -269,7 +269,7 @@ def get_default_settings(channel_name):
elif control['id'] not in not_active and 'include_in_newest' not in control['id']:
if type(control['default']) == bool:
control['default'] = True if control['id'] not in default_off else False
control['default'] = control['default'] if control['id'] not in default_off else False
channel_controls.append(control)
if renumber:
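# --- Illustrative example (not part of settings handling) ---
# Hypothetical controls showing the revised default handling above: the default
# declared in the channel json is now preserved unless the id is in default_off.
default_off_example = ['checklinks']
controls_example = [{'id': 'checklinks', 'default': True},
                    {'id': 'include_in_global_search', 'default': True}]
for control in controls_example:
    control['default'] = control['default'] if control['id'] not in default_off_example else False
print(controls_example)
# [{'id': 'checklinks', 'default': False}, {'id': 'include_in_global_search', 'default': True}]
# --- End of example ---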

@@ -318,7 +318,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
video_urls.extend(response)
except:
logger.error("Error al obtener la url en modo free")
error_messages.append("Se ha producido un error en %s" % server_name)
error_messages.append(config.get_localized_string(60006) % server_name)
import traceback
logger.error(traceback.format_exc())

@@ -18,6 +18,7 @@ from specials import autoplay
def hdpass_get_servers(item):
# Carica la pagina
itemlist = []
data = httptools.downloadpage(item.url).data.replace('\n', '')
patron = r'<iframe(?: id="[^"]+")? width="[^"]+" height="[^"]+" src="([^"]+)"[^>]+><\/iframe>'
url = scrapertoolsV2.find_single_match(data, patron).replace("?alta", "")
@@ -46,23 +47,21 @@ def hdpass_get_servers(item):
mir = scrapertoolsV2.find_single_match(data, patron_mir)
for mir_url, server in scrapertoolsV2.find_multiple_matches(mir, '<option.*?value="([^"]+?)">([^<]+?)</value>'):
for mir_url, srv in scrapertoolsV2.find_multiple_matches(mir, '<option.*?value="([^"]+?)">([^<]+?)</value>'):
data = httptools.downloadpage(urlparse.urljoin(url, mir_url)).data.replace('\n', '')
for media_label, media_url in scrapertoolsV2.find_multiple_matches(data, patron_media):
itemlist.append(Item(channel=item.channel,
action="play",
title=item.title + typo(server, '-- [] color kod') + typo(res_video, '-- [] color kod'),
fulltitle=item.fulltitle,
quality=res_video,
show=item.show,
thumbnail=item.thumbnail,
contentType=item.contentType,
server=server,
url=url_decode(media_url)))
log("video -> ", res_video)
return controls(itemlist, item, AutoPlay=True, CheckLinks=True)
return server(item, itemlist=itemlist)
def url_decode(url_enc):
@@ -182,6 +181,24 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
regexDbg(item, patron, headers, block)
known_keys = ['url', 'title', 'title2', 'season', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang']
# Legenda known_keys per i groups nei patron
# known_keys = ['url', 'title', 'title2', 'season', 'episode', 'thumb', 'quality',
# 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang']
# url = link relativo o assoluto alla pagina titolo film/serie
# title = titolo Film/Serie/Anime/Altro
# title2 = titolo dell'episodio Serie/Anime/Altro
# season = stagione in formato numerico
# episode = numero episodio, in formato numerico.
# thumb = link relativo o assoluto alla locandina Film/Serie/Anime/Altro
# quality = qualità indicata del video
# year = anno in formato numerico (4 cifre)
# duration = durata del Film/Serie/Anime/Altro
# genere = genere del Film/Serie/Anime/Altro. Es: avventura, commedia
# rating = punteggio/voto in formato numerico
# type = tipo del video. Es. movie per film o tvshow per le serie. Di solito sono discriminanti usati dal sito
# lang = lingua del video. Es: ITA, Sub-ITA, Sub, SUB ITA.
# AVVERTENZE: Se il titolo è trovato nella ricerca TMDB/TVDB/Altro allora le locandine e altre info non saranno quelle recuperate nel sito!
stagione = '' # per quei siti che hanno la stagione nel blocco ma non nelle puntate
for i, match in enumerate(matches):
if pagination and (pag - 1) * pagination > i: continue # pagination
@@ -204,6 +221,9 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
season = scraped['season']
if stagione:
episode = season +'x'+ scraped['episode']
elif item.contentType == 'tvshow' and (scraped['episode'] == '' and season == ''):
item.news = 'season_completed'
episode = ''
else:
episode = re.sub(r'\s-\s|-|x|&#8211|&#215;', 'x', scraped['episode']) if scraped['episode'] else ''
@@ -216,11 +236,10 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
# make formatted Title [longtitle]
s = ' - '
title = episode + (s if episode and title else '') + title
title = episode + (s if episode and title else '') + title
longtitle = title + (s if title and title2 else '') + title2
longtitle = typo(longtitle, 'bold')
longtitle += (typo(Type,'_ () bold') if Type else '') + (typo(quality, '_ [] color kod') if quality else '')
longtitle += typo(quality, '_ [] color kod') if quality else ''
lang1, longtitle = scrapeLang(scraped, lang, longtitle)
@@ -275,10 +294,11 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
infoLabels=infolabels,
thumbnail=item.thumbnail if function == 'episodios' else scraped["thumb"] ,
args=item.args,
contentSerieName= title if item.contentType or CT != 'movie' and function != 'episodios' else item.fulltitle if function == 'episodios' else '',
contentTitle= title if item.contentType or CT == 'movie' else '',
contentSerieName= scraped['title'] if item.contentType or CT != 'movie' and function != 'episodios' else item.fulltitle if function == 'episodios' else '',
contentTitle= scraped['title'] if item.contentType or CT == 'movie' else '',
contentLanguage = lang1,
contentEpisodeNumber=episode if episode else ''
contentEpisodeNumber=episode if episode else '',
news= item.news if item.news else ''
)
for lg in list(set(listGroups).difference(known_keys)):
@@ -319,10 +339,10 @@ def scrape(func):
# IMPORTANT 'type' is a special key, to work need typeContentDict={} and typeActionDict={}
def wrapper(*args):
function = func.__name__
itemlist = []
args = func(*args)
function = func.__name__ if not 'actLike' in args else args['actLike']
# log('STACK= ',inspect.stack()[1][3])
item = args['item']
@@ -342,8 +362,8 @@ def scrape(func):
headers = ''
patronNext = args['patronNext'] if 'patronNext' in args else ''
patronBlock = args['patronBlock'] if 'patronBlock' in args else ''
typeActionDict = args['type_action_dict'] if 'type_action_dict' in args else {}
typeContentDict = args['type_content_dict'] if 'type_content_dict' in args else {}
typeActionDict = args['typeActionDict'] if 'typeActionDict' in args else {}
typeContentDict = args['typeContentDict'] if 'typeContentDict' in args else {}
debug = args['debug'] if 'debug' in args else False
log('STACK= ', inspect.stack()[1][3])
if 'pagination' in args and inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']: pagination = args['pagination'] if args['pagination'] else 20
@@ -389,17 +409,18 @@ def scrape(func):
# next page for pagination
if pagination and len(matches) >= pag * pagination:
itemlist.append(
Item(channel=item.channel,
action = item.action,
contentType=item.contentType,
title=typo(config.get_localized_string(30992), 'color kod bold'),
fulltitle= item.fulltitle,
show= item.show,
url=item.url,
args=item.args,
page=pag + 1,
thumbnail=thumb()))
if inspect.stack()[1][3] != 'get_newest':
itemlist.append(
Item(channel=item.channel,
action = item.action,
contentType=item.contentType,
title=typo(config.get_localized_string(30992), 'color kod bold'),
fulltitle= item.fulltitle,
show= item.show,
url=item.url,
args=item.args,
page=pag + 1,
thumbnail=thumb()))
if action != 'play' and function != 'episodios' and 'patronMenu' not in args:
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
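A note on the pagination logic used by the wrapper: items outside the current page window are skipped, and a "next page" entry is only emitted when the page was completely filled. A self-contained sketch of that slicing rule (hypothetical data, not the addon's code):

    def paginate(matches, page, per_page=20):
        # returns the slice for `page` and whether a 'next page' entry is needed
        start = (page - 1) * per_page
        current = [m for i, m in enumerate(matches) if start <= i < start + per_page]
        has_next = len(matches) >= page * per_page
        return current, has_next

    titles = list(range(45))              # 45 hypothetical scraped titles
    page2, more = paginate(titles, 2)     # titles 20..39, more is True
    page3, more = paginate(titles, 3)     # titles 40..44, more is False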
@@ -475,29 +496,31 @@ def dooplay_get_links(item, host):
@scrape
def dooplay_get_episodes(item):
item.contentType = "episode"
patron = '<li class="mark-[0-9]+">.*?<img.*?(?:data-lazy-)?src="(?P<thumb>[^"]+).*?(?P<episode>[0-9]+ - [0-9]+).*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+).*?(?P<year>[0-9]{4})'
def itemlistHook(itemlist):
return videolibrary(itemlist, item, function='episodios')
actLike = 'episodios'
return locals()
@scrape
def dooplay_films(item, blacklist=""):
def dooplay_peliculas(item, mixed=False, blacklist=""):
actLike = 'peliculas'
if item.args == 'searchPage':
return dooplay_search_vars(item, blacklist)
else:
if item.contentType == 'movie':
action = 'findvideos'
patron = '<article id="post-[0-9]+" class="item movies">.*?<img src="(?!data)(?P<thumb>[^"]+)".*?<span class="quality">(?P<quality>[^<>]+).*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></h3>.*?(?:<span>[^<>]*(?P<year>[0-9]{4})</span>|</article>).*?(?:<span>(?P<duration>[0-9]+) min</span>|</article>).*?(?:<div class="texto">(?P<plot>[^<>]+)|</article>).*?(?:genres">(?P<genre>.*?)</div>|</article>)'
patron = '<article id="post-[0-9]+" class="item movies">.*?<img src="(?!data)(?P<thumb>[^"]+)".*?<span class="quality">(?P<quality>[^<>]+).*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></h3>.*?(?:<span>[^<>]*(?P<year>[0-9]{4})</span>|</article>)'
else:
action = 'episodios'
patron = '<article id="post-[0-9]+" class="item tvshows">.*?<img src="(?!data)(?P<thumb>[^"]+)".*?(?:<span class="quality">(?P<quality>[^<>]+))?.*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></h3>.*?(?:<span>(?P<year>[0-9]{4})</span>|</article>).*?(?:<div class="texto">(?P<plot>[^<>]+)|</article>).*?(?:genres">(?P<genre>.*?)</div>|</article>)'
patron = '<article id="post-[0-9]+" class="item (?P<type>' + ('\w+' if mixed else 'tvshows') + ')">.*?<img src="(?!data)(?P<thumb>[^"]+)".*?(?:<span class="quality">(?P<quality>[^<>]+))?.*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></h3>.*?(?:<span>(?P<year>[0-9]{4})</span>|</article>).*?(?:<div class="texto">(?P<plot>[^<>]+)|</article>).*?(?:genres">(?P<genre>.*?)</div>|</article>)'
patronNext = '<div class="pagination">.*?class="current".*?<a href="([^"]+)".*?<div class="resppages">'
addVideolibrary = False
if mixed:
typeActionDict={'findvideos': ['movies'], 'episodios': ['tvshows']}
typeContentDict={'film': ['movies'], 'serie': ['tvshows']}
return locals()
@@ -505,8 +528,13 @@ def dooplay_films(item, blacklist=""):
def dooplay_search(item, blacklist=""):
return dooplay_search_vars(item, blacklist)
def dooplay_search_vars(item, blacklist):
if item.contentType == 'movie':
if item.contentType == 'list': # ricerca globale
type = '(?P<type>movies|tvshows)'
typeActionDict = {'findvideos': ['movies'], 'episodios': ['tvshows']}
typeContentDict = {'movie': ['movies'], 'episode': ['tvshows']}
elif item.contentType == 'movie':
type = 'movies'
action = 'findvideos'
else:
@@ -514,12 +542,13 @@ def dooplay_search_vars(item, blacklist):
action = 'episodios'
patron = '<div class="result-item">.*?<img src="(?P<thumb>[^"]+)".*?<span class="' + type + '">(?P<quality>[^<>]+).*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a>.*?<span class="year">(?P<year>[0-9]{4}).*?<div class="contenido"><p>(?P<plot>[^<>]+)'
patronNext = '<a class="arrow_pag" href="([^"]+)"><i id="nextpagination"'
def fullItemlistHook(itemlist):
# if it is a next page
if itemlist[-1].title == typo(config.get_localized_string(30992), 'color kod bold'):
itemlist[-1].action = 'peliculas'
itemlist[-1].args = 'searchPage'
return itemlist
# def fullItemlistHook(itemlist):
# # if it is a next page
# if itemlist[-1].title == typo(config.get_localized_string(30992), 'color kod bold'):
# itemlist[-1].action = 'peliculas'
# itemlist[-1].args = 'searchPage'
# return itemlist
return locals()
def swzz_get_url(item):
@@ -747,6 +776,7 @@ def match(item, patron='', patronBlock='', headers='', url='', post=''):
if patron:
matches = scrapertoolsV2.find_multiple_matches(block, patron)
if not matches: matches = ['']
log('MATCHES= ',matches)
return matches, block
@@ -782,7 +812,8 @@ def download(itemlist, item, typography='', function_level=1, function=''):
url=item.url,
action='save_download',
from_action=from_action,
contentTitle=contentTitle
contentTitle=contentTitle,
path=item.path
))
if from_action == 'episodios':
itemlist.append(
@@ -827,7 +858,8 @@ def videolibrary(itemlist, item, typography='', function_level=1, function=''):
contentTitle=item.contentTitle if item.contentTitle else ''
if (function == 'findvideos' and contentType == 'movie') \
or (function == 'episodios' and contentType != 'movie'):
or (function == 'episodios' and contentType != 'movie') \
or function == 'get_seasons' and item.channel == 'community':
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel,
@@ -839,7 +871,8 @@ def videolibrary(itemlist, item, typography='', function_level=1, function=''):
url=item.url,
action=action,
extra=extra,
contentTitle=contentTitle
contentTitle=contentTitle,
path=item.path
))
return itemlist
@@ -882,23 +915,33 @@ def pagination(itemlist, item, page, perpage, function_level=1):
def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True, down_load=True):
if not data:
if not data and not itemlist:
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
itemList = servertools.find_video_items(data=str(data))
itemlist = itemlist + itemList
verifiedItemlist = []
for videoitem in itemlist:
if not videoitem.server:
findS = servertools.findvideos(videoitem.url)
if findS:
findS = findS[0]
else:
log(videoitem, 'Non supportato')
continue
videoitem.server = findS[2]
videoitem.title = findS[0]
item.title = item.contentTitle if config.get_localized_string(30161) in item.title else item.title
videoitem.title = item.title + (typo(videoitem.title, '_ color kod []') if videoitem.title else "") + (typo(videoitem.quality, '_ color kod []') if videoitem.quality else "")
videoitem.title = item.fulltitle + (typo(videoitem.title, '_ color kod []') if videoitem.title else "") + (typo(videoitem.quality, '_ color kod []') if videoitem.quality else "")
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
videoitem.contentType = item.contentType
verifiedItemlist.append(videoitem)
return controls(itemlist, item, AutoPlay, CheckLinks, down_load)
return controls(verifiedItemlist, item, AutoPlay, CheckLinks, down_load)
def controls(itemlist, item, AutoPlay=True, CheckLinks=True, down_load=True):
from core import jsontools

View File

@@ -833,7 +833,8 @@ def add_tvshow(item, channel=None):
# del item.tmdb_stat # Clear the status so it does not get written to the video library
# Get the list of episodes
itemlist = getattr(channel, item.action)(item)
if item.channel == 'community':
itemlist = getattr(channel, item.action)(item)
insertados, sobreescritos, fallidos, path = save_tvshow(item, itemlist)

View File

@@ -84,17 +84,31 @@ class Client(object):
def get_files(self):
files = []
enc_url = None
if self.files:
for file in self.files:
n = file.name.encode("utf8")
u = "http://" + self.ip + ":" + str(self.port) + "/" + urllib.quote(n)
s = file.size
file_id = file.file_id
enc_url = file.url
files.append({"name":n,"url":u,"size":s, "id": file_id})
if len(self.files) == 1:
try:
code = httptools.downloadpage(enc_url, only_headers=True).code
if code > 300:
return code
else:
return files
except:
print(traceback.format_exc())
pass
return files
def add_url(self, url):
url = url.split("/#")[1]
url = url.split("#")[1]
id_video = None
if "|" in url:
url, id_video = url.split("|")
@@ -135,7 +149,8 @@ class Client(object):
def api_req(self, req, get=""):
seqno = random.randint(0, 0xFFFFFFFF)
url = 'https://g.api.mega.co.nz/cs?id=%d%s' % (seqno, get)
return json.loads(self.post(url, json.dumps([req])))[0]
page = httptools.downloadpage(url, post=json.dumps([req])).data
return json.loads(page)[0]
def base64urldecode(self,data):
data += '=='[(2 - len(data) * 3) % 4:]
@@ -165,12 +180,11 @@ class Client(object):
def aes_cbc_decrypt(self, data, key):
try:
from Cryptodome.Cipher import AES
decryptor = AES.new(key, AES.MODE_CBC, '\0' * 16)
except:
from Crypto.Cipher import AES
decryptor = AES.new(key, AES.MODE_CBC, '\0' * 16)
#decryptor = aes.AESModeOfOperationCBC(key, iv='\0' * 16)
except:
import jscrypto
decryptor = jscrypto.new(key, jscrypto.MODE_CBC, '\0' * 16)
return decryptor.decrypt(data)
def aes_cbc_decrypt_a32(self,data, key):
@@ -179,20 +193,6 @@ class Client(object):
def decrypt_key(self,a, key):
return sum((self.aes_cbc_decrypt_a32(a[i:i+4], key) for i in xrange(0, len(a), 4)), ())
def post(self, url, data):
return httptools.downloadpage(url, data).data
import ssl
from functools import wraps
def sslwrap(func):
@wraps(func)
def bar(*args, **kw):
kw['ssl_version'] = ssl.PROTOCOL_TLSv1
return func(*args, **kw)
return bar
ssl.wrap_socket = sslwrap(ssl.wrap_socket)
return urllib.urlopen(url, data).read()
def dec_attr(self, attr, key):
attr = self.aes_cbc_decrypt(attr, self.a32_to_str(key)).rstrip('\0')
if not attr.endswith("}"):

View File

@@ -1,4 +1,5 @@
import urllib2
import traceback
class Cursor(object):
def __init__(self, file):
@@ -21,9 +22,11 @@ class Cursor(object):
req.headers['Range'] = 'bytes=%s-' % (offset)
try:
self.conn = urllib2.urlopen(req)
self.prepare_decoder(offset)
try:
self.prepare_decoder(offset)
except:
print(traceback.format_exc())
except:
# The file URL expires after a while; if it returns a 403 error, retry by requesting the URL again through the API
self.mega_request(offset, True)
def read(self,n=None):
@@ -35,7 +38,6 @@ class Cursor(object):
self.pos+=len(res)
return res
def seek(self,n):
if n>self._file.size:
n=self._file.size
@@ -53,20 +55,21 @@ class Cursor(object):
def __exit__(self,exc_type, exc_val, exc_tb):
self._file.cursors.remove(self)
if len(self._file.cursors) == 0: self._file.cursor = False
def decode(self, data):
return self.decryptor.decrypt(data)
def prepare_decoder(self,offset):
initial_value = self.initial_value + int(offset/16)
try:
from Cryptodome.Cipher import AES
from Cryptodome.Util import Counter
self.decryptor = AES.new(self._file._client.a32_to_str(self.k), AES.MODE_CTR, counter = Counter.new(128, initial_value = initial_value))
except:
from Crypto.Cipher import AES
from Crypto.Util import Counter
self.decryptor = AES.new(self._file._client.a32_to_str(self.k), AES.MODE_CTR, counter = Counter.new(128, initial_value = initial_value))
except:
from pyaes import aes
self.decryptor = aes.AESModeOfOperationCTR(f=self,key=self._client.a32_to_str(self.k),counter=aes.Counter(initial_value=initial_value))
rest = offset - int(offset/16)*16
if rest:
self.decode(str(0)*rest)
self.decode(str(0)*rest)
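The seek arithmetic in prepare_decoder is unchanged by this diff: AES-CTR advances one counter step per 16-byte block, so seeking to offset means starting the counter offset // 16 blocks ahead and discarding the offset % 16 bytes that precede the seek point. A standalone sketch with PyCryptodome (key and counter value are made up):

    from Cryptodome.Cipher import AES
    from Cryptodome.Util import Counter

    key = b'0123456789abcdef'        # hypothetical 16-byte key
    base_value = 42                  # hypothetical initial counter value

    def decryptor_at(offset):
        ctr = Counter.new(128, initial_value=base_value + offset // 16)
        dec = AES.new(key, AES.MODE_CTR, counter=ctr)
        dec.decrypt(b'\0' * (offset % 16))   # discard the partial block before the offset
        return dec

    # the next decrypt() call on decryptor_at(100) yields plaintext starting at byte 100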

View File

@@ -491,18 +491,22 @@ class UnshortenIt(object):
headers = {
"Cookie": hashlib.md5(ip+day).hexdigest() + "=1"
}
uri = uri.replace('sb/','sb1/')
uri = uri.replace('akv/','akv1/')
uri = uri.replace('wss/','wss1/')
uri = uri.replace('wsd/','wsd1/')
uri = uri.replace('sb/', 'sb1/')
uri = uri.replace('akv/', 'akv1/')
uri = uri.replace('wss/', 'wss1/')
uri = uri.replace('wsd/', 'wsd1/')
r = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False)
uri = r.headers['location']
if 'Wait 1 hour' in r.data:
uri = ''
logger.info('IP bannato da vcrypt, aspetta un ora')
else:
uri = r.headers['location']
if "4snip" in uri:
if 'out_generator' in uri:
uri = re.findall('url=(.*)$', uri)[0]
else:
uri = decrypt(uri)
elif '/decode/' in uri:
uri = decrypt(uri.split('/')[-1])
return uri, r.code if r else 200
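For context on the vcrypt change above: the shortener is resolved by sending a cookie named md5(ip + day) with value 1, rewriting the path prefixes (sb/ to sb1/ and so on) and reading the Location header, and the new code treats a "Wait 1 hour" body as a temporary IP ban. A hedged sketch of that flow using requests (the ip and day inputs come from earlier code not shown here):

    import hashlib
    import requests   # the addon itself goes through httptools

    def resolve_vcrypt(uri, ip, day):
        headers = {'Cookie': hashlib.md5((ip + day).encode()).hexdigest() + '=1'}
        for old, new in (('sb/', 'sb1/'), ('akv/', 'akv1/'), ('wss/', 'wss1/'), ('wsd/', 'wsd1/')):
            uri = uri.replace(old, new)
        r = requests.get(uri, headers=headers, allow_redirects=False, timeout=20)
        if 'Wait 1 hour' in r.text:
            return None                       # IP temporarily banned by vcrypt
        return r.headers.get('location')      # the unshortened target URL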

View File

@@ -366,7 +366,7 @@ def get_localized_string(code):
def get_localized_category(categ):
categories = {'movie': get_localized_string(30122), 'tvshow': get_localized_string(30123),
'anime': get_localized_string(30124), 'documentary': get_localized_string(30125),
'vos': get_localized_string(30136), 'vosi': get_localized_string(70566), 'adult': get_localized_string(30126),
'vos': get_localized_string(30136), 'sub-ita': get_localized_string(70566), 'adult': get_localized_string(30126),
'direct': get_localized_string(30137), 'torrent': get_localized_string(70015)}
return categories[categ] if categ in categories else categ

View File

@@ -515,6 +515,7 @@ def play_from_library(item):
return
else:
item = videolibrary.play(itemlist[seleccion])[0]
item.play_from = 'window'
platformtools.play_video(item)
from specials import autoplay

View File

@@ -22,7 +22,7 @@ from channelselector import get_thumb
from core import channeltools
from core import trakt_tools, scrapertoolsV2
from core.item import Item
from platformcode import logger
from platformcode import logger, keymaptools
from platformcode import unify
addon = xbmcaddon.Addon('plugin.video.kod')
@@ -587,7 +587,7 @@ def set_context_commands(item, parent_item):
if (item.channel != "videolibrary" and not config.get_localized_string(70585) in str(item.context)) \
or (item.channel != "videolibrary" and config.get_localized_string(70585) in str(item.context) and config.get_localized_string(70714) in str(item.context)):
# Add the series to the video library
if item.action in ["episodios", "get_episodios"] and item.contentSerieName:
if item.action in ["episodios", "get_episodios", "get_seasons"] and item.contentSerieName:
context_commands.append((config.get_localized_string(60352), "XBMC.RunPlugin(%s?%s)" %
(sys.argv[0], item.clone(action="add_serie_to_library",
from_action=item.action).tourl())))
@@ -608,7 +608,8 @@ def set_context_commands(item, parent_item):
# elif item.contentSerieName:
# Download series
elif item.contentType == "tvshow" and item.action in ["episodios"]:
elif (item.contentType == "tvshow" and item.action in ["episodios"]) or \
(item.contentType == "tvshow" and item.action in ['get_seasons'] and config.get_setting('show_seasons',item.channel) == False):
item.contentType == "tvshow"
context_commands.append((config.get_localized_string(60355), "XBMC.RunPlugin(%s?%s)" %
(sys.argv[0], item.clone(channel="downloads", action="save_download",
@@ -629,12 +630,12 @@ def set_context_commands(item, parent_item):
from_action=item.action).tourl())))
# Download season
# elif item.contentType == "season":
# context_commands.append((config.get_localized_string(60357), "XBMC.RunPlugin(%s?%s)" %
# (sys.argv[0], item.clone(channel="downloads", action="save_download",
# from_channel=item.channel,
# from_action=item.action,
# download='season').tourl())))
elif item.contentType == "season":
context_commands.append((config.get_localized_string(60357), "XBMC.RunPlugin(%s?%s)" %
(sys.argv[0], item.clone(channel="downloads", action="save_download",
from_channel=item.channel,
from_action=item.action,
download='season').tourl())))
# Open settings
if parent_item.channel not in ["setting", "news", "search"]:
@@ -656,12 +657,17 @@ def set_context_commands(item, parent_item):
context_commands = sorted(context_commands, key=lambda comand: comand[0])
# Quick menu
context_commands.insert(0, (config.get_localized_string(60360),
"XBMC.RunPlugin(%s?%s)" % (sys.argv[0], Item(channel='side_menu',
action="open_shortcut_menu",
parent=parent_item.tourl()).tourl(
))))
context_commands.insert(1, (config.get_localized_string(70737),
"XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(channel='side_menu',
action="open_menu",
parent=parent_item.tourl()).tourl(
))))
if config.dev_mode():
context_commands.insert(1, ("item info",
context_commands.insert(2, ("item info",
"XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(action="itemInfo",
parent=item.tojson()).tourl())))
return context_commands
@@ -673,6 +679,8 @@ def is_playing():
def play_video(item, strm=False, force_direct=False, autoplay=False):
logger.info()
if item.play_from == 'window':
force_direct=True
# logger.debug(item.tostring('\n'))
logger.debug('item play: %s'%item)
xbmc_player = XBMCPlayer()
@@ -916,10 +924,10 @@ def get_dialogo_opciones(item, default_action, strm, autoplay):
# "Descargar"
import xbmcaddon
addon = xbmcaddon.Addon('plugin.video.kod')
downloadenabled = addon.getSetting('downloadenabled')
if downloadenabled != "false":
opcion = config.get_localized_string(30153)
opciones.append(opcion)
# downloadenabled = addon.getSetting('downloadenabled')
# if downloadenabled != "false":
# opcion = config.get_localized_string(30153)
# opciones.append(opcion)
if item.isFavourite:
# "Quitar de favoritos"

View File

@@ -253,7 +253,7 @@ def set_lang(language):
lat=['latino','lat','la', 'espanol latino', 'espaol latino', 'zl', 'mx', 'co', 'vl']
vose=['subtitulado','subtitulada','sub','sub espanol','vose','espsub','su','subs castellano',
'sub: español', 'vs', 'zs', 'vs', 'english-spanish subs', 'ingles sub espanol']
vosi=['sottotitolato','sottotitolata','sub','sub ita','vosi','sub-ita','subs italiano',
sub_ita=['sottotitolato','sottotitolata','sub','sub ita','subs italiano',
'sub: italiano', 'inglese sottotitolato']
vos=['vos', 'sub ingles', 'engsub','ingles subtitulado', 'sub: ingles']
vo=['ingles', 'en','vo', 'ovos', 'eng','v.o', 'english']
@@ -272,8 +272,8 @@ def set_lang(language):
language = 'lat'
elif language in ita:
language = 'ita'
elif language in vosi:
language = 'vosi'
elif language in sub_ita:
language = 'sub-ita'
elif language in vose:
language = 'vose'
elif language in vos:
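The rename from vosi to sub-ita runs through this normalization table: set_lang folds the many spellings scraped from sites into one canonical code per bucket. A compact sketch of the same idea (word lists abbreviated and partly illustrative, not the full tables above):

    LANG_BUCKETS = {
        'ita':     ['italiano', 'ita', 'it'],
        'sub-ita': ['sottotitolato', 'sub ita', 'sub-ita', 'subs italiano'],
        'vose':    ['subtitulado', 'vose', 'sub espanol'],
        'vo':      ['ingles', 'en', 'vo', 'english'],
    }

    def normalize_lang(language):
        language = language.lower().strip()
        for code, words in LANG_BUCKETS.items():
            if language in words:
                return code
        return language   # unknown spellings pass through unchanged

    # normalize_lang('Sub Ita') -> 'sub-ita'; normalize_lang('English') -> 'vo'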

View File

@@ -3,6 +3,7 @@ import hashlib
import io
import os
import shutil
from cStringIO import StringIO
from core import httptools, filetools, downloadtools
from core.ziptools import ziptools
@@ -100,7 +101,7 @@ def check_addon_init():
patched = apply_patch(text, (file['patch']+'\n').encode('utf-8'))
if patched != text: # do not apply if already applied (e.g. zip downloaded from github)
if getSha(patched) == file['sha']:
if getShaStr(patched) == file['sha']:
localFile.seek(0)
localFile.truncate()
localFile.writelines(patched)
@@ -141,7 +142,7 @@ def check_addon_init():
localCommitFile.truncate()
localCommitFile.writelines(c['sha'])
localCommitFile.close()
xbmc.executebuiltin("UpdateLocalAddons")
else:
logger.info('Nessun nuovo aggiornamento')
@@ -210,49 +211,95 @@ def apply_patch(s,patch,revert=False):
def getSha(path):
f = open(path).read()
return githash.generic_hash(path, '100644', len(f)).hexdigest()
return githash.blob_hash(path, len(f)).hexdigest()
def getShaStr(str):
return githash.blob_hash(StringIO(str), len(str)).hexdigest()
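getShaStr applies the standard Git blob hashing rule, SHA-1 over the header "blob <length>\0" plus the content, which is what lets the updater compare a patched local file against the sha reported by the GitHub API. A minimal sketch, independent of the addon's githash helper:

    import hashlib

    def git_blob_sha(content):
        # equivalent to `git hash-object` on the same bytes
        if isinstance(content, str):
            content = content.encode('utf-8')
        header = b'blob %d\0' % len(content)
        return hashlib.sha1(header + content).hexdigest()

    # git_blob_sha(b'hello\n') == 'ce013625030ba8dba906f756967f9e9ca394464a'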
def updateFromZip():
dp = platformtools.dialog_progress_bg('Kodi on Demand', 'Aggiornamento in corso...')
dp = platformtools.dialog_progress_bg('Kodi on Demand', 'Installazione in corso...')
dp.update(0)
remotefilename = 'https://github.com/' + user + "/" + repo + "/archive/" + branch + ".zip"
localfilename = xbmc.translatePath("special://home/addons/") + "plugin.video.kod.update.zip"
localfilename = (xbmc.translatePath("special://home/addons/") + "plugin.video.kod.update.zip").encode('utf-8')
destpathname = xbmc.translatePath("special://home/addons/")
logger.info("remotefilename=%s" % remotefilename)
logger.info("localfilename=%s" % localfilename)
# preliminary cleanup
remove(localfilename)
removeTree(destpathname + "addon-" + branch)
import urllib
urllib.urlretrieve(remotefilename, localfilename, lambda nb, bs, fs, url=remotefilename: _pbhook(nb, bs, fs, url, dp))
urllib.urlretrieve(remotefilename, localfilename,
lambda nb, bs, fs, url=remotefilename: _pbhook(nb, bs, fs, url, dp))
# Unzip it
logger.info("decompressione...")
destpathname = xbmc.translatePath("special://home/addons/")
logger.info("destpathname=%s" % destpathname)
try:
hash = fixZipGetHash(localfilename)
unzipper = ziptools()
unzipper.extract(localfilename, destpathname)
import zipfile
with zipfile.ZipFile(io.FileIO(localfilename), "r") as zip_ref:
zip_ref.extractall(destpathname)
except Exception as e:
logger.info('Non sono riuscito ad estrarre il file zip')
logger.info(e)
dp.close()
return False
dp.update(95)
# clean everything up
shutil.rmtree(addonDir)
removeTree(addonDir)
filetools.rename(destpathname + "addon-" + branch, addonDir)
rename(destpathname + "addon-" + branch, addonDir)
logger.info("Cancellando il file zip...")
remove(localfilename)
dp.update(100)
dp.close()
xbmc.executebuiltin("UpdateLocalAddons")
return hash
def remove(file):
if os.path.isfile(file):
removed = False
while not removed:
try:
os.remove(file)
removed = True
except:
logger.info('File ' + file + ' NON eliminato')
def removeTree(dir):
if os.path.isdir(dir):
removed = False
while not removed:
try:
shutil.rmtree(dir)
removed = True
except:
logger.info('Cartella ' + dir + ' NON eliminato')
def rename(dir1, dir2):
renamed = False
while not renamed:
try:
filetools.rename(dir1, dir2)
renamed = True
except:
logger.info('cartella ' + dir1 + ' NON rinominata')
# https://stackoverflow.com/questions/3083235/unzipping-file-results-in-badzipfile-file-is-not-a-zip-file
def fixZipGetHash(zipFile):
f = io.FileIO(zipFile, 'r+b')

View File

@@ -21,6 +21,10 @@ msgctxt "#20000"
msgid "KOD"
msgstr ""
msgctxt "#20001"
msgid "eng"
msgstr ""
msgctxt "#30001"
msgid "Check for updates:"
msgstr ""
@@ -5660,3 +5664,7 @@ msgstr ""
msgctxt "#70735"
msgid "%s Special Episode Number"
msgstr ""
msgctxt "#70737"
msgid "[B]SIDE MENU[/B]"
msgstr ""

View File

@@ -21,6 +21,10 @@ msgctxt "#20000"
msgid "KOD"
msgstr "KOD"
msgctxt "#20001"
msgid "eng"
msgstr "ita"
msgctxt "#30001"
msgid "Check for updates:"
msgstr "Verifica aggiornamenti:"
@@ -5660,3 +5664,11 @@ msgstr "Seleziona gli Episodi della Stagione"
msgctxt "#70735"
msgid "%s Special Episode Number"
msgstr "Numero dell'Episodio Speciale %s"
msgctxt "#70736"
msgid "Completed Serie"
msgstr "Serie Completa"
msgctxt "#70737"
msgid "[B]SIDE MENU[/B]"
msgstr "[B]MENU LATERALE[/B]"

View File

@@ -106,7 +106,7 @@
<setting id="vose_color" type="labelenum" label="70142"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-10,true)+eq(-11,true)"/>
<setting id="vosi_color" type="labelenum" label="70566"
<setting id="sub-ita_color" type="labelenum" label="70566"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-11,true)+eq(-12,true)"/>
<setting id="vos_color" type="labelenum" label="70143"

View File

@@ -4,14 +4,18 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "https://(?:vidcloud.co|vcstream.to)/embed/([a-z0-9]+)",
"url": "https://vidcloud.co/player?fid=\\1&page=embed"
"pattern": "vcstream.to/(?:embed|f)/([A-z0-9]+)/([A-z0-9.]+)",
"url": "https://vcstream.to/embed/\\1/\\2"
},
{
"pattern": "https://vidcloud.co/v/([a-z0-9A-Z]+)",
"url": "https:\/\/vidcloud.co\/v\/\\1"
}
]
},
"free": true,
"id": "vidcloud",
"name": "vidcloud",
"id": "vcstream",
"name": "vcstream",
"settings": [
{
"default": false,
@@ -38,5 +42,5 @@
"visible": false
}
],
"thumbnail": "https://i.postimg.cc/xjpwG0rK/0a-RVDzlb-400x400.jpg"
"thumbnail": "http://i.imgur.com/l45Tk0G.png"
}
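These find_videos entries are plain regex/template pairs: every match of pattern found in a page is rewritten into the canonical embed URL through its captured groups. A small sketch of how one entry is applied (hypothetical page text; the real resolution happens in servertools):

    import re

    entry = {
        "pattern": r"vcstream.to/(?:embed|f)/([A-z0-9]+)/([A-z0-9.]+)",
        "url": r"https://vcstream.to/embed/\1/\2",
    }

    page = 'frame: https://vcstream.to/f/abc123/video.mp4 ...'
    urls = [re.sub(entry["pattern"], entry["url"], m.group(0))
            for m in re.finditer(entry["pattern"], page)]
    # urls == ['https://vcstream.to/embed/abc123/video.mp4']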

servers/vcstream.py (Normal file, 58 lines)
View File

@@ -0,0 +1,58 @@
# -*- coding: utf-8 -*-
# Icarus pv7
# Fix dentaku65
import urlparse
from core import httptools
from core import scrapertools
from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "We're Sorry" in data:
return False, config.get_localized_string(70292) % "vcstream"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
url = scrapertools.find_single_match(data, "url: '([^']+)',")
if url:
headers = dict()
headers['X-Requested-With'] = "XMLHttpRequest"
token = scrapertools.find_single_match(data, 'set-cookie: vidcloud_session=(.*?);')
token = token.replace("%3D", "")
if token:
headers['vidcloud_session'] = token
referer = scrapertools.find_single_match(data, "pageUrl = '([^']+)'")
if referer:
headers['Referer'] = referer
page_url = urlparse.urljoin(page_url, url)
data = httptools.downloadpage(page_url, headers=headers).data
data = data.replace('\\\\', '\\').replace('\\','')
media_urls = scrapertools.find_multiple_matches(data, '\{"file"\s*:\s*"([^"]+)"\}')
for media_url in media_urls:
ext = "mp4"
if "m3u8" in media_url:
ext = "m3u8"
video_urls.append(["%s [vcstream]" % ext, media_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -1,28 +0,0 @@
# Conector vidcloud By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[Cloud] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = data.replace('\\\\', '\\').replace('\\','')
patron = '"file":"([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:
if not ".vtt" in url:
video_urls.append(['vidcloud', url])
return video_urls

View File

@@ -7,9 +7,13 @@
"find_videos": {
"patterns": [
{
"pattern": "wstream.video/(?:embed-|videos/|video/)?([a-z0-9A-Z]+)",
"url": "http:\/\/wstream.video\/video\/\\1"
}
"pattern": "wstream\\.video/video\\.php\\?file_code=([a-z0-9A-Z]+)",
"url": "http:\/\/wstream.video\/videoj\/\\1"
},
{
"pattern": "wstream\\.video/(?:embed-|videos/|video/|videow/|videoj/)?([a-z0-9A-Z]+)",
"url": "http:\/\/wstream.video\/videoj\/\\1"
}
],
"ignore_urls": [ ]
},

View File

@@ -15,6 +15,11 @@
"pattern": "youtube.com/v/([0-9A-Za-z_-]{11})",
"url": "http://www.youtube.com/watch?v=\\1"
}
,
{
"pattern": "youtu.be/([0-9A-Za-z_-]{11})",
"url": "http://www.youtube.com/watch?v=\\1"
}
]
},
"free": true,

View File

@@ -86,7 +86,7 @@ def extract_flashvars(data):
def extract_videos(video_id):
fmt_value = {
5: "240p h263 flv",
6: "240p h263 flv",
6: "270p h263 flv",
18: "360p h264 mp4",
22: "720p h264 mp4",
26: "???",
@@ -108,10 +108,18 @@ def extract_videos(video_id):
85: "1080p h264 3D",
100: "360p vp8 3D",
101: "480p vp8 3D",
102: "720p vp8 3D"
102: "720p vp8 3D",
91:"144 h264 mp4",
92:"240 h264 mp4",
93:"360 h264 mp4",
94:"480 h264 mp4",
95:"720 h264 mp4",
96:"1080 h264 mp4",
132:"240 h264 mp4",
151:"72 h264 mp4"
}
url = 'http://www.youtube.com/get_video_info?video_id=%s&eurl=https://youtube.googleapis.com/v/%s&ssl_stream=1' % \
# from core.support import dbg; dbg()
url = 'https://www.youtube.com/get_video_info?video_id=%s&eurl=https://youtube.googleapis.com/v/%s&ssl_stream=1' % \
(video_id, video_id)
data = httptools.downloadpage(url).data
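Each entry of url_encoded_fmt_stream_map (and adaptive_fmts) is itself a query string, which is why the loop that follows parses it with parse_qsl before looking the itag up in the table above. A reduced sketch with a made-up stream descriptor:

    try:
        from urllib.parse import parse_qsl, unquote   # Python 3
    except ImportError:
        from urlparse import parse_qsl                # Python 2, the addon's runtime
        from urllib import unquote

    fmt_value = {18: "360p h264 mp4", 22: "720p h264 mp4"}

    # hypothetical descriptor shaped like one comma-separated entry of the stream map
    url_desc = 'itag=22&url=https%3A%2F%2Fexample.com%2Fvideoplayback%3Fid%3Dabc&quality=hd720'

    desc = dict(parse_qsl(url_desc))
    itag = int(desc['itag'])
    if itag in fmt_value and desc.get('url'):
        label = "(%s) [youtube]" % fmt_value[itag]
        video_url = unquote(desc['url'])
        # ('(720p h264 mp4) [youtube]', 'https://example.com/videoplayback?id=abc')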
@@ -132,53 +140,59 @@ def extract_videos(video_id):
js_signature = ""
youtube_page_data = httptools.downloadpage("http://www.youtube.com/watch?v=%s" % video_id).data
params = extract_flashvars(youtube_page_data)
data_flashvars =[]
if params.get('adaptive_fmts'):
data_flashvars += scrapertools.find_multiple_matches(params['adaptive_fmts'], '(fps.*?url[^,]+)')
if params.get('url_encoded_fmt_stream_map'):
data_flashvars = params["url_encoded_fmt_stream_map"].split(",")
for url_desc in data_flashvars:
url_desc_map = dict(urlparse.parse_qsl(url_desc))
if not url_desc_map.get("url") and not url_desc_map.get("stream"):
data_flashvars += params["url_encoded_fmt_stream_map"].split(",")
for url_desc in data_flashvars:
url_desc_map = dict(urlparse.parse_qsl(url_desc))
if not url_desc_map.get("url") and not url_desc_map.get("stream"):
continue
try:
key = int(url_desc_map["itag"])
if not fmt_value.get(key):
continue
try:
key = int(url_desc_map["itag"])
if not fmt_value.get(key):
continue
if url_desc_map.get("url"):
url = urllib.unquote(url_desc_map["url"])
elif url_desc_map.get("conn") and url_desc_map.get("stream"):
url = urllib.unquote(url_desc_map["conn"])
if url.rfind("/") < len(url) - 1:
url += "/"
url += urllib.unquote(url_desc_map["stream"])
elif url_desc_map.get("stream") and not url_desc_map.get("conn"):
url = urllib.unquote(url_desc_map["stream"])
if url_desc_map.get("url"):
url = urllib.unquote(url_desc_map["url"])
elif url_desc_map.get("conn") and url_desc_map.get("stream"):
url = urllib.unquote(url_desc_map["conn"])
if url.rfind("/") < len(url) - 1:
url += "/"
url += urllib.unquote(url_desc_map["stream"])
elif url_desc_map.get("stream") and not url_desc_map.get("conn"):
url = urllib.unquote(url_desc_map["stream"])
if url_desc_map.get("sig"):
url += "&signature=" + url_desc_map["sig"]
elif url_desc_map.get("s"):
sig = url_desc_map["s"]
if not js_signature:
urljs = scrapertools.find_single_match(youtube_page_data, '"assets":.*?"js":\s*"([^"]+)"')
urljs = urljs.replace("\\", "")
if urljs:
if not re.search(r'https?://', urljs):
urljs = urlparse.urljoin("https://www.youtube.com", urljs)
data_js = httptools.downloadpage(urljs).data
from jsinterpreter import JSInterpreter
funcname = scrapertools.find_single_match(data_js, '\.sig\|\|([A-z0-9$]+)\(')
if not funcname:
funcname = scrapertools.find_single_match(data_js, '["\']signature["\']\s*,\s*'
'([A-z0-9$]+)\(')
if not funcname:
funcname = scrapertools.find_single_match(data_js, r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(')
jsi = JSInterpreter(data_js)
js_signature = jsi.extract_function(funcname)
if url_desc_map.get("sig"):
url += "&signature=" + url_desc_map["sig"]
elif url_desc_map.get("s"):
sig = url_desc_map["s"]
if not js_signature:
urljs = scrapertools.find_single_match(youtube_page_data, '"assets":.*?"js":\s*"([^"]+)"')
urljs = urljs.replace("\\", "")
if urljs:
if not re.search(r'https?://', urljs):
urljs = urlparse.urljoin("https://www.youtube.com", urljs)
data_js = httptools.downloadpage(urljs).data
from jsinterpreter import JSInterpreter
funcname = scrapertools.find_single_match(data_js, '\.sig\|\|([A-z0-9$]+)\(')
if not funcname:
funcname = scrapertools.find_single_match(data_js, '["\']signature["\']\s*,\s*'
'([A-z0-9$]+)\(')
jsi = JSInterpreter(data_js)
js_signature = jsi.extract_function(funcname)
signature = js_signature([sig])
url += "&signature=" + signature
url = url.replace(",", "%2C")
video_urls.append(["(" + fmt_value[key] + ") [youtube]", url])
except:
import traceback
logger.info(traceback.format_exc())
signature = js_signature([sig])
url += "&sig=" + signature
url = url.replace(",", "%2C")
video_urls.append(["(" + fmt_value[key] + ") [youtube]", url])
except:
import traceback
logger.info(traceback.format_exc())
return video_urls

View File

@@ -3,31 +3,28 @@
"name": "Community",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"language": ["*"],
"thumbnail": "",
"banner": "",
"fanart": "",
"categories": [
"direct",
"movie",
"tvshow",
"vo"
],
"categories": [],
"settings": [
{
"id": "filterlanguages",
"id": "pagination",
"type": "list",
"label": "Mostrar enlaces del canal en idioma...",
"default": 3,
"label": "Pagination",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"No Filtrar",
"LAT",
"CAST",
"VO",
"VOSE"
]
"lvalues": ["@70708", "20", "40", "60", "80", "100"]
},
{
"id": "show_seasons",
"type": "bool",
"label": "Show Seasons",
"default": false,
"enabled": true,
"visible": true
}
]
}
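The new pagination setting stores an index into the lvalues list; community.py turns that index into a page size through its defpage table, with the first entry meaning no pagination at all. A small sketch of that mapping:

    defpage = ["", "20", "40", "60", "80", "100"]   # same order as the lvalues above

    def page_size(setting_index):
        # returns an int page size, or None when pagination is disabled
        value = defpage[setting_index]
        return int(value) if value.isdigit() else None

    # page_size(0) -> None (unlimited); the default index 2 -> 40 titles per page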

View File

@@ -1,31 +1,32 @@
# -*- coding: utf-8 -*-
# -*- Channel Community -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import os
import re, os, inspect, requests, xbmc, xbmcaddon
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from channelselector import get_thumb
from core import tmdb
from core import httptools, scrapertoolsV2, servertools, jsontools, tmdb, support
from core.item import Item
from platformcode import logger, config, platformtools
from core.support import typo
from channelselector import get_thumb
from platformcode import config, platformtools
from specials import autoplay
from specials import filtertools
addon = xbmcaddon.Addon('metadata.themoviedb.org')
lang = addon.getSetting('language')
defpage = ["", "20", "40", "60", "80", "100"]
defp = defpage[config.get_setting('pagination','community')]
show_seasons = config.get_setting('show_seasons','community')
list_data = {}
list_language = ['ITA', 'SUB-ITA']
list_servers = ['directo', 'akvideo', 'verystream', 'openload']
list_quality = ['SD', '720', '1080', '4k']
tmdb_api = 'a1ab8b8669da03637a4b98fa39c39228'
def mainlist(item):
logger.info()
support.log()
path = os.path.join(config.get_data_path(), 'community_channels.json')
if not os.path.exists(path):
@@ -38,185 +39,554 @@ def mainlist(item):
def show_channels(item):
logger.info()
support.log()
itemlist = []
context = [{"title": config.get_localized_string(50005),
"action": "remove_channel",
"channel": "community"}]
"action": "remove_channel",
"channel": "community"}]
path = os.path.join(config.get_data_path(), 'community_channels.json')
file = open(path, "r")
json = jsontools.load(file.read())
itemlist.append(Item(channel=item.channel, title=config.get_localized_string(70676), action='add_channel', thumbnail=get_thumb('add.png')))
itemlist.append(Item(channel=item.channel,
title=typo(config.get_localized_string(70676),'bold color kod'),
action='add_channel',
thumbnail=get_thumb('add.png')))
for key, channel in json['channels'].items():
file_path = channel ['path']
file_url = httptools.downloadpage(file_path, follow_redirects=True).data
# Find File Path
if 'http' in channel['path']: file_path = requests.get(channel['path']).url
else: file_path = channel['path']
# make relative path
path = os.path.dirname(os.path.abspath(file_path))
if 'http' in path: path = path[path.find('http'):].replace('\\','/').replace(':/','://')
if file_path.startswith('http'): file_url = httptools.downloadpage(file_path, follow_redirects=True).data
else: file_url = open(file_path, "r").read()
# load json
json_url = jsontools.load(file_url)
thumbnail = json_url['thumbnail'] if 'thumbnail' in json_url else ''
fanart = json_url['fanart'] if 'fanart' in json_url else ''
thumbnail = relative('thumbnail', json_url, path)
fanart = relative('fanart', json_url, path)
plot = json_url['plot'] if json_url.has_key('plot') else ''
itemlist.append(Item(channel=item.channel,
title=channel['channel_name'],
title=typo(channel['channel_name'],'bold'),
url=file_path,
thumbnail=thumbnail,
fanart=fanart,
plot=plot,
action='show_menu',
channel_id = key,
context=context))
context=context,
path=path))
autoplay.show_option(item.channel, itemlist)
support.channel_config(item, itemlist)
return itemlist
def load_json(item):
logger.info()
if item.url.startswith('http'):
json_file = httptools.downloadpage(item.url).data
else:
json_file = open(item.url, "r").read()
json_data = jsontools.load(json_file)
return json_data
def show_menu(item):
global list_data
logger.info()
itemlist = []
support.log()
json_data = load_json(item)
# If Second Level Menu
if item.menu:
menu = item.menu
item.menu = None
itemlist.append(item)
for key in menu:
if key != 'search':
if type(menu[key]) == dict:
title = menu[key]['title'] if menu[key].has_key('title') else item.title
thumbnail = relative('thumbnail', menu[key], item.path)
plot = menu[key]['plot'] if menu[key].has_key('plot') else ''
else:
title = menu[key]
thumbnail = item.thumbnail
plot = ''
if "menu" in json_data:
for option in json_data['menu']:
if 'thumbnail' in json_data:
thumbnail = option['thumbnail']
else:
thumbnail = ''
if 'fanart' in option and option['fanart']:
fanart = option['fanart']
else:
fanart = item.fanart
if 'plot' in option and option['plot']:
plot = option['plot']
else:
plot = item.plot
itemlist.append(Item(channel=item.channel, title=option['title'], thumbnail=thumbnail, fanart=fanart, plot=plot, action='show_menu', url=option['link']))
autoplay.show_option(item.channel, itemlist)
itemlist.append(Item(channel=item.channel,
title=typo(title,'submenu'),
url=item.url,
path=item.path,
thumbnail=thumbnail,
plot=plot,
action='submenu',
filterkey=key))
if menu.has_key('search'):
itemlist.append(Item(channel=item.channel,
title=typo('Cerca ' + item.fulltitle +'...','color kod bold'),
thumbnail=get_thumb('search.png'),
action='search',
url=item.url,
path=item.path))
return itemlist
if "movies_list" in json_data:
item.media_type='movies_list'
else:
json_data = load_json(item)
elif "tvshows_list" in json_data:
item.media_type = 'tvshows_list'
if "menu" in json_data:
for option in json_data['menu']:
thumbnail = relative('thumbnail', option, item.path)
fanart = relative('fanart', option, item.path)
plot = option['plot'] if option.has_key('plot') else item.plot
url = relative('link', option, item.path)
submenu = option['submenu'] if option.has_key('submenu') else []
level2 = option['level2'] if option.has_key('level2') else []
itemlist.append(Item(channel=item.channel,
title=format_title(option['title']),
fulltitle=option['title'],
thumbnail=thumbnail,
fanart=fanart,
plot=plot,
action='show_menu',
url=url,
path=item.path,
menu=level2))
elif "episodes_list" in json_data:
item.media_type = 'episodes_list'
if submenu:
for key in submenu:
if key != 'search':
if type(submenu[key]) == dict:
title = submenu[key]['title'] if submenu[key].has_key('title') else item.title
thumbnail = relative('thumbnail', submenu[key], item.path)
plot = submenu[key]['plot'] if submenu[key].has_key('plot') else ''
else:
title = submenu[key]
thumbnail = item.thumbnail
plot = ''
if "generic_list" in json_data:
item.media_type='generic_list'
itemlist.append(Item(channel=item.channel,
title=typo(title,'submenu'),
url=url,
path=item.path,
thumbnail=thumbnail,
plot=plot,
action='submenu',
filterkey=key))
if submenu.has_key('search'):
itemlist.append(Item(channel=item.channel,
title=typo('Cerca ' + option['title'] +'...','color kod bold'),
thumbnail=get_thumb('search.png'),
action='search',
url=url,
path=item.path))
# add Search
itemlist.append(Item(channel=item.channel,
title=typo('Cerca nel Canale...','color kod bold'),
thumbnail=get_thumb('search.png'),
action='search',
url=item.url,
path=item.path))
return itemlist
# select type of list
if json_data.has_key("movies_list"):
item.media_type = 'movies_list'
item.contentType = 'movie'
elif json_data.has_key("tvshows_list"):
item.media_type = 'tvshows_list'
item.contentType = 'tvshow'
elif json_data.has_key("episodes_list"):
item.media_type = 'episodes_list'
item.contentType = 'episode'
elif json_data.has_key("generic_list"):
item.media_type= 'generic_list'
return list_all(item)
def submenu(item):
support.log()
itemlist = []
filter_list = []
plot = item.plot
json_data = load_json(item)
if json_data.has_key("movies_list"): item.media_type= 'movies_list'
elif json_data.has_key("tvshows_list"): item.media_type = 'tvshows_list'
elif json_data.has_key("episodes_list"): item.media_type = 'episodes_list'
elif json_data.has_key("generic_list"): item.media_type= 'generic_list'
media_type = item.media_type
for media in json_data[media_type]:
if media.has_key(item.filterkey):
if type(media[item.filterkey]) == str and media[item.filterkey] not in filter_list:
filter_list.append(media[item.filterkey])
elif type(media[item.filterkey]) == list:
for f in media[item.filterkey]:
if f not in filter_list:
filter_list.append(f)
filter_list.sort()
for filter in filter_list:
if item.filterkey in ['director','actors']:
load_info = load_json('http://api.themoviedb.org/3/search/person/?api_key=' + tmdb_api + '&language=' + lang + '&query=' + filter)
if load_info:
id = str(load_info['results'][0]['id'])
if id:
info = load_json('http://api.themoviedb.org/3/person/'+ id + '?api_key=' + tmdb_api + '&language=' + lang)
if not info['biography']:
bio = load_json('http://api.themoviedb.org/3/person/'+ id + '?api_key=' + tmdb_api + '&language=en')['biography']
thumbnail = 'https://image.tmdb.org/t/p/w600_and_h900_bestv2' + info['profile_path'] if info['profile_path'] else item.thumbnail
plot += info['biography'] if info['biography'] else bio if bio else ''
itemlist.append(Item(channel=item.channel,
title=typo(filter, 'bold'),
url=item.url,
media_type=item.media_type,
action='list_filtered',
thumbnail=thumbnail,
plot=plot,
filterkey=item.filterkey,
filter=filter))
return itemlist
def list_all(item):
logger.info()
support.log('CONTENT TYPE ', item.contentType)
if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']:
pagination = int(defp) if defp.isdigit() else ''
else: pagination = ''
pag = item.page if item.page else 1
itemlist = []
media_type = item.media_type
json_data = load_json(item)
for media in json_data[media_type]:
contentTitle = contentSerieName = ''
infoLabels = item.infoLabels if item.infoLabels else {}
quality, language, plot, poster = set_extra_values(media)
if json_data:
for i, media in enumerate(json_data[media_type]):
if pagination and (pag - 1) * pagination > i: continue # pagination
if pagination and i >= pag * pagination: break # pagination
title = media['title']
title = set_title(title, language, quality)
quality, language, plot, poster = set_extra_values(media, item.path)
new_item = Item(channel=item.channel, title=title, quality=quality,
language=language, plot=plot, thumbnail=poster)
fulltitle = media['title']
title = set_title(fulltitle, language, quality)
new_item.infoLabels['year'] = media['year'] if 'year' in media else ''
new_item.infoLabels['tmdb_id'] = media['tmdb_id'] if 'tmdb_id' in media else ''
infoLabels['year'] = media['year'] if media.has_key('year')else ''
infoLabels['tmdb_id'] = media['tmdb_id'] if media.has_key('tmdb_id') else ''
if 'movies_list' or 'generic_list' in json_data:
new_item.url = media
new_item.contentTitle = media['title']
new_item.action = 'findvideos'
else:
new_item.url = media['seasons_list']
new_item.contentSerieName = media['title']
new_item.action = 'seasons'
if 'movies_list' in json_data or 'generic_list' in json_data:
url= media
contentTitle = fulltitle
contentType = 'movie'
action='findvideos'
itemlist.append(new_item)
else:
contentSerieName = fulltitle
contentType = 'tvshow'
if media.has_key('seasons_list'):
url = media['seasons_list']
action = 'get_seasons'
else:
url = relative('link', media, item.path)
action = 'episodios'
if not 'generic_list' in json_data:
tmdb.set_infoLabels(itemlist, seekTmdb=True)
itemlist.append(Item(channel=item.channel,
contentType=contentType,
title=format_title(title),
fulltitle=fulltitle,
show=fulltitle,
quality=quality,
language=language,
plot=plot,
personal_plot=plot,
thumbnail=poster,
path=item.path,
url=url,
contentTitle=contentTitle,
contentSerieName=contentSerieName,
infoLabels=infoLabels,
action=action))
if pagination and len(json_data[media_type]) >= pag * pagination:
if inspect.stack()[1][3] != 'get_newest':
itemlist.append(
Item(channel=item.channel,
action = item.action,
contentType=contentType,
title=typo(config.get_localized_string(30992), 'color kod bold'),
fulltitle= item.fulltitle,
show= item.show,
url=item.url,
args=item.args,
page=pag + 1,
thumbnail=support.thumb()))
if not 'generic_list' in json_data:
tmdb.set_infoLabels(itemlist, seekTmdb=True)
for item in itemlist:
if item.personal_plot != item.plot and item.personal_plot:
item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot
return itemlist
def list_filtered(item):
support.log()
if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']:
pagination = int(defp) if defp.isdigit() else ''
else: pagination = ''
pag = item.page if item.page else 1
itemlist = []
media_type = item.media_type
json_data = load_json(item)
contentTitle = contentSerieName = ''
infoLabels = item.infoLabels if item.infoLabels else {}
if json_data:
for i, media in enumerate(json_data[media_type]):
if pagination and (pag - 1) * pagination > i: continue # pagination
if pagination and i >= pag * pagination: break # pagination
if media.has_key(item.filterkey):
filter_keys = [it.lower() for it in media[item.filterkey]] if type(media[item.filterkey]) == list else media[item.filterkey].lower()
if item.filter.lower() in filter_keys:
quality, language, plot, poster = set_extra_values(media, item.path)
fulltitle = media['title']
title = set_title(fulltitle, language, quality)
infoLabels['year'] = media['year'] if media.has_key('year')else ''
infoLabels['tmdb_id'] = media['tmdb_id'] if media.has_key('tmdb_id') else ''
if 'movies_list' in json_data or 'generic_list' in json_data:
url= media
contentTitle = fulltitle
contentType = 'movie'
action='findvideos'
else:
contentSerieName = fulltitle
contentType = 'tvshow'
if media.has_key('seasons_list'):
url = media['seasons_list']
action = 'get_seasons'
else:
url = relative('link', media, item.path)
action = 'episodios'
itemlist.append(Item(channel=item.channel,
contentType=contentType,
title=format_title(title),
fulltitle=fulltitle,
show=fulltitle,
quality=quality,
language=language,
plot=plot,
personal_plot=plot,
thumbnail=poster,
path=item.path,
url=url,
contentTitle=contentTitle,
contentSerieName=contentSerieName,
infoLabels=infoLabels,
action=action))
if pagination and len(json_data[media_type]) >= pag * pagination and len(itemlist) >= pag * pagination:
if inspect.stack()[1][3] != 'get_newest':
itemlist.append(
Item(channel=item.channel,
action = item.action,
contentType=contentType,
title=typo(config.get_localized_string(30992), 'color kod bold'),
fulltitle= item.fulltitle,
show= item.show,
url=item.url,
args=item.args,
page=pag + 1,
thumbnail=support.thumb()))
if not 'generic_list' in json_data:
tmdb.set_infoLabels(itemlist, seekTmdb=True)
for item in itemlist:
if item.personal_plot != item.plot and item.personal_plot:
item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot
return itemlist
def seasons(item):
logger.info()
def get_seasons(item):
itm = item
support.log()
itemlist = []
infoLabels = item.infoLabels
infoLabels = item.infoLabels if item.infolabels else {}
list_seasons = item.url
for season in list_seasons:
infoLabels['season'] = season['season']
title = config.get_localized_string(60027) % season['season']
itemlist.append(Item(channel=item.channel, title=title, url=season['link'], action='episodesxseason',
contentSeasonNumber=season['season'], infoLabels=infoLabels))
url = relative('link', season, item.path)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
itemlist = sorted(itemlist, key=lambda i: i.title)
itemlist.append(Item(channel=item.channel,
title=format_title(title),
fulltitle=item.fulltitle,
show=item.show,
thumbnails=item.thumbnails,
url=url,
action='episodios',
contentSeason=season['season'],
infoLabels=infoLabels,
contentType='tvshow'))
if inspect.stack()[1][3] in ['add_tvshow', "get_seasons"] or show_seasons == False:
it = []
for item in itemlist:
it += episodios(item)
itemlist = it
if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes', 'get_newest']:
pagination = int(defp) if defp.isdigit() else ''
pag = itm.page if itm.page else 1
it = []
for i, item in enumerate(itemlist):
if pagination and (pag - 1) * pagination > i: continue # pagination
if pagination and i >= pag * pagination: break # pagination
it.append(item)
if pagination and len(itemlist) >= pag * pagination:
itm.page = pag + 1
itm.title=typo(config.get_localized_string(30992), 'color kod bold')
itm.thumbnail=support.thumb()
it.append(itm)
itemlist = it
else:
tmdb.set_infoLabels(itemlist, seekTmdb=True)
itemlist = sorted(itemlist, key=lambda i: i.title)
support.videolibrary(itemlist,item)
return itemlist
def episodesxseason(item):
logger.info()
def episodios(item):
support.log()
itm = item
if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']:
pagination = int(defp) if defp.isdigit() else ''
else: pagination = ''
pag = item.page if item.page else 1
itemlist = []
json_data = load_json(item)
infoLabels = item.infoLabels
ep = 1
season = infoLabels['season'] if infoLabels.has_key('season') else item.contentSeason if item.contentSeason else 1
season_number = infoLabels['season']
for episode in json_data['episodes_list']:
episode_number = episode['number']
infoLabels['season'] = season_number
infoLabels['episode'] = episode_number
for i, episode in enumerate(json_data['episodes_list']):
if pagination and (pag - 1) * pagination > i: continue # pagination
if pagination and i >= pag * pagination: break # pagination
match = []
if episode.has_key('number'): match = support.match(episode['number'], r'(?P<season>\d+)x(?P<episode>\d+)')[0][0]
if not match and episode.has_key('title'): match = support.match(episode['title'], r'(?P<season>\d+)x(?P<episode>\d+)')[0][0]
if match:
episode_number = match[1]
ep = int(match[1]) + 1
season_number = match[0]
else:
season_number = episode['season'] if episode.has_key('season') else season if season else 1
episode_number = episode['number'] if episode.has_key('number') else ''
if not episode_number.isdigit():
episode_number = support.match(episode['title'], r'(?P<episode>\d+)')[0][0]
ep = int(episode_number) if episode_number else ep
if not episode_number:
episode_number = str(ep).zfill(2)
ep += 1
title = config.get_localized_string(70677) + ' %s' % (episode_number)
infoLabels['season'] = season_number
infoLabels['episode'] = episode_number
itemlist.append(Item(channel=item.channel, title=title, url=episode, action='findvideos',
contentEpisodeNumber=episode_number, infoLabels=infoLabels))
plot = episode['plot'] if episode.has_key('plot') else item.plot
thumbnail = episode['poster'] if episode.has_key('poster') else episode['thumbnail'] if episode.has_key('thumbnail') else item.thumbnail
title = ' - ' + episode['title'] if episode.has_key('title') else ''
title = '%sx%s%s' % (season_number, episode_number, title)
if season_number == item.filter or not item.filterseason:
itemlist.append(Item(channel= item.channel,
title= format_title(title),
fulltitle = item.fulltitle,
show = item.show,
url= episode,
action= 'findvideos',
plot= plot,
thumbnail= thumbnail,
contentSeason= season_number,
contentEpisode= episode_number,
infoLabels= infoLabels,
contentType= 'episode'))
if show_seasons == True and inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes'] and not item.filterseason:
itm.contentType='season'
season_list = []
for item in itemlist:
if item.contentSeason not in season_list:
season_list.append(item.contentSeason)
itemlist = []
for season in season_list:
itemlist.append(Item(channel=item.channel,
title=format_title(config.get_localized_string(60027) % season),
fulltitle=itm.fulltitle,
show=itm.show,
thumbnails=itm.thumbnails,
url=itm.url,
action='episodios',
contentSeason=season,
infoLabels=infoLabels,
filterseason=season))
elif pagination and len(json_data['episodes_list']) >= pag * pagination:
if inspect.stack()[1][3] != 'get_newest':
itemlist.append(
Item(channel=item.channel,
action = item.action,
contentType='episode',
title=typo(config.get_localized_string(30992), 'color kod bold'),
fulltitle= item.fulltitle,
show= item.show,
url=item.url,
args=item.args,
page=pag + 1,
thumbnail=support.thumb()))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
support.log()
itemlist = []
if 'links' in item.url:
for url in item.url['links']:
quality, language, plot, poster = set_extra_values(url, item.path)
title = ''
title = set_title(title, language, quality)
for url in item.url['links']:
quality, language, plot, poster = set_extra_values(url)
title = ''
title = set_title(title, language, quality)
itemlist.append(Item(channel=item.channel, title=format_title('%s'+title), url=url['url'], action='play', quality=quality,
language=language, infoLabels = item.infoLabels))
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url['url'], action='play', quality=quality,
language=language, infoLabels = item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if inspect.stack()[2][3] != 'start_download':
autoplay.start(itemlist, item)
# Required for FilterTools
# itemlist = filtertools.get_links(itemlist, item, list_language)
return itemlist
# Required for AutoPlay
autoplay.start(itemlist, item)
return itemlist
def add_channel(item):
logger.info()
support.log()
import xbmc
import xbmcgui
channel_to_add = {}
@@ -263,8 +633,9 @@ def add_channel(item):
platformtools.dialog_notification(config.get_localized_string(20000), config.get_localized_string(70683) % json_file['channel_name'])
return
def remove_channel(item):
logger.info()
support.log()
import xbmc
import xbmcgui
path = os.path.join(config.get_data_path(), 'community_channels.json')
@@ -284,8 +655,8 @@ def remove_channel(item):
return
def set_extra_values(dict):
logger.info()
def set_extra_values(dict, path):
support.log()
quality = ''
language = ''
plot = ''
@@ -298,22 +669,117 @@ def set_extra_values(dict):
if 'plot' in dict and dict['plot'] != '':
plot = dict['plot']
if 'poster' in dict and dict['poster'] != '':
poster = dict['poster']
poster = dict['poster'] if ':/' in dict['poster'] else path + dict['poster'] if '/' in dict['poster'] else ''
elif 'thumbnail' in dict and dict['thumbnail'] != '':
poster = dict['thumbnail'] if ':/' in dict['thumbnail'] else path + dict['thumbnail'] if '/' in dict['thumbnail'] else ''
return quality, language, plot, poster
def set_title(title, language, quality):
logger.info()
support.log()
if not config.get_setting('unify'):
if quality != '':
title += ' [%s]' % quality
title += typo(quality, '_ [] color kod')
if language != '':
if not isinstance(language, list):
title += ' [%s]' % language.upper()
title += typo(language.upper(), '_ [] color kod')
else:
title += ' '
for lang in language:
title += '[%s]' % lang.upper()
title += typo(lang.upper(), '_ [] color kod')
return title.capitalize()
return title
def format_title(title):
t = scrapertoolsV2.find_single_match(title, r'\{([^\}]+)\}')
if 'bold' not in t: t += ' bold'
title = re.sub(r'(\{[^\}]+\})','',title)
return typo(title,t)
def search(item, text):
support.log('Search ', text)
itemlist = []
json_data = load_json(item)
return load_links(item, itemlist, json_data, text)
def load_links(item, itemlist, json_data, text):
support.log()
def links(item, itemlist, json_data, text):
support.log()
if "movies_list" in json_data: media_type= 'movies_list'
elif "tvshows_list" in json_data: media_type = 'tvshows_list'
elif "episodes_list" in json_data: media_type = 'episodes_list'
if "generic_list" in json_data: media_type= 'generic_list'
if json_data:
for media in json_data[media_type]:
if text.lower() in media['title'].lower():
quality, language, plot, poster = set_extra_values(media, item.path)
title = media['title']
title = set_title(title, language, quality)
new_item = Item(channel=item.channel, title=format_title(title), quality=quality,
language=language, plot=plot, personal_plot=plot, thumbnail=poster, path=item.path)
new_item.infoLabels['year'] = media['year'] if 'year' in media else ''
new_item.infoLabels['tmdb_id'] = media['tmdb_id'] if 'tmdb_id' in media else ''
if 'movies_list' in json_data or 'generic_list' in json_data:
new_item.url = media
new_item.contentTitle = media['title']
new_item.action = 'findvideos'
else:
new_item.url = media['seasons_list']
new_item.contentSerieName = media['title']
new_item.action = 'seasons'
itemlist.append(new_item)
if not 'generic_list' in json_data:
tmdb.set_infoLabels(itemlist, seekTmdb=True)
for item in itemlist:
if item.personal_plot != item.plot and item.personal_plot:
item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot
if json_data.has_key('menu'):
for option in json_data['menu']:
json_data = load_json(option['link'] if option['link'].startswith('http') else item.path+option['link'])
load_links(item, itemlist, json_data, text)
else:
links(item, itemlist, json_data, text)
return itemlist
def relative(key, json, path):
if json.has_key(key):
if key == 'thumbnail':
ret = json[key] if ':/' in json[key] else path + json[key] if '/' in json[key] else get_thumb(json[key]) if json[key] else ''
else:
ret = json[key] if ':/' in json[key] else path + json[key] if '/' in json[key] else ''
else:
ret = ''
return ret
def load_json(item):
support.log()
url= item if type(item) == str else item.url
try:
if url.startswith('http'):
json_file = httptools.downloadpage(url).data
else:
json_file = open(url, "r").read()
json_data = jsontools.load(json_file)
except:
json_data = {}
return json_data
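For orientation, links(), set_extra_values() and relative() above all read from a community-channel JSON file; a minimal sketch of the structure they expect follows. It is illustrative only: the values are invented, and any key not visible in the code above (for example the per-item 'quality' and 'language' fields) is an assumption rather than part of a documented schema.

# Illustrative only: the keys mirror those read by the code above, all values are invented.
example_channel = {
    'channel_name': 'Example channel',              # used by add_channel() notifications
    'menu': [                                       # optional: nested lists, loaded recursively
        {'title': 'Movies', 'link': 'movies.json'}  # relative links are resolved against item.path
    ],
    'movies_list': [                                # or tvshows_list / episodes_list / generic_list
        {
            'title': 'Some movie',
            'year': '2019',                         # helps the TMDB lookup
            'plot': 'Personal plot, shown above the TMDB one',
            'poster': 'img/poster.jpg',             # URL, absolute path, or path relative to the json
            'quality': 'HD',                        # assumed key name
            'language': ['ita'],                    # assumed key name
            'links': [{'url': 'https://example.com/stream'}]
        }
    ]
    # tvshows_list entries carry a 'seasons_list' instead of 'links'
}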

View File

@@ -539,7 +539,10 @@ def download_from_server(item):
unsupported_servers = ["torrent"]
progreso = platformtools.dialog_progress(config.get_localized_string(30101), config.get_localized_string(70178) % item.server)
channel = __import__('channels.%s' % item.contentChannel, None, None, ['channels.%s' % item.contentChannel])
if item.contentChannel == 'community':
channel = __import__('specials.%s' % item.contentChannel, None, None, ['specials.%s' % item.contentChannel])
else:
channel = __import__('channels.%s' % item.contentChannel, None, None, ['channels.%s' % item.contentChannel])
if hasattr(channel, "play") and not item.play_menu:
progreso.update(50, config.get_localized_string(70178) % item.server, config.get_localized_string(60003) % item.contentChannel)
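The same community/specials fallback recurs in each of the import sites touched below. Purely as a hypothetical refactor sketch (not something this commit contains), the conditional could be centralised in a helper like this:

def import_channel(channel_name):
    # Hypothetical helper: 'community' lives under specials/, every other
    # channel under channels/ (the same rule as the conditionals in this commit).
    package = 'specials' if channel_name == 'community' else 'channels'
    module = '%s.%s' % (package, channel_name)
    return __import__(module, None, None, [module])

# usage (hypothetical): channel = import_channel(item.contentChannel)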
@@ -606,7 +609,10 @@ def download_from_best_server(item):
result = {"downloadStatus": STATUS_CODES.error}
progreso = platformtools.dialog_progress(config.get_localized_string(30101), config.get_localized_string(70179))
channel = __import__('channels.%s' % item.contentChannel, None, None, ['channels.%s' % item.contentChannel])
if item.contentChannel == 'community':
channel = __import__('specials.%s' % item.contentChannel, None, None, ['specials.%s' % item.contentChannel])
else:
channel = __import__('channels.%s' % item.contentChannel, None, None, ['channels.%s' % item.contentChannel])
progreso.update(50, config.get_localized_string(70184), config.get_localized_string(70180) % item.contentChannel)
@@ -652,7 +658,10 @@ def select_server(item):
"contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url))
progreso = platformtools.dialog_progress(config.get_localized_string(30101), config.get_localized_string(70179))
channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel])
if item.contentChannel == 'community':
channel = __import__('specials.%s' % item.contentChannel, None, None, ['specials.%s' % item.contentChannel])
else:
channel = __import__('channels.%s' % item.contentChannel, None, None, ['channels.%s' % item.contentChannel])
progreso.update(50, config.get_localized_string(70184), config.get_localized_string(70180) % item.contentChannel)
if hasattr(channel, item.contentAction):
@@ -720,8 +729,10 @@ def get_episodes(item):
# The item is a TV show or a season
if item.contentType in ["tvshow", "season"]:
# import the channel
channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel])
# Get the list of episodes
if item.contentChannel == 'community':
channel = __import__('specials.%s' % item.contentChannel, None, None, ["specials.%s" % item.contentChannel])
else:
channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel])
episodes = getattr(channel, item.contentAction)(item)
itemlist = []
@@ -818,7 +829,7 @@ def save_download(item):
item.contentAction = item.from_action if item.from_action else item.action
if item.contentType in ["tvshow", "episode", "season"]:
if 'download' in item:
if 'download' in item and config.get_setting('show_seasons',item.channel) == False:
heading = config.get_localized_string(70594) # <- Enter the season number
item.dlseason = platformtools.dialog_numeric(0, heading, '')
if item.dlseason:

View File

@@ -46,7 +46,7 @@ def mainlist(item):
list_canales, any_active = get_channels_list()
channel_language = config.get_setting("channel_language", default="auto")
if channel_language == 'auto':
channel_language = auto_filter()[0]
channel_language = auto_filter()
#if list_canales['peliculas']:
thumbnail = get_thumb("channels_movie.png")
@@ -134,7 +134,7 @@ def get_channels_list():
channels_path = os.path.join(config.get_runtime_path(), "channels", '*.json')
channel_language = config.get_setting("channel_language", default="all")
if channel_language =="auto":
channel_language = auto_filter()[0]
channel_language = auto_filter()
for infile in sorted(glob.glob(channels_path)):
channel_id = os.path.basename(infile)[:-5]
@@ -393,13 +393,19 @@ def get_newest(channel_id, categoria):
def get_title(item):
support.log("ITEM NEWEST ->", item)
if item.contentSerieName: # If it is a TV show
# item.contentSerieName is set even when it is a movie
if item.contentSerieName and item.contentType != 'movie': # If it is a TV show
title = item.contentSerieName
#title = re.compile("\[.*?\]", re.DOTALL).sub("", item.contentSerieName)
if not scrapertools.get_season_and_episode(title) and item.contentEpisodeNumber:
# contentSeason is not set in support
if not item.contentSeason:
item.contentSeason = '1'
title = "%s - %sx%s" % (title, item.contentSeason, str(item.contentEpisodeNumber).zfill(2))
title = "%sx%s - %s" % (item.contentSeason, str(item.contentEpisodeNumber).zfill(2), title)
else:
seas = scrapertools.get_season_and_episode(item.title)
if seas:
title = "%s - %s" % (seas, title)
elif item.contentTitle: # If it is a movie with an adapted channel
title = item.contentTitle
@@ -409,9 +415,10 @@ def get_title(item):
title = item.title
# Strip previous formatting tags from the title
## title = re.compile("\[/*COLO.*?\]", re.DOTALL).sub("", title)
## title = re.compile("\[/*B\]", re.DOTALL).sub("", title)
## title = re.compile("\[/*I\]", re.DOTALL).sub("", title)
title = re.compile("\[/*COLO.*?\]", re.DOTALL).sub("", title)
title = re.compile("\[/*B\]", re.DOTALL).sub("", title)
title = re.compile("\[/*I\]", re.DOTALL).sub("", title)
title = '[B]'+title+'[/B]'
@@ -426,6 +433,10 @@ def get_title(item):
if item.quality:
title += support.typo(item.quality, '_ [] color kod')
season_ = support.typo(config.get_localized_string(70736), '_ [] color white bold') if (type(item.args) != bool and 'season_completed' in item.news) else ''
if season_:
title += season_
return title
@@ -434,7 +445,7 @@ def no_group(list_result_canal):
global channels_id_name
for i in list_result_canal:
support.log("NO GROUP i -> ", i)
#support.log("NO GROUP i -> ", i)
canale = channels_id_name[i.channel]
canale = canale # to tell it apart from the colour of the other entries
i.title = get_title(i) + " [" + canale + "]"
@@ -596,7 +607,7 @@ def setting_channel(item):
channels_path = os.path.join(config.get_runtime_path(), "channels", '*.json')
channel_language = config.get_setting("channel_language", default="auto")
if channel_language == 'auto':
channel_language = auto_filter()[0]
channel_language = auto_filter()
list_controls = []

View File

@@ -475,7 +475,7 @@ def do_search(item, categories=None):
logger.info("channels_path=%s" % channels_path)
# channel_language = config.get_setting("channel_language", default="all")
channel_language = auto_filter()[0]
channel_language = auto_filter()
logger.info("channel_language=%s" % channel_language)
# For Kodi it is necessary to wait before loading the progress dialog, otherwise

View File

@@ -104,6 +104,10 @@ def open_menu(item):
main.doModal()
del main
def open_shortcut_menu(item):
from platformcode import keymaptools
keymaptools.open_shortcut_menu()
class Main(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):

View File

@@ -42,7 +42,7 @@ def list_movies(item, silent=False):
for f in ficheros:
if f.endswith(".nfo"):
nfo_path = filetools.join(raiz, f)
#Sync the movies watched in the Kodi video library with the Alfa one
try:
if config.is_xbmc(): #If it is Kodi, do it
@@ -50,12 +50,12 @@ def list_movies(item, silent=False):
xbmc_videolibrary.mark_content_as_watched_on_alfa(nfo_path)
except:
logger.error(traceback.format_exc())
head_nfo, new_item = videolibrarytools.read_nfo(nfo_path)
if not new_item: #If the .nfo was not read correctly, move on to the next one
continue
if len(new_item.library_urls) > 1:
multicanal = True
else:
@@ -67,7 +67,10 @@ def list_movies(item, silent=False):
for canal_org in new_item.library_urls:
canal = generictools.verify_channel(canal_org)
try:
channel_verify = __import__('channels.%s' % canal, fromlist=["channels.%s" % canal])
if canal == 'community':
channel_verify = __import__('specials.%s' % canal, fromlist=["channels.%s" % canal])
else:
channel_verify = __import__('channels.%s' % canal, fromlist=["channels.%s" % canal])
logger.debug('Channel %s seems correct' % channel_verify)
except:
dead_item = Item(multicanal=multicanal,
@@ -113,7 +116,7 @@ def list_movies(item, silent=False):
if not filetools.exists(filetools.join(new_item.path, filetools.basename(strm_path))):
# If the strm was removed from the Kodi library, do not show it
continue
# Context menu: Mark as watched/unwatched
visto = new_item.library_playcounts.get(os.path.splitext(f)[0], 0)
new_item.infoLabels["playcount"] = visto
@@ -165,7 +168,7 @@ def list_tvshows(item):
if f == "tvshow.nfo":
tvshow_path = filetools.join(raiz, f)
# logger.debug(tvshow_path)
#Sync the episodes watched in the Kodi video library with the Alfa one
try:
if config.is_xbmc(): #If it is Kodi, do it
@@ -173,7 +176,7 @@ def list_tvshows(item):
xbmc_videolibrary.mark_content_as_watched_on_alfa(tvshow_path)
except:
logger.error(traceback.format_exc())
head_nfo, item_tvshow = videolibrarytools.read_nfo(tvshow_path)
if len(item_tvshow.library_urls) > 1:
@@ -187,7 +190,10 @@ def list_tvshows(item):
for canal in item_tvshow.library_urls:
canal = generictools.verify_channel(canal)
try:
channel_verify = __import__('channels.%s' % canal, fromlist=["channels.%s" % canal])
if canal == 'community':
channel_verify = __import__('specials.%s' % canal, fromlist=["channels.%s" % canal])
else:
channel_verify = __import__('channels.%s' % canal, fromlist=["channels.%s" % canal])
logger.debug('Channel %s seems correct' % channel_verify)
except:
dead_item = Item(multicanal=multicanal,
@@ -236,7 +242,7 @@ def list_tvshows(item):
else:
texto_visto = config.get_localized_string(60021)
contador = 1
except:
logger.error('Not found: ' + str(tvshow_path))
logger.error(traceback.format_exc())
@@ -499,15 +505,18 @@ def findvideos(item):
for nom_canal, json_path in list_canales.items():
if filtro_canal and filtro_canal != nom_canal.capitalize():
continue
item_canal = Item()
item_canal.channel = nom_canal
nom_canal = item_canal.channel
# Import the channel for the selected part
try:
channel = __import__('channels.%s' % nom_canal, fromlist=["channels.%s" % nom_canal])
if nom_canal == 'community':
channel = __import__('specials.%s' % nom_canal, fromlist=["channels.%s" % nom_canal])
else:
channel = __import__('channels.%s' % nom_canal, fromlist=["channels.%s" % nom_canal])
except ImportError:
exec "import channels." + nom_canal + " as channel"
@@ -549,7 +558,7 @@ def findvideos(item):
server.channel = "videolibrary"
server.nfo = item.nfo
server.strm_path = item.strm_path
#### Kodi 18 compatibility: prevents the spinner from spinning endlessly on Direct links
if server.action == 'play':
server.folder = False
@@ -576,7 +585,10 @@ def play(item):
# logger.debug("item:\n" + item.tostring('\n'))
if not item.contentChannel == "local":
channel = __import__('channels.%s' % item.contentChannel, fromlist=["channels.%s" % item.contentChannel])
if item.contentChannel == 'community':
channel = __import__('specials.%s' % item.contentChannel, fromlist=["channels.%s" % item.contentChannel])
else:
channel = __import__('channels.%s' % item.contentChannel, fromlist=["channels.%s" % item.contentChannel])
if hasattr(channel, "play"):
itemlist = getattr(channel, "play")(item)
@@ -646,22 +658,22 @@ def update_tvshow(item):
def verify_playcount_series(item, path):
logger.info()
"""
This method checks and repairs the PlayCount of a series that has drifted out of sync with the actual list of episodes in its folder. Missing episode, season or series entries are created marked as "not watched"; afterwards the Season and Series counters are sent for verification.
It returns True if something was updated or False if not, normally because of an error. With this status the caller can update the state of the "verify_playcount" option in "videolibrary.py". The intention of this method is to make one pass that repairs every error and then switch itself off; it can be re-enabled from the Alfa Videolibrary menu. (The shape of the library_playcounts dictionary it repairs is sketched below, after this function's hunks.)
"""
#logger.debug("item:\n" + item.tostring('\n'))
#If the check has never been run, force it
estado = config.get_setting("verify_playcount", "videolibrary")
if not estado or estado == False:
estado = True #If the check has never been run, force it
else:
estado = False
if item.contentType == 'movie': #This is only for TV shows
return (item, False)
if filetools.exists(path):
@@ -670,7 +682,7 @@ def verify_playcount_series(item, path):
if not hasattr(it, 'library_playcounts') or not it.library_playcounts: #If the .nfo has no library_playcounts, create it
logger.error('** It does not have PlayCount')
it.library_playcounts = {}
# Get the episode files
raiz, carpetas_series, ficheros = filetools.walk(path).next()
# Create an item in the list for each strm found
@@ -685,15 +697,15 @@ def verify_playcount_series(item, path):
if season_episode not in it.library_playcounts: #The episode is not included
it.library_playcounts.update({season_episode: 0}) #update the playCount in the .nfo
estado_update = True #Flag that something was updated
if 'season %s' % season not in it.library_playcounts: #The season is not included
it.library_playcounts.update({'season %s' % season: 0}) #update the playCount in the .nfo
estado_update = True #Flag that something was updated
if it.contentSerieName not in it.library_playcounts: #The series is not included
it.library_playcounts.update({item.contentSerieName: 0}) #update the playCount in the .nfo
estado_update = True #Flag that something was updated
if estado_update:
logger.error('** Update status: ' + str(estado) + ' / PlayCount: ' + str(it.library_playcounts))
estado = estado_update
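For reference while reading the repair loop above: library_playcounts is a flat dictionary with one counter per episode, per season and per series. The sketch below only illustrates that shape; the show name, the counts and the exact episode-key format are assumptions.

# Illustrative example of the structure verify_playcount_series() rebuilds (0 = not watched)
it.library_playcounts = {
    '1x01': 1,            # one entry per episode (assumed SxEE key format)
    '1x02': 0,
    'season 1': 0,        # one entry per season
    'Example Show': 0     # one entry for the whole series
}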
@@ -713,35 +725,35 @@ def mark_content_as_watched2(item):
# logger.debug("item:\n" + item.tostring('\n'))
if filetools.exists(item.nfo):
head_nfo, it = videolibrarytools.read_nfo(item.nfo)
#logger.debug(it)
if item.contentType == 'movie':
name_file = os.path.splitext(os.path.basename(item.nfo))[0]
if name_file != 'tvshow' :
it.library_playcounts.update({name_file: item.playcount})
if item.contentType == 'episode' or item.contentType == 'tvshow' or item.contentType == 'list' or name_file == 'tvshow':
# elif item.contentType == 'episode':
name_file = os.path.splitext(os.path.basename(item.strm_path))[0]
num_season = name_file[0]
item.__setattr__('contentType', 'episode')
item.__setattr__('contentSeason', num_season)
#logger.debug(name_file)
else:
name_file = item.contentTitle
# logger.debug(name_file)
if not hasattr(it, 'library_playcounts'):
it.library_playcounts = {}
it.library_playcounts.update({name_file: item.playcount})
# check that if all the episodes of a season are marked as watched, the season is marked too
if item.contentType != 'movie':
it = check_season_playcount(it, item.contentSeason)
#logger.debug(it)
# Save the changes to item.nfo
if filetools.write(item.nfo, head_nfo + it.tojson()):
@@ -756,9 +768,9 @@ def mark_content_as_watched2(item):
if config.is_xbmc():
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_content_as_watched_on_kodi(item , item.playcount)
# logger.debug(item)
platformtools.itemlist_refresh()
def mark_content_as_watched(item):
@@ -970,7 +982,7 @@ def check_tvshow_playcount(item, season):
temporadas_vistas_serie += 1
#logger.debug(temporadas_serie)
if temporadas_serie == temporadas_vistas_serie:
item.library_playcounts.update({item.title: 1})
else:
item.library_playcounts.update({item.title: 0})