This commit is contained in:
mac12m99
2019-07-18 14:15:09 +02:00
parent ccb68c8691
commit 694ff78a8f
31 changed files with 885 additions and 864 deletions

View File

@@ -5,6 +5,7 @@
<import addon="script.module.libtorrent" optional="true"/>
<import addon="metadata.themoviedb.org"/>
<import addon="metadata.tvdb.com"/>
<import addon="script.module.web-pdb" />
</requires>
<extension point="xbmc.python.pluginsource" library="default.py">
<provides>video</provides>

View File

@@ -8,8 +8,8 @@ from core.item import Item
from platformcode import logger, config
from specials import autoplay
# URL che reindirizza sempre al dominio corrente
# host = "https://altadefinizione01.to"
#URL che reindirizza sempre al dominio corrente
#host = "https://altadefinizione01.to"
__channel__ = "altadefinizione01"
host = config.get_channel_url(__channel__)
@@ -29,15 +29,15 @@ blacklist_categorie = ['Altadefinizione01', 'Altadefinizione.to']
def mainlist(item):
support.log()
itemlist = []
support.menu(itemlist, 'Al Cinema', 'peliculas', host + '/cinema/')
support.menu(itemlist, 'Ultimi Film Inseriti', 'peliculas', host)
support.menu(itemlist, 'Film Sub-ITA', 'peliculas', host + '/sub-ita/')
support.menu(itemlist, 'Film Ordine Alfabetico ', 'AZlist', host + '/catalog/')
support.menu(itemlist, 'Categorie Film', 'categories', host)
support.menu(itemlist, 'Cerca...', 'search')
itemlist =[]
support.menu(itemlist, 'Al Cinema','peliculas',host+'/cinema/')
support.menu(itemlist, 'Ultimi Film Inseriti','peliculas',host)
support.menu(itemlist, 'Film Sub-ITA','peliculas',host+'/sub-ita/')
support.menu(itemlist, 'Film Ordine Alfabetico ','AZlist',host+'/catalog/')
support.menu(itemlist, 'Categorie Film','categories',host)
support.menu(itemlist, 'Cerca...','search')
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
@@ -46,16 +46,12 @@ def mainlist(item):
def categories(item):
support.log(item)
itemlist = support.scrape(item, '<li><a href="([^"]+)">(.*?)</a></li>', ['url', 'title'], headers,
'Altadefinizione01', patron_block='<ul class="kategori_list">(.*?)</ul>',
action='peliculas')
itemlist = support.scrape(item,'<li><a href="([^"]+)">(.*?)</a></li>',['url','title'],headers,'Altadefinizione01',patron_block='<ul class="kategori_list">(.*?)</ul>',action='peliculas')
return support.thumb(itemlist)
def AZlist(item):
support.log()
return support.scrape(item, r'<a title="([^"]+)" href="([^"]+)"', ['title', 'url'], headers,
patron_block=r'<div class="movies-letter">(.*?)<\/div>', action='peliculas_list')
return support.scrape(item,r'<a title="([^"]+)" href="([^"]+)"',['title','url'],headers,patron_block=r'<div class="movies-letter">(.*?)<\/div>',action='peliculas_list')
def newest(categoria):
@@ -71,7 +67,7 @@ def newest(categoria):
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Continua la ricerca in caso di errore
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
@@ -86,8 +82,11 @@ def search(item, texto):
item.url = "%s/index.php?do=search&story=%s&subaction=search" % (
host, texto)
try:
return peliculas(item)
# Continua la ricerca in caso di errore
if item.extra == "movie":
return subIta(item)
if item.extra == "tvshow":
return peliculas_tv(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
@@ -102,10 +101,9 @@ def peliculas(item):
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<div class="cover_kapsul ml-mask".*?<a href="(.*?)">(.*?)<\/a>.*?<img .*?src="(.*?)".*?<div class="trdublaj">(.*?)<\/div>.(<div class="sub_ita">(.*?)<\/div>|())'
matches = scrapertoolsV2.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedquality, subDiv, subText, empty in matches:
info = scrapertoolsV2.find_multiple_matches(data,
r'<span class="ml-label">([0-9]+)+<\/span>.*?<span class="ml-label">(.*?)<\/span>.*?<p class="ml-cat".*?<p>(.*?)<\/p>.*?<a href="(.*?)" class="ml-watch">')
info = scrapertoolsV2.find_multiple_matches(data, r'<span class="ml-label">([0-9]+)+<\/span>.*?<span class="ml-label">(.*?)<\/span>.*?<p class="ml-cat".*?<p>(.*?)<\/p>.*?<a href="(.*?)" class="ml-watch">')
infoLabels = {}
for infoLabels['year'], duration, scrapedplot, checkUrl in info:
if checkUrl == scrapedurl:
@@ -117,7 +115,7 @@ def peliculas(item):
fulltitle = scrapedtitle
if subDiv:
fulltitle += support.typo(subText + ' _ () color limegreen')
fulltitle += support.typo(scrapedquality.strip() + ' _ [] color kod')
fulltitle += support.typo(scrapedquality.strip()+ ' _ [] color kod')
itemlist.append(
Item(channel=item.channel,
@@ -134,22 +132,22 @@ def peliculas(item):
thumbnail=scrapedthumbnail))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
support.nextPage(itemlist, item, data, '<span>[^<]+</span>[^<]+<a href="(.*?)">')
support.nextPage(itemlist,item,data,'<span>[^<]+</span>[^<]+<a href="(.*?)">')
return itemlist
def peliculas_list(item):
support.log()
item.fulltitle = ''
block = r'<tbody>(.*)<\/tbody>'
patron = r'<a href="([^"]+)" title="([^"]+)".*?> <img.*?src="([^"]+)".*?<td class="mlnh-3">([0-9]{4}).*?mlnh-4">([A-Z]+)'
return support.scrape(item, patron, ['url', 'title', 'thumb', 'year', 'quality'], patron_block=block)
return support.scrape(item,patron, ['url', 'title', 'thumb', 'year', 'quality'], patron_block=block)
def findvideos(item):
support.log()
itemlist = support.server(item, headers=headers)
return itemlist
return itemlist

View File

@@ -3,98 +3,131 @@
# -*- Riscritto per KOD -*-
# -*- By Greko -*-
# -*- last change: 04/05/2019
# -*- doppione di altadefinizione01
from specials import autoplay
from core import servertools, support
from core import channeltools, servertools, support
from core.item import Item
from platformcode import config, logger
from specials import autoplay
__channel__ = "altadefinizione01_club"
host = config.get_channel_url(__channel__)
# ======== Funzionalità =============================
checklinks = config.get_setting('checklinks', __channel__)
checklinks_number = config.get_setting('checklinks_number', __channel__)
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
list_servers = ['verystream','openload','rapidvideo','streamango']
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream','openload','supervideo','rapidvideo','streamango'] # per l'autoplay
list_quality = ['default']
@support.menu
# =========== home menu ===================
def mainlist(item):
film = ''
"""
Creo il menu principale del canale
:param item:
:return: itemlist []
"""
logger.info("%s mainlist log: %s" % (__channel__, item))
itemlist = []
filmSub = [
('Al Cinema', ['/cinema/', 'peliculas', 'pellicola']),
('Generi', ['', 'categorie', 'genres']),
('Lettera', ['/catalog/a/', 'categorie', 'orderalf']),
('Anni', ['', 'categorie', 'years']),
('Sub-ITA', ['/sub-ita/', 'peliculas', 'pellicola'])
]
# Menu Principale
support.menu(itemlist, 'Film Ultimi Arrivi bold', 'peliculas', host, args='pellicola')
support.menu(itemlist, 'Genere', 'categorie', host, args='genres')
support.menu(itemlist, 'Per anno submenu', 'categorie', host, args=['Film per Anno','years'])
support.menu(itemlist, 'Per lettera', 'categorie', host + '/catalog/a/', args=['Film per Lettera','orderalf'])
support.menu(itemlist, 'Al Cinema bold', 'peliculas', host + '/cinema/', args='pellicola')
support.menu(itemlist, 'Sub-ITA bold', 'peliculas', host + '/sub-ita/', args='pellicola')
support.menu(itemlist, 'Cerca film submenu', 'search', host, args = 'search')
return locals()
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
support.channel_config(item, itemlist)
return itemlist
# ======== def in ordine di menu ===========================
# =========== def per vedere la lista dei film =============
@support.scrape
def peliculas(item):
## import web_pdb; web_pdb.set_trace()
support.log('peliculas',item)
logger.info("%s mainlist peliculas log: %s" % (__channel__, item))
itemlist = []
action="findvideos"
patron_block = r'<div id="dle-content">(.*?)<div class="page_nav">'
if item.args == "search":
patronBlock = r'</script> <div class="boxgrid caption">(.*?)<div id="right_bar">'
else:
patronBlock = r'<div class="cover_kapsul ml-mask">(.*?)<div class="page_nav">'
patron = r'<div class="cover boxcaption"> <h2>.<a href="(?P<url>[^"]+)">.*?<.*?src="(?P<thumb>[^"]+)"'\
'.+?[^>]+>[^>]+<div class="trdublaj"> (?P<quality>[A-Z]+)<[^>]+>(?:.[^>]+>(?P<lang>.*?)<[^>]+>).*?'\
'<p class="h4">(?P<title>.*?)</p>[^>]+> [^>]+> [^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+> [^>]+> '\
'[^>]+>[^>]+>(?P<year>\d{4})[^>]+>[^>]+> [^>]+>[^>]+>(?P<duration>\d+).+?>'
patron_block = r'</table> </form>(.*?)<div class="search_bg">'
patron = r'<h2>.<a href="(.*?)".*?src="(.*?)".*?(?:|<div class="sub_ita">(.*?)</div>)[ ]</div>.*?<p class="h4">(.*?)</p>'
patronNext = '<span>\d</span> <a href="([^"]+)">'
listGroups = ['url', 'thumb', 'lang', 'title', 'year']
return locals()
patronNext = '<span>[^<]+</span>[^<]+<a href="(.*?)">'
itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
headers= headers, patronNext=patronNext,patron_block=patron_block,
action='findvideos')
return itemlist
# =========== def pagina categorie ======================================
@support.scrape
def categorie(item):
support.log('categorie',item)
## import web_pdb; web_pdb.set_trace()
if item.args != 'orderalf': action = "peliculas"
else: action = 'orderalf'
blacklist = 'Altadefinizione01'
logger.info("%s mainlist categorie log: %s" % (__channel__, item))
itemlist = []
# da qui fare le opportuni modifiche
patron = r'<li><a href="(.*?)">(.*?)</a>'
action = 'peliculas'
if item.args == 'genres':
patronBlock = r'<ul class="kategori_list">(.*?)</ul>'
patron = '<li><a href="(?P<url>[^"]+)">(?P<title>.*?)</a>'
elif item.args == 'years':
patronBlock = r'<ul class="anno_list">(.*?)</ul>'
patron = '<li><a href="(?P<url>[^"]+)">(?P<title>.*?)</a>'
elif item.args == 'orderalf':
patronBlock = r'<div class="movies-letter">(.*)<div class="clearfix">'
patron = '<a title=.*?href="(?P<url>[^"]+)"><span>(?P<title>.*?)</span>'
bloque = r'<ul class="kategori_list">(.*?)</ul>'
elif item.args[1] == 'years':
bloque = r'<ul class="anno_list">(.*?)</ul>'
elif item.args[1] == 'orderalf':
bloque = r'<div class="movies-letter">(.*)<div class="clearfix">'
patron = r'<a title=.*?href="(.*?)"><span>(.*?)</span>'
action = 'orderalf'
listGroups = ['url', 'title']
patronNext = ''
itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
headers= headers, patronNext=patronNext, patron_block = bloque,
action=action)
return itemlist
return locals()
# =========== def pagina lista alfabetica ===============================
@support.scrape
def orderalf(item):
logger.info("%s mainlist orderalf log: %s" % (__channel__, item))
itemlist = []
support.log('orderalf',item)
listGroups = ['url', 'title', 'thumb', 'year', 'lang']
patron = r'<td class="mlnh-thumb"><a href="(.*?)".title="(.*?)".*?src="(.*?)".*?mlnh-3">(.*?)<.*?"mlnh-5">.<(.*?)<td' #scrapertools.find_single_match(data, '<td class="mlnh-thumb"><a href="(.*?)".title="(.*?)".*?src="(.*?)".*?mlnh-3">(.*?)<.*?"mlnh-5">.<(.*?)<td')
patronNext = r'<span>[^<]+</span>[^<]+<a href="(.*?)">'
itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
headers= headers, patronNext=patronNext,
action='findvideos')
action= 'findvideos'
patron = r'<td class="mlnh-thumb"><a href="(?P<url>[^"]+)".*?src="(?P<thumb>[^"]+)"'\
'.+?[^>]+>[^>]+ [^>]+[^>]+ [^>]+>(?P<title>[^<]+).*?[^>]+>(?P<year>\d{4})<'\
'[^>]+>[^>]+>(?P<quality>[A-Z]+)[^>]+> <td class="mlnh-5">(?P<lang>.*?)</td>'
patronNext = r'<span>[^<]+</span>[^<]+<a href="(.*?)">'
return itemlist
return locals()
# =========== def pagina del film con i server per verderlo =============
def findvideos(item):
support.log('findvideos', item)
logger.info("%s mainlist findvideos_film log: %s" % (__channel__, item))
itemlist = []
return support.server(item, headers=headers)
# =========== def per cercare film/serietv =============
@@ -104,7 +137,7 @@ def search(item, text):
itemlist = []
text = text.replace(" ", "+")
item.url = host + "/index.php?do=search&story=%s&subaction=search" % (text)
item.args = "search"
#item.extra = "search"
try:
return peliculas(item)
# Cattura la eccezione così non interrompe la ricerca globle se il canale si rompe!
@@ -117,18 +150,16 @@ def search(item, text):
# =========== def per le novità nel menu principale =============
def newest(categoria):
support.log(categoria)
logger.info("%s mainlist newest log: %s" % (__channel__, categoria))
itemlist = []
item = Item()
try:
if categoria == "peliculas":
item.url = host
item.action = "peliculas"
itemlist = peliculas(item)
item.url = host
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
@@ -136,4 +167,4 @@ def newest(categoria):
logger.error("{0}".format(line))
return []
return itemlist
return itemlist

View File

@@ -4,9 +4,9 @@
"active": true,
"adult": false,
"language": ["ita"],
"fanart": "https://altadefinizione01.tools/templates/Dark/img/nlogo.png",
"thumbnail": "https://altadefinizione01.tools/templates/Dark/img/nlogo.png",
"banner": "https://altadefinizione01.tools/templates/Dark/img/nlogo.png",
"fanart": "https://altadefinizione01.estate/templates/Dark/img/nlogo.png",
"thumbnail": "https://altadefinizione01.estate/templates/Dark/img/nlogo.png",
"banner": "https://altadefinizione01.estate/templates/Dark/img/nlogo.png",
"fix" : "reimpostato url e modificato file per KOD",
"change_date": "2019-30-04",
"categories": [
@@ -18,7 +18,7 @@
"id": "channel_host",
"type": "text",
"label": "Host del canale",
"default": "https://www.altadefinizione01.tools/",
"default": "https://altadefinizione01.estate/",
"enabled": true,
"visible": true
},

View File

@@ -2,7 +2,7 @@
# -*- Channel Altadefinizione01L Film - Serie -*-
# -*- By Greko -*-
##import channelselector
import channelselector
from specials import autoplay
from core import servertools, support, jsontools
from core.item import Item
@@ -11,66 +11,88 @@ from platformcode import config, logger
__channel__ = "altadefinizione01_link"
# ======== def per utility INIZIO ============================
host = config.get_setting("channel_host", __channel__)
list_servers = ['supervideo', 'streamcherry','rapidvideo', 'streamango', 'openload']
list_quality = ['default']
host = config.get_setting("channel_host", __channel__)
headers = [['Referer', host]]
# =========== home menu ===================
@support.menu
def mainlist(item):
"""
Creo il menu principale del canale
:param item:
:return: itemlist []
"""
support.log()
itemlist = []
film = ''
filmSub = [
('Al Cinema', ['/film-del-cinema', 'peliculas']),
('Generi', ['', 'genres', 'genres']),
('Anni', ['', 'genres', 'years']),
('Mi sento fortunato', ['/piu-visti.html', 'genres', 'lucky']),
('Popolari', ['/piu-visti.html', 'peliculas', '']),
('Qualità', ['/piu-visti.html', 'genres', 'quality']),
('Sub-ITA', ['/sub-ita/', 'peliculas'])
]
# Menu Principale
support.menu(itemlist, 'Novità bold', 'peliculas', host)
support.menu(itemlist, 'Film per Genere', 'genres', host, args='genres')
support.menu(itemlist, 'Film per Anno submenu', 'genres', host, args='years')
support.menu(itemlist, 'Film per Qualità submenu', 'genres', host, args='quality')
support.menu(itemlist, 'Al Cinema bold', 'peliculas', host + '/film-del-cinema')
support.menu(itemlist, 'Popolari bold', 'peliculas', host + '/piu-visti.html')
support.menu(itemlist, 'Mi sento fortunato bold', 'genres', host, args='lucky')
support.menu(itemlist, 'Sub-ITA bold', 'peliculas', host + '/film-sub-ita/')
support.menu(itemlist, 'Cerca film submenu', 'search', host)
return locals()
# per autoplay
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
support.channel_config(item, itemlist)
return itemlist
# ======== def in ordine di action dal menu ===========================
@support.scrape
def peliculas(item):
#import web_pdb; web_pdb.set_trace()
support.log('peliculas',item)
support.log
itemlist = []
patron = r'class="innerImage">.*?href="(?P<url>[^"]+)".*?src="(?P<thumb>[^"]+)"'\
'.*?class="ml-item-title">(?P<title>[^<]+)</.*?class="ml-item-label"> '\
'(?P<year>\d{4}) <.*?class="ml-item-label"> (?P<duration>\d+) .*?'\
'class="ml-item-label ml-item-label-.+?"> (?P<quality>.+?) <.*?'\
'class="ml-item-label"> (?P<lang>.+?) </'
patron = r'class="innerImage">.*?href="([^"]+)".*?src="([^"]+)"'\
'.*?class="ml-item-title">([^<]+)</.*?class="ml-item-label"> (\d{4}) <'\
'.*?class="ml-item-label">.*?class="ml-item-label ml-item-label-.+?"> '\
'(.+?) </div>.*?class="ml-item-label"> (.+?) </'
listGroups = ['url', 'thumb', 'title', 'year', 'quality', 'lang']
patronNext = '<span>\d</span> <a href="([^"]+)">'
return locals()
itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
headers= headers, patronNext=patronNext,
action='findvideos')
return itemlist
# =========== def pagina categorie ======================================
@support.scrape
def genres(item):
support.log
itemlist = []
#data = httptools.downloadpage(item.url, headers=headers).data
action = 'peliculas'
if item.args == 'genres':
patronBlock = r'<ul class="listSubCat" id="Film">(.*?)</ul>'
bloque = r'<ul class="listSubCat" id="Film">(.*?)</ul>'
elif item.args == 'years':
patronBlock = r'<ul class="listSubCat" id="Anno">(.*?)</ul>'
bloque = r'<ul class="listSubCat" id="Anno">(.*?)</ul>'
elif item.args == 'quality':
patronBlock = r'<ul class="listSubCat" id="Qualita">(.*?)</ul>'
bloque = r'<ul class="listSubCat" id="Qualita">(.*?)</ul>'
elif item.args == 'lucky': # sono i titoli random nella pagina, cambiano 1 volta al dì
patronBlock = r'FILM RANDOM.*?class="listSubCat">(.*?)</ul>'
bloque = r'FILM RANDOM.*?class="listSubCat">(.*?)</ul>'
action = 'findvideos'
patron = r'<li><a href="([^"]+)">(.*?)<'
patron = r'<li><a href="(?P<url>[^"]+)">(?P<title>[^<]+)<'
listGroups = ['url','title']
itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
headers= headers, patron_block = bloque,
action=action)
return locals()
return itemlist
# =========== def per cercare film/serietv =============
#host+/index.php?do=search&story=avatar&subaction=search
@@ -111,6 +133,15 @@ def newest(categoria):
return itemlist
def findvideos(item):
return support.server(item, headers=headers)
support.log()
itemlist = support.server(item, headers=headers)
# Requerido para FilterTools
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist

View File

@@ -3,60 +3,41 @@
# Canale per altadefinizioneclick
# ----------------------------------------------------------
from specials import autoplay
import re
from core import servertools, support
from core.item import Item
from platformcode import config, logger
from platformcode import logger, config
from specials import autoplay
#host = config.get_setting("channel_host", 'altadefinizioneclick')
__channel__ = 'altadefinizioneclick'
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'streamango', "vidoza", "thevideo", "okru", 'youtube']
list_quality = ['1080p']
@support.menu
checklinks = config.get_setting('checklinks', 'altadefinizioneclick')
checklinks_number = config.get_setting('checklinks_number', 'altadefinizioneclick')
headers = [['Referer', host]]
def mainlist(item):
support.log()
support.log()
itemlist = []
film = '' #'/nuove-uscite/'
filmSub = [
('Novità', ['/nuove-uscite/', 'peliculas']),
('Al Cinema', ['/film-del-cinema', 'peliculas']),
('Generi', ['', 'menu', 'Film']),
('Anni', ['', 'menu', 'Anno']),
('Qualità', ['', 'menu', 'Qualita']),
('Sub-ITA', ['/sub-ita/', 'peliculas'])
]
support.menu(itemlist, 'Film', 'peliculas', host + "/nuove-uscite/")
support.menu(itemlist, 'Per Genere submenu', 'menu', host, args='Film')
support.menu(itemlist, 'Per Anno submenu', 'menu', host, args='Anno')
support.menu(itemlist, 'Sub-ITA', 'peliculas', host + "/sub-ita/")
support.menu(itemlist, 'Cerca...', 'search', host, 'movie')
support.aplay(item, itemlist,list_servers, list_quality)
support.channel_config(item, itemlist)
return locals()
return itemlist
@support.scrape
def menu(item):
support.log()
action='peliculas'
patron = r'<li><a href="(?P<url>[^"]+)">(?P<title>[^<]+)</a></li>'
patronBlock= r'<ul class="listSubCat" id="'+ str(item.args) + '">(.*?)</ul>'
return locals()
@support.scrape
def peliculas(item):
support.log()
if item.extra == 'search':
patron = r'<a href="(?P<url>[^"]+)">\s*<div class="wrapperImage">(?:<span class="hd">(?P<quality>[^<]+)'\
'<\/span>)?<img[^s]+src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)<[^<]+>'\
'(?:.*?IMDB:\s(\2[^<]+)<\/div>)?'
else:
patron = r'<img width[^s]+src="(?P<thumb>[^"]+)[^>]+><\/a>.*?<a href="(?P<url>[^"]+)">(?P<title>[^(?:\]|<)]+)'\
'(?:\[(?P<lang>[^\]]+)\])?<\/a>[^>]+>[^>]+>[^>]+>(?:\sIMDB\:\s(?P<rating>[^<]+)<)?'\
'(?:.*?<span class="hd">(?P<quality>[^<]+)<\/span>)?\s*<a'
# in caso di CERCA si apre la maschera di inserimento dati
patronNext = r'<a class="next page-numbers" href="([^"]+)">'
return locals()
def search(item, texto):
support.log("search ", texto)
@@ -96,6 +77,36 @@ def newest(categoria):
return itemlist
def menu(item):
support.log()
itemlist = support.scrape(item, '<li><a href="([^"]+)">([^<]+)</a></li>', ['url', 'title'], headers, patron_block='<ul class="listSubCat" id="'+ str(item.args) + '">(.*?)</ul>', action='peliculas')
return support.thumb(itemlist)
def peliculas(item):
support.log()
if item.extra == 'search':
patron = r'<a href="([^"]+)">\s*<div class="wrapperImage">(?:<span class="hd">([^<]+)<\/span>)?<img[^s]+src="([^"]+)"[^>]+>[^>]+>[^>]+>([^<]+)<[^<]+>(?:.*?IMDB:\s([^<]+)<\/div>)?'
elements = ['url', 'quality', 'thumb', 'title', 'rating']
else:
patron = r'<img width[^s]+src="([^"]+)[^>]+><\/a>.*?<a href="([^"]+)">([^(?:\]|<)]+)(?:\[([^\]]+)\])?<\/a>[^>]+>[^>]+>[^>]+>(?:\sIMDB\:\s([^<]+)<)?(?:.*?<span class="hd">([^<]+)<\/span>)?\s*<a'
elements =['thumb', 'url', 'title','lang', 'rating', 'quality']
itemlist = support.scrape(item, patron, elements, headers, patronNext='<a class="next page-numbers" href="([^"]+)">')
return itemlist
def findvideos(item):
support.log('findvideos', item)
return support.hdpass_get_servers(item)
support.log()
itemlist = support.hdpass_get_servers(item)
if checklinks:
itemlist = servertools.check_list_links(itemlist, checklinks_number)
# itemlist = filtertools.get_links(itemlist, item, list_language)
autoplay.start(itemlist, item)
support.videolibrary(itemlist, item ,'color kod bold')
return itemlist

View File

@@ -58,7 +58,7 @@ def search(item, texto):
def last_ep(item):
log('ANIME PER TUTTI')
return support.scrape(item, '<a href="([^"]+)">([^<]+)<', ['url','title'],patronBlock='<ul class="mh-tab-content-posts">(.*?)<\/ul>', action='findvideos')
return support.scrape(item, '<a href="([^"]+)">([^<]+)<', ['url','title'],patron_block='<ul class="mh-tab-content-posts">(.*?)<\/ul>', action='findvideos')
def newest(categoria):
log('ANIME PER TUTTI')
@@ -83,7 +83,7 @@ def newest(categoria):
return itemlist
def genres(item):
itemlist = support.scrape(item, '<a href="([^"]+)">([^<]+)<', ['url', 'title'], action='peliculas', patronBlock=r'Generi.*?<ul.*?>(.*?)<\/ul>', blacklist=['Contattaci','Privacy Policy', 'DMCA'])
itemlist = support.scrape(item, '<a href="([^"]+)">([^<]+)<', ['url', 'title'], action='peliculas', patron_block=r'Generi.*?<ul.*?>(.*?)<\/ul>', blacklist=['Contattaci','Privacy Policy', 'DMCA'])
return support.thumb(itemlist)
def peliculas(item):

View File

@@ -45,10 +45,10 @@ def mainlist(item):
def generi(item):
log()
patronBlock = r'</i>\sGeneri</a>\s*<ul class="sub">(.*?)</ul>'
patron_block = r'</i>\sGeneri</a>\s*<ul class="sub">(.*?)</ul>'
patron = r'<a href="([^"]+)"\stitle="([^"]+)">'
return support.scrape(item, patron, ['url','title'], patronBlock=patronBlock, action='video')
return support.scrape(item, patron, ['url','title'], patron_block=patron_block, action='video')
# Crea Menu Filtro ======================================================
@@ -118,7 +118,7 @@ def search(item, texto):
# Lista A-Z ====================================================
def alfabetico(item):
return support.scrape(item, '<a href="([^"]+)" title="([^"]+)">', ['url', 'title'], patronBlock=r'<span>.*?A alla Z.<\/span>.*?<ul>(.*?)<\/ul>', action='lista_anime')
return support.scrape(item, '<a href="([^"]+)" title="([^"]+)">', ['url', 'title'], patron_block=r'<span>.*?A alla Z.<\/span>.*?<ul>(.*?)<\/ul>', action='lista_anime')
def lista_anime(item):
@@ -259,10 +259,9 @@ def video(item):
def episodios(item):
log()
itemlist = []
patron_block = r'server active(.*?)server hidden '
patron = r'<li><a [^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+" href="([^"]+)"[^>]+>([^<]+)<'
matches = support.match(item, patron, patronBlock)[0]
matches = support.match(item, patron, patron_block)[0]
for scrapedurl, scrapedtitle in matches:
itemlist.append(

View File

@@ -36,24 +36,31 @@ blacklist = ['BENVENUTI', 'Richieste Serie TV', 'CB01.UNO &#x25b6; TROVA L&#8217
'Openload: la situazione. Benvenuto Verystream', 'Openload: lo volete ancora?']
@support.menu
def mainlist(item):
findhost()
film = ''
filmSub = [
('HD', ['', 'menu', 'Film HD Streaming']),
('Generi', ['', 'menu', 'Film per Genere']),
('Anni', ['', 'menu', 'Film per Anno'])
]
tvshow = '/serietv/'
tvshowSub = [
('Aggiornamenti serie tv', ['/serietv/aggiornamento-quotidiano-serie-tv/', 'last']),
('Per Lettera', ['/serietv/', 'menu', 'Serie-Tv per Lettera']),
('Per Genere', ['/serietv/aggiornamento-quotidiano-serie-tv/', 'menu', 'Serie-Tv per Genere']),
('Per anno', ['/serietv/aggiornamento-quotidiano-serie-tv/', 'menu', 'Serie-Tv per Anno'])
]
return locals()
autoplay.init(item.channel, list_servers, list_quality)
# Main options
itemlist = []
support.menu(itemlist, 'Ultimi 100 Film Aggiornati bold', 'last', host + '/lista-film-ultimi-100-film-aggiornati/')
support.menu(itemlist, 'Film bold', 'peliculas', host)
support.menu(itemlist, 'HD submenu', 'menu', host, args="Film HD Streaming")
support.menu(itemlist, 'Per genere submenu', 'menu', host, args="Film per Genere")
support.menu(itemlist, 'Per anno submenu', 'menu', host, args="Film per Anno")
support.menu(itemlist, 'Cerca film... submenu', 'search', host, args='film')
support.menu(itemlist, 'Serie TV bold', 'peliculas', host + '/serietv/', contentType='tvshow')
support.menu(itemlist, 'Aggiornamenti serie tv', 'last', host + '/serietv/aggiornamento-quotidiano-serie-tv/', contentType='tvshow')
support.menu(itemlist, 'Per Lettera submenu', 'menu', host + '/serietv/', contentType='tvshow', args="Serie-Tv per Lettera")
support.menu(itemlist, 'Per Genere submenu', 'menu', host + '/serietv/', contentType='tvshow', args="Serie-Tv per Genere")
support.menu(itemlist, 'Per anno submenu', 'menu', host + '/serietv/', contentType='tvshow', args="Serie-Tv per Anno")
support.menu(itemlist, 'Cerca serie... submenu', 'search', host + '/serietv/', contentType='tvshow', args='serie')
autoplay.show_option(item.channel, itemlist)
return itemlist
def menu(item):
@@ -103,7 +110,7 @@ def newest(categoria):
item.url = host + '/lista-film-ultimi-100-film-aggiunti/'
return support.scrape(item, r'<a href=([^>]+)>([^<([]+)(?:\[([A-Z]+)\])?\s\(([0-9]{4})\)<\/a>',
['url', 'title', 'quality', 'year'],
patronBlock=r'Ultimi 100 film aggiunti:.*?<\/td>')
patron_block=r'Ultimi 100 film aggiunti:.*?<\/td>')
def last(item):
@@ -163,22 +170,21 @@ def last(item):
return itemlist
@support.scrape
def peliculas(item):
support.log()
if item.contentType == 'movie' or '/serietv/' not in item.url:
patron = r'<div class="?card-image"?>.*?<img src="?(?P<thumb>[^" ]+)"? alt.*?<a href="?(?P<url>[^" >]+)(?:\/|")>(?P<title>[^<[(]+)(?:\[(?P<quality>[A-Za-z0-9/-]+)])? (?:\((?P<year>[0-9]{4})\))?.*?<strong>(?P<genre>[^<>&]+).*?DURATA (?P<duration>[0-9]+).*?<br(?: /)?>(?P<plot>[^<>]+)'
patron = r'<div class="?card-image"?>.*?<img src="?([^" ]+)"? alt.*?<a href="?([^" >]+)(?:\/|")>([^<[(]+)(?:\[([A-Za-z0-9/-]+)])? (?:\(([0-9]{4})\))?.*?<strong>([^<>&]+).*?DURATA ([0-9]+).*?<br(?: /)?>([^<>]+)'
listGroups = ['thumb', 'url', 'title', 'quality', 'year', 'genre', 'duration', 'plot']
action = 'findvideos'
else:
patron = r'div class="card-image">.*?<img src="(?P<thumb>[^ ]+)" alt.*?<a href="(?P<url>[^ >]+)">(?P<title>[^<[(]+)<\/a>.*?<strong><span style="[^"]+">(?P<genre>[^<>0-9(]+)\((?P<year>[0-9]{4}).*?</(?:p|div)>(?P<plot>.*?)</div'
patron = r'div class="card-image">.*?<img src="([^ ]+)" alt.*?<a href="([^ >]+)">([^<[(]+)<\/a>.*?<strong><span style="[^"]+">([^<>0-9(]+)\(([0-9]{4}).*?</(?:p|div)>(.*?)</div'
listGroups = ['thumb', 'url', 'title', 'genre', 'year', 'plot']
action = 'episodios'
patronBlock=[r'<div class="?sequex-page-left"?>(.*?)<aside class="?sequex-page-right"?>',
'<div class="?card-image"?>.*?(?=<div class="?card-image"?>|<div class="?rating"?>)']
patronNext='<a class="?page-link"? href="?([^>]+)"?><i class="fa fa-angle-right">'
return locals()
return support.scrape(item, patron_block=[r'<div class="?sequex-page-left"?>(.*?)<aside class="?sequex-page-right"?>',
'<div class="?card-image"?>.*?(?=<div class="?card-image"?>|<div class="?rating"?>)'],
patron=patron, listGroups=listGroups,
patronNext='<a class="?page-link"? href="?([^>]+)"?><i class="fa fa-angle-right">', blacklist=blacklist, action=action)
def episodios(item):

View File

@@ -64,7 +64,7 @@ def search(item, texto):
def genres(item):
return support.scrape(item, patronBlock=r'<div id="bordobar" class="dropdown-menu(.*?)</li>', patron=r'<a class="dropdown-item" href="([^"]+)" title="([A-z]+)"', listGroups=['url', 'title'], action='video')
return support.scrape(item, patron_block=r'<div id="bordobar" class="dropdown-menu(.*?)</li>', patron=r'<a class="dropdown-item" href="([^"]+)" title="([A-z]+)"', listGroups=['url', 'title'], action='video')
def video(item):

View File

@@ -108,11 +108,11 @@ def episodios(item):
def menu(item):
patronBlock = r'<ul class="sub-menu">.*?</ul>'
patron_block = r'<ul class="sub-menu">.*?</ul>'
patron = r'menu-category-list"><a href="([^"]+)">([^<]+)<'
list_groups = ["url", "title"]
return support.scrape(item, patron, list_groups, blacklist="Anime", action="peliculas_menu", patronBlock=patronBlock)
return support.scrape(item, patron, list_groups, blacklist="Anime", action="peliculas_menu", patron_block=patron_block)
def search(item, texto):

View File

@@ -68,24 +68,13 @@ def newest(categoria):
def peliculas(item):
#<<<<<<< newScrape
#<<<<<<< stable
# itemlist = scrape(item, r'Lingua[^<]+<br>\s*<a href="(?:Lista episodi )?([^"]+)" title="(?:Lista episodi )?(.*?)(?: \(([0-9]+)\))?(?: Streaming)?">', ['url', 'title', 'year'], action='episodios', patron_block='<input type="submit" value="Vai!" class="blueButton">(.*?)<div class="footer">', patronNext='<li class="currentPage">[^>]+><li[^<]+<a href="([^"]+)">')
# renumber(itemlist)
# return itemlist
#=======
# itemlist = scrape(item, r'Lingua[^<]+<br>\s*<a href="(?:Lista episodi )?([^"]+)" title="(?:Lista episodi )?(.*?)(?: \(([0-9]+)\))?(?: Streaming)?">', ['url', 'title', 'year'], action='episodios', patronBlock='<input type="submit" value="Vai!" class="blueButton">(.*?)<div class="footer">', patronNext='<li class="currentPage">[^>]+><li[^<]+<a href="([^"]+)">')
# return renumber(itemlist)
#>>>>>>> newScrape
#=======
itemlist = scrape(item, r'Lingua[^<]+<br>\s*<a href="(?:Lista episodi )?([^"]+)" title="(?:Lista episodi )?(.*?)(?: \(([0-9]+)\))?(?: Streaming)?">', ['url', 'title', 'year'], action='episodios', patron_block='<input type="submit" value="Vai!" class="blueButton">(.*?)<div class="footer">', patronNext='<li class="currentPage">[^>]+><li[^<]+<a href="([^"]+)">')
renumber(itemlist)
return itemlist
#>>>>>>> master
def last(item):
return scrape(item, r'<li><a href="([^"]+)"[^>]+>([^<]+)(\d+)<br>', ['url', 'title', 'episode'], patronBlock='<ul class="last" id="recentAddedEpisodesAnimeDDM">(.*?)</ul>' )
return scrape(item, r'<li><a href="([^"]+)"[^>]+>([^<]+)(\d+)<br>', ['url', 'title', 'episode'], patron_block='<ul class="last" id="recentAddedEpisodesAnimeDDM">(.*?)</ul>' )
def categorie(item):
@@ -105,20 +94,9 @@ def categorie(item):
def episodios(item):
#<<<<<<< newScrape
#<<<<<<< stable
# itemlist = scrape(item, r'<li><a href="([^"]+)"[^<]+<b>(.*?)<\/b>[^>]+>([^<]+)<\/i>', ['url','title','title2'], patron_block='<div class="seasonEp">(.*?)<div class="footer">')
# renumber(itemlist, item, 'bold')
# return itemlist
#=======
# itemlist = scrape(item, r'<li><a href="([^"]+)"[^<]+<b>(.*?)<\/b>[^>]+>([^<]+)<\/i>', ['url','title','title2'], patronBlock='<div class="seasonEp">(.*?)<div class="footer">')
# return renumber(itemlist, item, 'bold')
#>>>>>>> newScrape
#=======
itemlist = scrape(item, r'<li><a href="([^"]+)"[^<]+<b>(.*?)<\/b>[^>]+>([^<]+)<\/i>', ['url','title','title2'], patron_block='<div class="seasonEp">(.*?)<div class="footer">')
renumber(itemlist, item, 'bold')
return itemlist
#>>>>>>> master
def findvideos(item):
log()

View File

@@ -4,15 +4,15 @@
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "https://eurostreaming.pink/wp-content/uploads/2017/08/logocafe.png",
"bannermenu": "https://eurostreaming.pink/wp-content/uploads/2017/08/logocafe.png",
"thumbnail": "https://eurostreaming.cafe/wp-content/uploads/2017/08/logocafe.png",
"bannermenu": "https://eurostreaming.cafe/wp-content/uploads/2017/08/logocafe.png",
"categories": ["tvshow","anime","vosi"],
"settings": [
{
"id": "channel_host",
"type": "text",
"label": "Host del canale",
"default": "https://eurostreaming.pink",
"default": "https://eurostreaming.cafe",
"enabled": true,
"visible": true
},
@@ -40,23 +40,6 @@
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero di link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",

View File

@@ -4,15 +4,22 @@
# by Greko
# ------------------------------------------------------------
"""
Riscritto per poter usufruire del decoratore support.scrape
Riscritto per poter usufruire del modulo support.
Problemi noti:
Le regex non prendono tutto...
server versystream : 'http://vcrypt.net/very/' # VeryS non decodifica il link :http://vcrypt.net/fastshield/
alcuni server tra cui nowvideo.club non sono implementati nella cartella servers
Alcune sezioni di anime-cartoni non vanno, alcune hanno solo la lista degli episodi, ma non hanno link
altre cambiano la struttura
La sezione novità non fa apparire il titolo degli episodi
In episodios è stata aggiunta la possibilità di configurare la videoteca
"""
#import channelselector
#from specials import autoplay#, filtertools
from core import scrapertoolsV2, httptools, support#, servertools, tmdb
import channelselector
from specials import autoplay, filtertools
from core import scrapertoolsV2, httptools, servertools, tmdb, support
from core.item import Item
from platformcode import logger, config
@@ -23,8 +30,8 @@ headers = ['Referer', host]
list_servers = ['verystream', 'wstream', 'speedvideo', 'flashx', 'nowvideo', 'streamango', 'deltabit', 'openload']
list_quality = ['default']
checklinks = config.get_setting('checklinks', 'cineblog01')
checklinks_number = config.get_setting('checklinks_number', 'cineblog01')
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'eurostreaming')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'eurostreaming')
IDIOMAS = {'Italiano': 'ITA', 'Sub-ITA':'vosi'}
list_language = IDIOMAS.values()
@@ -41,46 +48,39 @@ def mainlist(item):
support.menu(itemlist, 'Cerca...', 'search', host, contentType = 'tvshow')
## itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
# autoplay
support.aplay(item, itemlist, list_servers, list_quality)
# configurazione canale
# richiesto per autoplay
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
support.channel_config(item, itemlist)
return itemlist
@support.scrape
def serietv(item):
## import web_pdb; web_pdb.set_trace()
#import web_pdb; web_pdb.set_trace()
# lista serie tv
support.log()
itemlist = []
if item.args:
#patron = r'<span class="serieTitle" style="font-size:20px">(.*?).[^]<a href="([^"]+)"\s+target="_blank">(.*?)<\/a>'
## # DA SISTEMARE - problema: mette tutti gli episodi in sub-ita
## patron = r'<span class="serieTitle" style="font-size:20px">(.*?).[^]<a href="([^"]+)"'\
## '\s+target="_blank">(\d+x\d+) (.*?)(?:|\((.+?)\))</a>'
patron = r'<span class="serieTitle" style="font-size:20px">(.*?).[^]<a href="([^"]+)"'\
'\s+target="_blank">(\d+x\d+) (.*?)</a>'
listGroups = ['title', 'url', 'episode', 'title2']
# il titolo degli episodi viene inglobato in episode ma non sono visibili in newest!!!
patron = r'<span class="serieTitle" style="font-size:20px">(.*?).[^]<a href="([^"]+)"\s+target="_blank">(.*?)<\/a>'
listGroups = ['title', 'url', 'title2']
patronNext = ''
# permette di vedere episodio e titolo + titolo2 in novità
def itemHook(item):
item.show = item.episode + item.title
return item
else:
patron = r'<div class="post-thumb">.*?\s<img src="([^"]+)".*?><a href="([^"]+)".*?>(.*?(?:\((\d{4})\)|(\d{4}))?)<\/a><\/h2>'
listGroups = ['thumb', 'url', 'title', 'year', 'year']
patronNext='a class="next page-numbers" href="?([^>"]+)">Avanti &raquo;</a>'
action='episodios'
return locals()
@support.scrape
itemlist = support.scrape(item, patron_block='', patron=patron, listGroups=listGroups,
patronNext=patronNext, action='episodios')
return itemlist
def episodios(item):
## import web_pdb; web_pdb.set_trace()
support.log("episodios: %s" % item)
support.log("episodios")
itemlist = []
item.contentType = 'episode'
# Carica la pagina
data = httptools.downloadpage(item.url).data
#========
@@ -97,17 +97,46 @@ def episodios(item):
patron = r'(?:<\/span>\w+ STAGIONE\s\d+ (?:\()?(ITA|SUB ITA)(?:\))?<\/div>'\
'<div class="su-spoiler-content su-clearfix" style="display:none">|'\
'(?:\s|\Wn)?(?:<strong>)?(\d+&#.*?)(?:|)?<a\s(.*?)<\/a><br\s\/>)'
## '(?:<\/span>\w+ STAGIONE\s\d+ (?:\()?(ITA|SUB ITA)(?:\))?'\
## '<\/div><div class="su-spoiler-content su-clearfix" style="display:none">|'\
## '(?:\s|\Wn)?(?:<strong>)?(\d[&#].*?)(?:|\W)?<a\s(.*?)<\/a><br\s\/>)'
## '(?:<\/span>\w+ STAGIONE\s\d+ (?:\()?(ITA|SUB ITA)(?:\))?<\/div>'\
## '<div class="su-spoiler-content su-clearfix" style="display:none">|'\
## '\s(?:<strong>)?(\d[&#].*?)<a\s(.*?)<\/a><br\s\/>)'
listGroups = ['lang', 'title', 'url']
itemlist = support.scrape(item, data=data, patron=patron,
listGroups=listGroups, action='findvideos')
listGroups = ['lang', 'title', 'url']
action = 'findvideos'
# Permette la configurazione della videoteca senza andare nel menu apposito
# così si possono Attivare/Disattivare le impostazioni direttamente dalla
# pagina delle puntate
itemlist.append(
Item(channel='setting',
action="channel_config",
title=support.typo("Configurazione Videoteca color lime"),
plot = 'Filtra per lingua utilizzando la configurazione della videoteca.\
Escludi i video in sub attivando "Escludi streams... " e aggiungendo sub in Parole',
config='videolibrary', #item.channel,
folder=False,
thumbnail=channelselector.get_thumb('setting_0.png')
))
return locals()
itemlist = filtertools.get_links(itemlist, item, list_language)
return itemlist
# =========== def findvideos =============
def findvideos(item):
support.log('findvideos', item)
return support.server(item, item.url)
support.log()
itemlist =[]
# Requerido para FilterTools
## itemlist = filtertools.get_links(itemlist, item, list_language)
itemlist = support.server(item, item.url)
## support.videolibrary(itemlist, item)
return itemlist
# =========== def ricerca =============
def search(item, texto):
@@ -145,3 +174,6 @@ def newest(categoria):
return []
return itemlist
def paginator(item):
pass

View File

@@ -208,13 +208,13 @@ def findvideos(item):
itemlist = []
# data = httptools.downloadpage(item.url, headers=headers).data
patronBlock = '<div class="entry-content">(.*?)<footer class="entry-footer">'
# bloque = scrapertools.find_single_match(data, patronBlock)
patron_block = '<div class="entry-content">(.*?)<footer class="entry-footer">'
# bloque = scrapertools.find_single_match(data, patron_block)
patron = r'<a href="([^"]+)">'
# matches = re.compile(patron, re.DOTALL).findall(bloque)
matches, data = support.match(item, patron, patronBlock, headers)
matches, data = support.match(item, patron, patron_block, headers)
for scrapedurl in matches:
if 'is.gd' in scrapedurl:

View File

@@ -4,18 +4,15 @@
# Thanks to Icarus crew & Alfa addon & 4l3x87
# ------------------------------------------------------------
"""
Problemi noti:
- nella pagina categorie appaiono i risultati di tmdb in alcune voci
"""
import re
from core import scrapertoolsV2, httptools, support
from core import httptools, scrapertools, support
from core import tmdb
from core.item import Item
from platformcode import logger, config
from core.support import log
from platformcode import logger, config
__channel__ = 'guardaserieclick'
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
@@ -33,154 +30,34 @@ def mainlist(item):
itemlist = []
support.menu(itemlist, 'Serie', 'serietv', "%s/lista-serie-tv" % host, 'tvshow', args=['news'])
support.menu(itemlist, 'Ultimi Aggiornamenti submenu', 'serietv', "%s/lista-serie-tv" % host, 'tvshow', args= ['update'])
support.menu(itemlist, 'Categorie', 'categorie', host, 'tvshow', args=['cat'])
support.menu(itemlist, 'Serie inedite Sub-ITA submenu', 'serietv', "%s/lista-serie-tv" % host, 'tvshow', args=['inedite'])
support.menu(itemlist, 'Da non perdere bold submenu', 'serietv', "%s/lista-serie-tv" % host, 'tvshow', args=['tv', 'da non perdere'])
support.menu(itemlist, 'Classiche bold submenu', 'serietv', "%s/lista-serie-tv" % host, 'tvshow', args=['tv', 'classiche'])
support.menu(itemlist, 'Disegni che si muovono sullo schermo per magia bold', 'tvserie', "%s/category/animazione/" % host, 'tvshow', args= ['anime'])
support.menu(itemlist, 'Cerca', 'search', host, 'tvshow', args=['cerca'])
# autoplay
support.menu(itemlist, 'Novità bold', 'serietvaggiornate', "%s/lista-serie-tv" % host, 'tvshow')
support.menu(itemlist, 'Nuove serie', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow')
support.menu(itemlist, 'Serie inedite Sub-ITA', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow', args=['inedite'])
support.menu(itemlist, 'Da non perdere bold', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow', args=['tv', 'da non perdere'])
support.menu(itemlist, 'Classiche bold', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow', args=['tv', 'classiche'])
support.menu(itemlist, 'Anime', 'lista_serie', "%s/category/animazione/" % host, 'tvshow')
support.menu(itemlist, 'Categorie', 'categorie', host, 'tvshow', args=['serie'])
support.menu(itemlist, 'Cerca', 'search', host, 'tvshow', args=['serie'])
support.aplay(item, itemlist, list_servers, list_quality)
# configurazione del canale
support.channel_config(item, itemlist)
return itemlist
@support.scrape
def serietv(item):
## import web_pdb; web_pdb.set_trace()
log('serietv ->\n')
##<<<<<<< HEAD
##
## action = 'episodios'
## listGroups = ['url', 'thumb', 'title']
## patron = r'<a href="([^"]+)".*?> <img\s.*?src="([^"]+)" \/>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<\/p>'
## if 'news' in item.args:
## patronBlock = r'<div class="container container-title-serie-new container-scheda" meta-slug="new">(.*?)</div></div><div'
## elif 'inedite' in item.args:
## patronBlock = r'<div class="container container-title-serie-ined container-scheda" meta-slug="ined">(.*?)</div></div><div'
## elif 'da non perdere' in item.args:
## patronBlock = r'<div class="container container-title-serie-danonperd container-scheda" meta-slug="danonperd">(.*?)</div></div><div'
## elif 'classiche' in item.args:
## patronBlock = r'<div class="container container-title-serie-classiche container-scheda" meta-slug="classiche">(.*?)</div></div><div'
## elif 'update' in item.args:
## listGroups = ['url', 'thumb', 'episode', 'lang', 'title']
## patron = r'rel="nofollow" href="([^"]+)"[^>]+> <img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(\d+.\d+) \((.+?)\).<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>'
## patronBlock = r'meta-slug="lastep">(.*?)</div></div><div'
## # permette di vedere episodio + titolo + titolo2 in novità
## def itemHook(item):
## item.show = item.episode + item.title
## return item
## return locals()
##
##@support.scrape
##def tvserie(item):
##
## action = 'episodios'
## listGroups = ['url', 'thumb', 'title']
## patron = r'<a\shref="([^"]+)".*?>\s<img\s.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p></div>'
## patronBlock = r'<div\sclass="col-xs-\d+ col-sm-\d+-\d+">(.*?)<div\sclass="container-fluid whitebg" style="">'
## patronNext = r'<link\s.*?rel="next"\shref="([^"]+)"'
##
## return locals()
##
##@support.scrape
##def episodios(item):
## log('episodios ->\n')
## item.contentType = 'episode'
##
## action = 'findvideos'
## listGroups = ['episode', 'lang', 'title2', 'plot', 'title', 'url']
## patron = r'class="number-episodes-on-img"> (\d+.\d+)(?:|[ ]\((.*?)\))<[^>]+>'\
## '[^>]+>[^>]+>[^>]+>[^>]+>(.*?)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>'\
## '(.*?)<[^>]+></div></div>.<span\s.+?meta-serie="(.*?)" meta-stag=(.*?)</span>'
##
## return locals()
##
##=======
action = 'episodios'
listGroups = ['url', 'thumb', 'title']
patron = r'<a href="([^"]+)".*?> <img\s.*?src="([^"]+)" \/>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<\/p>'
if 'news' in item.args:
patron_block = r'<div class="container container-title-serie-new container-scheda" meta-slug="new">(.*?)</div></div><div'
elif 'inedite' in item.args:
patron_block = r'<div class="container container-title-serie-ined container-scheda" meta-slug="ined">(.*?)</div></div><div'
elif 'da non perdere' in item.args:
patron_block = r'<div class="container container-title-serie-danonperd container-scheda" meta-slug="danonperd">(.*?)</div></div><div'
elif 'classiche' in item.args:
patron_block = r'<div class="container container-title-serie-classiche container-scheda" meta-slug="classiche">(.*?)</div></div><div'
elif 'update' in item.args:
listGroups = ['url', 'thumb', 'episode', 'lang', 'title']
patron = r'rel="nofollow" href="([^"]+)"[^>]+> <img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(\d+.\d+) \((.+?)\).<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>'
patron_block = r'meta-slug="lastep">(.*?)</div></div><div'
# permette di vedere episodio + titolo + titolo2 in novità
def itemHook(item):
item.show = item.episode + item.title
return item
return locals()
@support.scrape
def tvserie(item):
action = 'episodios'
listGroups = ['url', 'thumb', 'title']
patron = r'<a\shref="([^"]+)".*?>\s<img\s.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p></div>'
patron_block = r'<div\sclass="col-xs-\d+ col-sm-\d+-\d+">(.*?)<div\sclass="container-fluid whitebg" style="">'
patronNext = r'<link\s.*?rel="next"\shref="([^"]+)"'
return locals()
@support.scrape
def episodios(item):
log('episodios ->\n')
item.contentType = 'episode'
action = 'findvideos'
listGroups = ['episode', 'lang', 'title2', 'plot', 'title', 'url']
patron = r'class="number-episodes-on-img"> (\d+.\d+)(?:|[ ]\((.*?)\))<[^>]+>'\
'[^>]+>[^>]+>[^>]+>[^>]+>(.*?)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>'\
'(.*?)<[^>]+></div></div>.<span\s.+?meta-serie="(.*?)" meta-stag=(.*?)</span>'
return locals()
##>>>>>>> a72130e0324ae485ae5f39d3d8f1df46c365fa5b
def findvideos(item):
log()
return support.server(item, item.url)
@support.scrape
def categorie(item):
log
action = 'tvserie'
listGroups = ['url', 'title']
patron = r'<li>\s<a\shref="([^"]+)"[^>]+>([^<]+)</a></li>'
patron_block = r'<ul\sclass="dropdown-menu category">(.*?)</ul>'
return locals()
# ================================================================================================================
##
### ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
log()
itemlist = []
item = Item()
item.contentType= 'episode'
item.args = 'update'
try:
if categoria == "series":
item.url = "%s/lista-serie-tv" % host
item.action = "serietv"
itemlist = serietv(item)
item.action = "serietvaggiornate"
itemlist = serietvaggiornate(item)
if itemlist[-1].action == "serietv":
if itemlist[-1].action == "serietvaggiornate":
itemlist.pop()
# Continua la ricerca in caso di errore
@@ -192,18 +69,207 @@ def newest(categoria):
return itemlist
### ================================================================================================================
### ----------------------------------------------------------------------------------------------------------------
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
log(texto)
item.url = host + "/?s=" + texto
item.args = 'cerca'
try:
return tvserie(item)
return lista_serie(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
    """Normalize a scraped title: decode HTML entities, replace double quotes
    with single quotes and strip surrounding whitespace."""
    decoded = scrapertools.decodeHtmlentities(scrapedtitle.strip())
    return decoded.replace('"', "'").strip()
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def nuoveserie(item):
    """List series from one of the homepage carousels.

    The homepage groups series into several blocks (new, unreleased Sub-ITA,
    must-see, classics); ``item.args`` selects which block to scrape. Each
    match becomes an Item with action 'episodios'; the list is then enriched
    with TMDB metadata.
    """
    log()
    itemlist = []
    patron_block = ''
    # Pick the HTML container for the requested carousel; default is the
    # "new releases" block.
    if 'inedite' in item.args:
        patron_block = r'<div class="container container-title-serie-ined container-scheda" meta-slug="ined">(.*?)</div></div><div'
    elif 'da non perdere' in item.args:
        patron_block = r'<div class="container container-title-serie-danonperd container-scheda" meta-slug="danonperd">(.*?)</div></div><div'
    elif 'classiche' in item.args:
        patron_block = r'<div class="container container-title-serie-classiche container-scheda" meta-slug="classiche">(.*?)</div></div><div'
    else:
        patron_block = r'<div class="container container-title-serie-new container-scheda" meta-slug="new">(.*?)</div></div><div'
    # Captures: url, thumbnail, title. The long [^>]+> run skips fixed
    # intermediate tags in the card markup.
    patron = r'<a href="([^"]+)".*?><img\s.*?src="([^"]+)" \/>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<\/p>'
    # support.match returns (matches, data); only the matches are needed here.
    matches = support.match(item, patron, patron_block, headers)[0]
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedtitle = cleantitle(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 contentType="tvshow",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 show=scrapedtitle,
                 thumbnail=scrapedthumbnail,
                 folder=True))
    # Fetch artwork/plot/year from TMDB for the whole list in one pass.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def serietvaggiornate(item):
    """List the most recently updated episodes (homepage 'lastep' block).

    Each match yields an Item with action 'findepvideos'; ``item.extra``
    carries a regex (built per episode) used later to extract the embed
    links from the show page. The list is enriched with TMDB metadata.
    """
    log()
    itemlist = []
    patron_block = r'<div class="container\s*container-title-serie-lastep\s*container-scheda" meta-slug="lastep">(.*?)<\/div><\/div><div'
    # Captures: url, thumbnail, episode label ("SxEE [Sub-ITA]"), title.
    patron = r'<a rel="nofollow"\s*href="([^"]+)"[^>]+><img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>'
    matches = support.match(item, patron, patron_block, headers)[0]
    # Compile once outside the loop instead of on every iteration.
    episode_re = re.compile(r'^(\d+)x(\d+)', re.DOTALL)
    for scrapedurl, scrapedthumbnail, scrapedep, scrapedtitle in matches:
        episode = episode_re.findall(scrapedep)  # season/episode from "SxEE"
        if not episode:
            # Label not in the expected NxM form: skip instead of raising
            # IndexError on episode[0] below.
            continue
        scrapedtitle = cleantitle(scrapedtitle)
        contentlanguage = ""
        if 'sub-ita' in scrapedep.strip().lower():
            contentlanguage = 'Sub-ITA'
        # Regex (resolved per episode) used by findepvideos to pull the three
        # embed URLs out of the show page; meta-ep has no leading zeros there.
        extra = r'<span\s.*?meta-stag="%s" meta-ep="%s" meta-embed="([^"]+)"\s.*?embed2="([^"]+)?"\s.*?embed3="([^"]+)?"[^>]*>' % (
            episode[0][0], episode[0][1].lstrip("0"))
        infoLabels = {}
        infoLabels['episode'] = episode[0][1].zfill(2)
        infoLabels['season'] = episode[0][0]
        title = str(
            "%s - %sx%s %s" % (scrapedtitle, infoLabels['season'], infoLabels['episode'], contentlanguage)).strip()
        itemlist.append(
            Item(channel=item.channel,
                 action="findepvideos",
                 contentType="tvshow",
                 title=title,
                 show=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 extra=extra,
                 thumbnail=scrapedthumbnail,
                 contentLanguage=contentlanguage,
                 infoLabels=infoLabels,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def categorie(item):
    """Scrape the category dropdown and list its entries as 'lista_serie' items."""
    log()
    patron = r'<li>\s<a\shref="([^"]+)"[^>]+>([^<]+)</a></li>'
    block = r'<ul\sclass="dropdown-menu category">(.*?)</ul>'
    return support.scrape(item, patron, ['url', 'title'],
                          patron_block=block,
                          headers=headers,
                          action="lista_serie")
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def lista_serie(item):
    """Scrape the full series grid and return items with action 'episodios'.

    Pagination follows the <link rel='next'> header emitted by the site.
    """
    log()
    # NOTE: the unused `itemlist = []` local from the previous revision was
    # removed; support.scrape builds and returns the list itself.
    patron_block = r'<div\sclass="col-xs-\d+ col-sm-\d+-\d+">(.*?)<div\sclass="container-fluid whitebg" style="">'
    # Captures: url, thumbnail, title.
    patron = r'<a\shref="([^"]+)".*?>\s<img\s.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p></div>'
    return support.scrape(item, patron, ['url', 'thumb', 'title'],
                          patron_block=patron_block,
                          patronNext=r"<link\s.*?rel='next'\shref='([^']*)'",
                          action='episodios')
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def episodios(item):
    """Build the episode list for a series page.

    The page embeds, for every episode, its label, title, plot, series name,
    season/episode numbers, up to three embed URLs and a (lazy-loaded)
    thumbnail; one large regex captures all of them in a single pass.
    """
    log()
    itemlist = []
    # Capture groups, in order: episode label, episode title (optional),
    # plot, series name, season, episode, embed url 1-3 (2 and 3 optional),
    # thumbnail (either meta-src or the lazy data-original variant).
    patron = r'<div\sclass="[^"]+">\s([^<]+)<\/div>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><p[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>'
    patron += r'[^"]+".*?serie="([^"]+)".*?stag="([0-9]*)".*?ep="([0-9]*)"\s'
    patron += r'.*?embed="([^"]+)"\s.*?embed2="([^"]+)?"\s.*?embed3="([^"]+)?"?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s?'
    patron += r'(?:<img\sclass="[^"]+" meta-src="([^"]+)"[^>]+>|<img\sclass="[^"]+" src="" data-original="([^"]+)"[^>]+>)?'
    matches = support.match(item, patron, headers=headers)[0]
    for scrapedtitle, scrapedepisodetitle, scrapedplot, scrapedserie, scrapedseason, scrapedepisode, scrapedurl, scrapedurl2, scrapedurl3, scrapedthumbnail, scrapedthumbnail2 in matches:
        scrapedtitle = cleantitle(scrapedtitle)
        scrapedepisode = scrapedepisode.zfill(2)  # pad to 2 digits, e.g. "3" -> "03"
        scrapedepisodetitle = cleantitle(scrapedepisodetitle)
        title = str("%sx%s %s" % (scrapedseason, scrapedepisode, scrapedepisodetitle)).strip()
        # The episode label carries "SUB-ITA" when the episode is subtitled;
        # the typo() marker appended here is what the later 'Sub-ITA' in title
        # check relies on.
        if 'SUB-ITA' in scrapedtitle:
            title += " "+support.typo("Sub-ITA", '_ [] color kod')
        infoLabels = {}
        infoLabels['season'] = scrapedseason
        infoLabels['episode'] = scrapedepisode
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=support.typo(title, 'bold'),
                 fulltitle=scrapedtitle,
                 # All three embed URLs joined so findvideos can offer every server.
                 url=scrapedurl + "\r\n" + scrapedurl2 + "\r\n" + scrapedurl3,
                 contentType="episode",
                 plot=scrapedplot,
                 contentSerieName=scrapedserie,
                 contentLanguage='Sub-ITA' if 'Sub-ITA' in title else '',
                 infoLabels=infoLabels,
                 # Prefer the lazy-load thumbnail when present.
                 thumbnail=scrapedthumbnail2 if scrapedthumbnail2 != '' else scrapedthumbnail,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Offer the "add to video library" entry at the end of the list.
    support.videolibrary(itemlist, item)
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findepvideos(item):
    """Resolve servers for an episode reached from the 'updated' list.

    ``item.extra`` holds the per-episode regex built by serietvaggiornate;
    its groups are the embed URLs, joined and handed to support.server.
    """
    log()
    data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
    matches = scrapertools.find_multiple_matches(data, item.extra)
    # Guard: without a match, matches[0] would raise IndexError (page layout
    # changed or episode removed) — fall back to an empty server list.
    data = "\r\n".join(matches[0]) if matches else ""
    item.contentType = 'movie'
    return support.server(item, data=data)
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
    """Resolve servers for an item.

    For tvshow items the embed URLs must still be extracted from the page
    using the regex in ``item.extra``; for episode items the URLs were
    already stored in ``item.url`` by episodios.
    """
    log()
    if item.contentType == 'tvshow':
        data = httptools.downloadpage(item.url, headers=headers).data
        matches = scrapertools.find_multiple_matches(data, item.extra)
        # Guard: matches[0] would raise IndexError when the pattern finds
        # nothing; return an empty server list instead of crashing.
        data = "\r\n".join(matches[0]) if matches else ""
    else:
        log(item.url)
        data = item.url
    return support.server(item, data)

View File

@@ -70,7 +70,7 @@ def newest(categoria):
def category(item):
return support.scrape(item, r'<li.*?><a href="(.*?)"[^>]+>(.*?)<\/a>' ,['url', 'title'], action='peliculas', patronBlock= r'<ul class="' + item.args + r' scrolling">(.*?)<\/ul>')
return support.scrape(item, r'<li.*?><a href="(.*?)"[^>]+>(.*?)<\/a>' ,['url', 'title'], action='peliculas', patron_block= r'<ul class="' + item.args + r' scrolling">(.*?)<\/ul>')
def search(item, texto):
@@ -90,7 +90,7 @@ def search(item, texto):
def peliculas_src(item):
patron = r'<div class="thumbnail animation-2"><a href="([^"]+)"><img src="([^"]+)" alt="[^"]+" \/>[^>]+>([^<]+)<\/span>.*?<a href.*?>([^<]+)<\/a>[^>]+>[^>]+>(?:<span class="rating">IMDb\s*([0-9.]+)<\/span>)?.*?(?:<span class="year">([0-9]+)<\/span>)?[^>]+>[^>]+><p>(.*?)<\/p>'
return support.scrape(item, patron, ['url', 'thumb', 'type', 'title', 'lang' 'rating', 'year', 'plot'], headers, typeContentDict={'movie':['Film'], 'tvshow':['TV']}, typeActionDict={'findvideos':['Film'], 'episodios':['TV']})
return support.scrape(item, patron, ['url', 'thumb', 'type', 'title', 'lang' 'rating', 'year', 'plot'], headers, type_content_dict={'movie':['Film'], 'tvshow':['TV']}, type_action_dict={'findvideos':['Film'], 'episodios':['TV']})
def peliculas(item):
@@ -102,7 +102,7 @@ def peliculas(item):
return support.scrape(item, patron, ['url', 'thumb', 'rating', 'title', 'year', 'plot'], headers, action='episodios', patronNext='<span class="current">[^<]+<[^>]+><a href="([^"]+)"')
else:
patron = r'<div class="thumbnail animation-2"><a href="([^"]+)"><img src="([^"]+)" alt="[^"]+" \/>[^>]+>([^<]+)<\/span>.*?<a href.*?>([^<]+)<\/a>[^>]+>[^>]+>(?:<span class="rating">IMDb\s*([0-9.]+)<\/span>)?.*?(?:<span class="year">([0-9]+)<\/span>)?[^>]+>[^>]+><p>(.*?)<\/p>'
return support.scrape(item, patron, ['url', 'thumb', 'type', 'title', 'lang' 'rating', 'year', 'plot'], headers, typeContentDict={'movie':['Film'], 'tvshow':['TV']}, typeActionDict={'findvideos':['Film'], 'episodios':['TV']})
return support.scrape(item, patron, ['url', 'thumb', 'type', 'title', 'lang' 'rating', 'year', 'plot'], headers, type_content_dict={'movie':['Film'], 'tvshow':['TV']}, type_action_dict={'findvideos':['Film'], 'episodios':['TV']})
def newep(item):
log()
@@ -132,7 +132,7 @@ def newep(item):
return itemlist
def episodios(item):
return support.scrape(item, r'<a href="([^"]+)"><img src="([^"]+)">.*?<div class="numerando">([^<]+).*?<div class="episodiotitle">[^>]+>([^<]+)<\/a>',['url', 'thumb', 'episode', 'title'], patronBlock='<div id="seasons">(.*?)<div class="sbox')
return support.scrape(item, r'<a href="([^"]+)"><img src="([^"]+)">.*?<div class="numerando">([^<]+).*?<div class="episodiotitle">[^>]+>([^<]+)<\/a>',['url', 'thumb', 'episode', 'title'], patron_block='<div id="seasons">(.*?)<div class="sbox')
def findvideos(item):
log()

View File

@@ -150,9 +150,9 @@ def findvideos(item):
def findepisodevideo(item):
log()
patronBlock = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % item.extra[0][0]
patron_block = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % item.extra[0][0]
patron = r'<a data-id="%s[^"]*" data-href="([^"]+)"(?:\sdata-original="[^"]+")?\sclass="[^"]+">' % item.extra[0][1].lstrip("0")
matches = support.match(item, patron, patronBlock, headers)[0]
matches = support.match(item, patron, patron_block, headers)[0]
data = ''
if len(matches) > 0:
data = matches[0]
@@ -169,9 +169,9 @@ def latestep(item):
itemlist = []
titles = []
patronBlock = r"Ultimi episodi aggiunti.*?<h2>"
patron_block = r"Ultimi episodi aggiunti.*?<h2>"
patron = r'<a href="([^"]*)"\sdata-src="([^"]*)"\sclass="owl-lazy.*?".*?class="title">(.*?)<small>\((\d*?)x(\d*?)\s(Sub-Ita|Ita)'
matches = support.match(item, patron, patronBlock, headers, host)[0]
matches = support.match(item, patron, patron_block, headers, host)[0]
for scrapedurl, scrapedimg, scrapedtitle, scrapedseason, scrapedepisode, scrapedlanguage in matches:
infoLabels = {}
@@ -296,8 +296,8 @@ def search(item, texto):
# ----------------------------------------------------------------------------------------------------------------
def categorie(item):
log()
patronBlock= r'<h2>Sfoglia</h2>\s*<ul>(.*?)</ul>\s*</section>'
patron_block= r'<h2>Sfoglia</h2>\s*<ul>(.*?)</ul>\s*</section>'
patron = r'<li><a href="([^"]+)">([^<]+)</a></li>'
return support.scrape(item, patron, ['url','title'], patronBlock=patronBlock, action='lista_serie', blacklist=["Home Page", "Calendario Aggiornamenti"])
return support.scrape(item, patron, ['url','title'], patron_block=patron_block, action='lista_serie', blacklist=["Home Page", "Calendario Aggiornamenti"])
# ================================================================================================================

View File

@@ -41,7 +41,7 @@ def search(item, text):
def generos(item):
patron = '<a href="([^"#]+)">([a-zA-Z]+)'
return support.scrape(item, patron, ['url', 'title'], patronBlock='<a href="#">Genere</a><ul class="sub-menu">.*?</ul>', action='peliculas')
return support.scrape(item, patron, ['url', 'title'], patron_block='<a href="#">Genere</a><ul class="sub-menu">.*?</ul>', action='peliculas')
def peliculas(item):

10
channels/streamking.json Normal file
View File

@@ -0,0 +1,10 @@
{
"id": "streamking",
"name": "Stream King",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "http://streamking.cc/assets/img/logon.png",
"banner": "",
"categories": ["sport"]
}

58
channels/streamking.py Normal file
View File

@@ -0,0 +1,58 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per Stream King **** TEST ****
# Alhaziel
# ------------------------------------------------------------
import re
import urllib
from core import scrapertools, httptools
from core.item import Item
from platformcode import logger
from platformcode import config
__channel__ = "streamking"
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
def mainlist(item):
    """List the available TV channels scraped from the Stream King homepage."""
    logger.info("[streamking] canali")
    itemlist = []
    page = httptools.downloadpage(host, headers=headers).data.replace('\n', '')
    # Each channel card: relative url, relative thumbnail, title attribute.
    channel_re = re.compile(
        '<div class="tv-block">.*?<a href="([^"]+)".*?src="([^"]+)".*?title="([^"]+)"',
        re.DOTALL)
    for rel_url, rel_thumb, raw_title in channel_re.findall(page):
        clean_title = scrapertools.decodeHtmlentities(raw_title)
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentTitle=clean_title,
                 title=clean_title,
                 fulltitle=clean_title,
                 url=host + rel_url,
                 thumbnail=host + rel_thumb))
    return itemlist
def findvideos(item):
    """Resolve a channel page to its m3u8 stream and wrap it as a playable Item."""
    logger.info("[streamking] findvideos")
    # The channel page embeds the actual player in an iframe; follow it first.
    page = httptools.downloadpage(item.url, headers=headers).data.replace('\n', '')
    iframe_url = scrapertools.find_single_match(page, '<iframe src="([^"]+)"')
    player_page = httptools.downloadpage(iframe_url, headers=headers).data
    stream_url = scrapertools.find_single_match(player_page, "file: '([^']+)'")
    # Append the request headers after '|' so the player sends the Referer
    # this host expects (Kodi URL-options convention).
    playable = Item(channel=item.channel,
                    action="play",
                    title=item.title + 'Play',
                    url=stream_url + '|' + urllib.urlencode(dict(headers)))
    return [playable]

View File

@@ -123,7 +123,7 @@ def search_peliculas(item):
def category(item):
blacklist = ['Serie TV Altadefinizione', 'HD AltaDefinizione', 'Al Cinema', 'Serie TV', 'Miniserie', 'Programmi Tv', 'Live', 'Trailers', 'Serie TV Aggiornate', 'Aggiornamenti', 'Featured']
itemlist = support.scrape(item, '<li><a href="([^"]+)"><span></span>([^<]+)</a></li>', ['url', 'title'], headers, blacklist, patronBlock='<ul class="table-list">(.*?)</ul>', action='peliculas')
itemlist = support.scrape(item, '<li><a href="([^"]+)"><span></span>([^<]+)</a></li>', ['url', 'title'], headers, blacklist, patron_block='<ul class="table-list">(.*?)</ul>', action='peliculas')
return support.thumb(itemlist)
@@ -132,7 +132,7 @@ def peliculas(item):
action = 'findvideos' if item.extra == 'movie' else 'episodios'
if item.args == 'movie':
patron= r'<div class="mediaWrap mediaWrapAlt">[^<]+<a href="([^"]+)" title="Permalink to\s([^"]+) \(([^<]+)\).*?"[^>]+>[^<]+<img[^s]+src="([^"]+)"[^>]+>[^<]+<\/a>.*?<p>\s*([a-zA-Z-0-9]+)\s*<\/p>'
itemlist = support.scrape(item, patron, ['url', 'title', 'year', 'thumb', 'quality'], headers, action=action, patronBlock='<div id="main_col">(.*?)main_col', patronNext='<a class="nextpostslink" rel="next" href="([^"]+)">')
itemlist = support.scrape(item, patron, ['url', 'title', 'year', 'thumb', 'quality'], headers, action=action, patron_block='<div id="main_col">(.*?)main_col', patronNext='<a class="nextpostslink" rel="next" href="([^"]+)">')
else:
patron = r'<div class="media3">[^>]+><a href="([^"]+)"><img[^s]+src="([^"]+)"[^>]+><\/a><[^>]+><a[^<]+><p>([^<]+) \(([^\)]+)[^<]+<\/p>.*?<p>\s*([a-zA-Z-0-9]+)\s*<\/p>'
itemlist = support.scrape(item, patron, ['url', 'thumb', 'title', 'year', 'quality'], headers, action=action, patronNext='<a class="nextpostslink" rel="next" href="([^"]+)">')

View File

@@ -100,13 +100,13 @@ def findvideos(item):
def generos(item):
findhost()
patron = '<a href="([^"#]+)">([a-zA-Z]+)'
return support.scrape(item, patron, ['url', 'title'], patronBlock='<a href="#">Genere</a><ul class="sub-menu">.*?</ul>', action='peliculas')
return support.scrape(item, patron, ['url', 'title'], patron_block='<a href="#">Genere</a><ul class="sub-menu">.*?</ul>', action='peliculas')
def year(item):
findhost()
patron = r'<a href="([^"#]+)">(\d+)'
return support.scrape(item, patron, ['url', 'title'], patronBlock='<a href="#">Anno</a><ul class="sub-menu">.*?</ul>', action='peliculas')
return support.scrape(item, patron, ['url', 'title'], patron_block='<a href="#">Anno</a><ul class="sub-menu">.*?</ul>', action='peliculas')
def play(item):

View File

@@ -32,11 +32,6 @@ def find_multiple_matches(text, pattern):
return re.findall(pattern, text, re.DOTALL)
def find_multiple_matches_groups(text, pattern):
r = re.compile(pattern)
return [m.groupdict() for m in r.finditer(text)]
# Convierte los codigos html "&ntilde;" y lo reemplaza por "ñ" caracter unicode utf-8
def decodeHtmlentities(data):
entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8})(;?)")

View File

@@ -92,202 +92,172 @@ def url_decode(url_enc):
def color(text, color):
return "[COLOR " + color + "]" + text + "[/COLOR]"
def scrape(func):
# args is a dict containing the following keys:
def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data="", patron_block="",
patronNext="", action="findvideos", addVideolibrary = True, type_content_dict={}, type_action_dict={}):
# patron: the patron to use for scraping page, all capturing group must match with listGroups
# listGroups: a list containing the scraping info obtained by your patron, in order
# accepted values are: url, title, thumb, quality, year, plot, duration, genre, rating, episode, lang
# headers: values to pass to request header
# header: values to pass to request header
# blacklist: titles that you want to exclude(service articles for example)
# data: if you want to pass data manually, maybe because you need some custom replacement
# patronBlock: patron to get parts of the page (to scrape with patron attribute),
# patron_block: patron to get parts of the page (to scrape with patron attribute),
# if you need a "block inside another block" you can create a list, please note that all matches
# will be packed as string
# patronNext: patron for scraping next page link
# action: if you want results perform an action different from "findvideos", useful when scraping film by genres
# addVideolibrary: if "add to videolibrary" should appear
# url_host: string to prepend to scrapedurl, useful when url don't contain host
# example usage:
# import support
# itemlist = []
# patron = 'blablabla'
# headers = [['Referer', host]]
# blacklist = 'Request a TV serie!'
# return support.scrape(item, itemlist, patron, ['thumb', 'quality', 'url', 'title', 'year', 'plot', 'episode', 'lang'],
# return support.scrape(item, itemlist, patron, ['thumb', 'quality', 'url', 'title', 'title2', 'year', 'plot', 'episode', 'lang'],
# headers=headers, blacklist=blacklist)
# listGroups
# thumb = immagine, quality = qualità, url = link singolo o gruppo, title = titolo film o serie, title2 = titolo aggiuntivo
# year = anno del film o della serie, plot = descrizione film o serie, episode = numero stagione - numero episodio in caso di serie,
# lang = lingua del video
# 'type' is a check for typologies of content e.g. Film or TV Series
# 'episode' is a key to grab episode numbers if it is separated from the title
# IMPORTANT 'type' is a special key, to work need typeContentDict={} and typeActionDict={}
# IMPORTANT 'type' is a special key, to work need type_content_dict={} and type_action_dict={}
def wrapper(*args):
itemlist = []
itemlist = []
args = func(*args)
if not data:
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data.replace("'", '"')
data = re.sub('\n|\t', ' ', data)
# replace all ' with " and eliminate newline, so we don't need to worry about
log('DATA =', data)
item = args['item']
block = data
action = args['action'] if 'action' in args else 'findvideos'
anime = args['anime'] if 'anime' in args else ''
addVideolibrary = args['addVideolibrary'] if 'addVideolibrary' in args else True
blacklist = args['blacklist'] if 'blacklist' in args else ''
data = args['data'] if 'data' in args else ''
headers = args['headers'] if 'headers' in args else ''
patron = args['patron'] if 'patron' in args else ''
patronNext = args['patronNext'] if 'patronNext' in args else ''
patronBlock = args['patronBlock'] if 'patronBlock' in args else ''
typeActionDict = args['type_action_dict'] if 'type_action_dict' in args else {}
typeContentDict = args['type_content_dict'] if 'type_content_dict' in args else {}
if patron_block:
if type(patron_block) == str:
patron_block = [patron_block]
if not data:
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data.replace("'", '"')
data = re.sub('\n|\t', ' ', data)
# replace all ' with " and eliminate newline, so we don't need to worry about
log('DATA =', data)
for n, regex in enumerate(patron_block):
blocks = scrapertoolsV2.find_multiple_matches(block, regex)
block = ""
for b in blocks:
block += "\n" + str(b)
log('BLOCK ', n, '=', block)
else:
block = data
if patron and listGroups:
matches = scrapertoolsV2.find_multiple_matches(block, patron)
log('MATCHES =', matches)
block = data
known_keys = ['url', 'title', 'title2', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang'] #by greko aggiunto episode
lang = '' # aggiunto per gestire i siti con pagine di serietv dove si hanno i video in ita e in subita
for match in matches:
if len(listGroups) > len(match): # to fix a bug
match = list(match)
match.extend([''] * (len(listGroups) - len(match)))
if patronBlock:
if type(patronBlock) == str:
patronBlock = [patronBlock]
scraped = {}
for kk in known_keys:
val = match[listGroups.index(kk)] if kk in listGroups else ''
if val and (kk == "url" or kk == 'thumb') and 'http' not in val:
val = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
scraped[kk] = val
for n, regex in enumerate(patronBlock):
blocks = scrapertoolsV2.find_multiple_matches(block, regex)
block = ""
for b in blocks:
block += "\n" + str(b)
log('BLOCK ', n, '=', block)
else:
block = data
if patron:
matches = scrapertoolsV2.find_multiple_matches_groups(block, patron)
log('MATCHES =', matches)
title = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["title"])).replace('', '\'').replace('"', "'").strip() # fix by greko da " a '
plot = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["plot"]))
known_keys = ['url', 'title', 'title2', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere',
'rating', 'type', 'lang'] # by greko aggiunto episode
lang = '' # aggiunto per gestire i siti con pagine di serietv dove si hanno i video in ita e in subita
for match in matches:
listGroups = match.keys()
match = match.values()
longtitle = typo(title, 'bold')
if scraped['quality']: longtitle = longtitle + typo(scraped['quality'], '_ [] color kod')
if scraped['episode']:
scraped['episode'] = re.sub(r'\s-\s|-|x|&#8211', 'x' , scraped['episode'])
longtitle = typo(scraped['episode'] + ' - ', 'bold') + longtitle
if scraped['title2']:
title2 = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["title2"])).replace('"', "'").strip()
longtitle = longtitle + typo(title2, 'bold _ -- _')
if len(listGroups) > len(match): # to fix a bug
match = list(match)
match.extend([''] * (len(listGroups) - len(match)))
scraped = {}
for kk in known_keys:
val = match[listGroups.index(kk)] if kk in listGroups else ''
if val and (kk == "url" or kk == 'thumb') and 'http' not in val:
val = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
scraped[kk] = val
title = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["title"])
.replace('"',"'")).strip() # fix by greko da " a '
plot = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["plot"]))
longtitle = typo(title, 'bold')
if scraped['quality']: longtitle = longtitle + typo(scraped['quality'], '_ [] color kod')
if scraped['episode']:
scraped['episode'] = re.sub(r'\s-\s|-|x|&#8211', 'x', scraped['episode'])
longtitle = typo(scraped['episode'] + ' - ', 'bold') + longtitle
if scraped['title2']:
title2 = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["title2"]).replace('"', "'")).strip()
longtitle = longtitle + typo(title2, 'bold _ -- _')
## Aggiunto/modificato per gestire i siti che hanno i video
## in ita e subita delle serie tv nella stessa pagina
if scraped['lang']:
if 'sub' in scraped['lang'].lower():
lang = 'Sub-ITA'
else:
lang = 'ITA'
if lang != '':
longtitle += typo(lang, '_ [] color kod')
# if title is set, probably this is a list of episodes or video sources
if item.infoLabels["title"] or item.fulltitle:
infolabels = item.infoLabels
## Aggiunto/modificato per gestire i siti che hanno i video
## in ita e subita delle serie tv nella stessa pagina
if scraped['lang'] == '': #altrimenti nei canali dei film mi aggiunge sub-ita a tutti i film successivi
lang = '' # o in alternativa lang = 'ITA'
if scraped['lang']:
if 'sub' in scraped['lang'].lower():
lang = 'Sub-ITA'
else:
infolabels = {}
if scraped["year"]:
infolabels['year'] = scraped["year"]
if scraped["plot"]:
infolabels['plot'] = plot
if scraped["duration"]:
matches = scrapertoolsV2.find_multiple_matches(scraped["duration"],
r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)')
for h, m in matches:
scraped["duration"] = int(h) * 60 + int(m)
if not matches:
scraped["duration"] = scrapertoolsV2.find_single_match(scraped["duration"], r'(\d+)')
infolabels['duration'] = int(scraped["duration"]) * 60
if scraped["genere"]:
genres = scrapertoolsV2.find_multiple_matches(scraped["genere"], '[A-Za-z]+')
infolabels['genere'] = ", ".join(genres)
if scraped["rating"]:
infolabels['rating'] = scrapertoolsV2.decodeHtmlentities(scraped["rating"])
lang = 'ITA'
if lang != '':
longtitle += typo(lang, '_ [] color kod')
if typeContentDict:
for name, variants in typeContentDict.items():
if scraped['type'] in variants:
item.contentType = name
if typeActionDict:
for name, variants in typeActionDict.items():
if scraped['type'] in variants:
action = name
if scraped["title"] not in blacklist:
it = Item(
channel=item.channel,
action=action,
contentType=item.contentType,
title=longtitle,
fulltitle=title,
show=title,
quality=scraped["quality"],
url=scraped["url"],
infoLabels=infolabels,
thumbnail=scraped["thumb"],
args=item.args
)
for lg in list(set(listGroups).difference(known_keys)):
it.__setattr__(lg, match[listGroups.index(lg)])
if 'itemHook' in args:
it = args['itemHook'](it)
itemlist.append(it)
checkHost(item, itemlist)
## if (item.contentType == "episode" and (action != "findvideos" and action != "play")) \
## or (item.contentType == "movie" and action != "play"):
if (item.contentType == "tvshow" and (action != "findvideos" and action != "play")) \
or (item.contentType == "episode" and action != "play") \
or (item.contentType == "movie" and action != "play") :
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if item.infoLabels["title"] or item.fulltitle: # if title is set, probably this is a list of episodes or video sources
infolabels = item.infoLabels
else:
for it in itemlist:
it.infoLabels = item.infoLabels
if 'itemlistHook' in args:
itemlist = args['itemlistHook'](itemlist)
infolabels = {}
if scraped["year"]:
infolabels['year'] = scraped["year"]
if scraped["plot"]:
infolabels['plot'] = plot
if scraped["duration"]:
matches = scrapertoolsV2.find_multiple_matches(scraped["duration"],r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)')
for h, m in matches:
scraped["duration"] = int(h) * 60 + int(m)
if not matches:
scraped["duration"] = scrapertoolsV2.find_single_match(scraped["duration"], r'(\d+)')
infolabels['duration'] = int(scraped["duration"]) * 60
if scraped["genere"]:
genres = scrapertoolsV2.find_multiple_matches(scraped["genere"], '[A-Za-z]+')
infolabels['genere'] = ", ".join(genres)
if scraped["rating"]:
infolabels['rating'] = scrapertoolsV2.decodeHtmlentities(scraped["rating"])
if patronNext:
nextPage(itemlist, item, data, patronNext, 2)
if type_content_dict:
for name, variants in type_content_dict.items():
if scraped['type'] in variants:
item.contentType = name
if type_action_dict:
for name, variants in type_action_dict.items():
if scraped['type'] in variants:
action = name
if anime:
from specials import autorenumber
autorenumber.renumber(itemlist)
if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
item.fulltitle = item.infoLabels["title"]
videolibrary(itemlist, item)
if 'fullItemlistHook' in args:
itemlist = args['fullItemlistHook'](itemlist)
if inspect.stack()[1][3] == 'episodios': item.contentType = 'episode'
return itemlist
if scraped["title"] not in blacklist:
it = Item(
channel=item.channel,
action=action,
contentType=item.contentType,
title=longtitle,
fulltitle=title,
show=title,
language = lang if lang != '' else '',
quality=scraped["quality"],
url=scraped["url"],
infoLabels=infolabels,
thumbnail=scraped["thumb"],
args=item.args
)
return wrapper
for lg in list(set(listGroups).difference(known_keys)):
it.__setattr__(lg, match[listGroups.index(lg)])
itemlist.append(it)
checkHost(item, itemlist)
if (item.contentType == "tvshow" and (action != "findvideos" and action != "play")) \
or (item.contentType == "episode" and action != "play") \
or (item.contentType == "movie" and action != "play"):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
else:
for it in itemlist:
it.infoLabels = item.infoLabels
if patronNext:
nextPage(itemlist, item, data, patronNext, 2)
if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
item.fulltitle = item.infoLabels["title"]
videolibrary(itemlist, item)
return itemlist
def checkHost(item, itemlist):
@@ -428,9 +398,13 @@ def swzz_get_url(item):
return data
def menuItem(itemlist, filename, title='', action='', url='', contentType='movie', args=[]):
def menu(itemlist, title='', action='', url='', contentType='movie', args=[]):
# Function to simplify menu creation
frame = inspect.stack()[1]
filename = frame[0].f_code.co_filename
filename = os.path.basename(filename).replace('.py','')
# Call typo function
title = typo(title)
@@ -454,51 +428,6 @@ def menuItem(itemlist, filename, title='', action='', url='', contentType='movie
return itemlist
def menu(func):
def wrapper(*args):
args = func(*args)
item = args['item']
host = func.__globals__['host']
list_servers = func.__globals__['list_servers']
list_quality = func.__globals__['list_quality']
filename = func.__module__.split('.')[1]
listUrls = ['film', 'filmSub', 'tvshow', 'tvshowSub']
dictUrl = {}
for name in listUrls:
dictUrl[name] = args[name] if name in args else None
autoplay.init(item.channel, list_servers, list_quality)
# Main options
itemlist = []
if dictUrl['film'] is not None:
menuItem(itemlist, filename, 'Film bold', 'peliculas', host + dictUrl['film'])
### modificato by greko ########
for sub, var in dictUrl['filmSub']:
menuItem(itemlist, filename, sub + ' submenu', var[1],
host + var[0],
args=var[2] if len(var)>2 else '')
menuItem(itemlist, filename, 'Cerca submenu bold', 'search', host, args='film')
if dictUrl['tvshow'] is not None:
menuItem(itemlist, filename, 'Serie TV bold', 'peliculas', host + dictUrl['tvshow'], contentType='tvshow')
for sub, var in dictUrl['tvshowSub']:
menuItem(itemlist, filename, sub + ' submenu', var[1],
host + var[0], contentType='tvshow',
args=var[2] if len(var)>2 else '')
menuItem(itemlist, filename, 'Cerca submenu bold', 'search', host, args='serie')
### fine by greko ########
autoplay.show_option(item.channel, itemlist)
return itemlist
return wrapper
def typo(string, typography=''):
kod_color = '0xFF65B3DA' #'0xFF0081C2'
@@ -551,7 +480,7 @@ def typo(string, typography=''):
return string
def match(item, patron='', patronBlock='', headers='', url=''):
def match(item, patron='', patron_block='', headers='', url=''):
matches = []
url = url if url else item.url
data = httptools.downloadpage(url, headers=headers, ignore_response_code=True).data.replace("'", '"')
@@ -559,8 +488,8 @@ def match(item, patron='', patronBlock='', headers='', url=''):
data = re.sub(r'>\s\s*<', '><', data)
log('DATA= ', data)
if patronBlock:
block = scrapertoolsV2.find_single_match(data, patronBlock)
if patron_block:
block = scrapertoolsV2.find_single_match(data, patron_block)
log('BLOCK= ',block)
else:
block = data
@@ -617,8 +546,7 @@ def nextPage(itemlist, item, data='', patron='', function_level=1, next_page='',
log('NEXT= ', next_page)
itemlist.append(
Item(channel=item.channel,
#action=inspect.stack()[function_level][3],
action = item.action,
action=inspect.stack()[function_level][3],
contentType=item.contentType,
title=typo(config.get_localized_string(30992), 'color kod bold'),
url=next_page,
@@ -720,4 +648,5 @@ def channel_config(item, itemlist):
config=item.channel,
folder=False,
thumbnail=get_thumb('setting_0.png'))
)
)

View File

@@ -3,7 +3,6 @@
# Zip Tools
# --------------------------------------------------------------------------------
import io
import os
import zipfile
@@ -18,7 +17,6 @@ class ziptools:
if not dir.endswith(':') and not os.path.exists(dir):
os.mkdir(dir)
file = io.FileIO(file)
zf = zipfile.ZipFile(file)
if not folder_to_extract:
self._createstructure(file, dir)

View File

@@ -1,3 +0,0 @@
{
"addon_version": "0.4"
}

View File

@@ -1,13 +1,11 @@
# -*- coding: utf-8 -*-
import hashlib
import io
import os
import shutil
import zipfile
from core import httptools, filetools, downloadtools
from core.ziptools import ziptools
from platformcode import logger, platformtools
from platformcode import logger, platformtools, config
import json
import xbmc
import re
@@ -64,7 +62,7 @@ def check_addon_init():
else:
# evitiamo che dia errore perchè il file è già in uso
localCommitFile.close()
calcCurrHash()
updateFromZip()
return True
if pos > 0:
@@ -209,8 +207,7 @@ def getSha(fileText):
def updateFromZip():
dp = platformtools.dialog_progress_bg('Kodi on Demand', 'Aggiornamento in corso...')
dp.update(0)
platformtools.dialog_notification('Kodi on Demand', 'Aggiornamento in corso...')
remotefilename = 'https://github.com/' + user + "/" + repo + "/archive/" + branch + ".zip"
localfilename = xbmc.translatePath("special://home/addons/") + "plugin.video.kod.update.zip"
@@ -218,7 +215,7 @@ def updateFromZip():
logger.info("localfilename=%s" % localfilename)
import urllib
urllib.urlretrieve(remotefilename, localfilename, lambda nb, bs, fs, url=remotefilename: _pbhook(nb, bs, fs, url, dp))
urllib.urlretrieve(remotefilename, localfilename)
# Lo descomprime
logger.info("decompressione...")
@@ -234,23 +231,23 @@ def updateFromZip():
logger.info(e)
return False
dp.update(95)
# puliamo tutto
shutil.rmtree(addonDir)
filetools.rename(destpathname + "addon-" + branch, addonDir)
logger.info("Cancellando il file zip...")
# Borra el zip descargado
logger.info("kodiondemand.core.updater borra fichero...")
os.remove(localfilename)
# os.remove(temp_dir)
platformtools.dialog_notification('Kodi on Demand', 'Aggiornamento completato!')
dp.update(100)
return hash
# https://stackoverflow.com/questions/3083235/unzipping-file-results-in-badzipfile-file-is-not-a-zip-file
def fixZipGetHash(zipFile):
f = io.FileIO(zipFile, 'r+b')
f = open(zipFile, 'r+b')
data = f.read()
pos = data.find(b'\x50\x4b\x05\x06') # End of central directory signature
hash = ''
@@ -268,10 +265,86 @@ def fixZipGetHash(zipFile):
return str(hash)
def _pbhook(numblocks, blocksize, filesize, url, dp):
try:
percent = min((numblocks*blocksize*90)/filesize, 100)
dp.update(percent)
except:
percent = 90
dp.update(percent)
class ziptools:
    """Minimal zip-extraction helper duplicated inside the updater so the
    self-update can run without depending on core.ziptools (which may be
    the very code being replaced)."""

    def extract(self, file, dir, folder_to_extract="", overwrite_question=False, backup=False):
        """Extract the zip at *file* into directory *dir*.

        folder_to_extract: when set, only files directly inside that folder
            are extracted, flattened into *dir*.
        overwrite_question: ask the user before overwriting an existing file.
        backup: copy any file about to be overwritten into a timestamped
            backup folder under the addon data path first.
        """
        logger.info("file=%s" % file)
        logger.info("dir=%s" % dir)
        # Create the target dir unless it is a bare drive spec like "C:".
        if not dir.endswith(':') and not os.path.exists(dir):
            os.mkdir(dir)
        zf = zipfile.ZipFile(file)
        if not folder_to_extract:
            self._createstructure(file, dir)
        for nameo in zf.namelist():
            # Sanitize characters that are invalid in Windows filenames.
            name = nameo.replace(':', '_').replace('<', '_').replace('>', '_').replace('|', '_').replace('"', '_').replace('?', '_').replace('*', '_')
            logger.info("name=%s" % nameo)
            if not name.endswith('/'):
                logger.info("no es un directorio")
                try:
                    (path, filename) = os.path.split(os.path.join(dir, name))
                    logger.info("path=%s" % path)
                    logger.info("name=%s" % name)
                    if folder_to_extract:
                        # NOTE(review): 'break' aborts the whole extraction at the
                        # first entry outside the target folder; 'continue' may be
                        # what was intended — kept as-is to preserve behavior.
                        if path != os.path.join(dir, folder_to_extract):
                            break
                    else:
                        os.makedirs(path)
                except:
                    # Directory probably exists already; best-effort by design.
                    pass
                if folder_to_extract:
                    outfilename = os.path.join(dir, filename)
                else:
                    outfilename = os.path.join(dir, name)
                logger.info("outfilename=%s" % outfilename)
                try:
                    if os.path.exists(outfilename) and overwrite_question:
                        from platformcode import platformtools
                        dyesno = platformtools.dialog_yesno("El archivo ya existe",
                                                            "El archivo %s a descomprimir ya existe" \
                                                            ", ¿desea sobrescribirlo?" \
                                                            % os.path.basename(outfilename))
                        if not dyesno:
                            break
                    if backup:
                        import time
                        import shutil
                        hora_folder = "Copia seguridad [%s]" % time.strftime("%d-%m_%H-%M", time.localtime())
                        backup = os.path.join(config.get_data_path(), 'backups', hora_folder, folder_to_extract)
                        if not os.path.exists(backup):
                            os.makedirs(backup)
                        shutil.copy2(outfilename, os.path.join(backup, os.path.basename(outfilename)))
                    # Fix: close the output handle deterministically — the original
                    # leaked it, which can block the subsequent rename on Windows.
                    with open(outfilename, 'wb') as outfile:
                        outfile.write(zf.read(nameo))
                except:
                    logger.error("Error en fichero " + nameo)

    def _createstructure(self, file, dir):
        # Pre-create every directory listed in the archive under *dir*.
        self._makedirs(self._listdirs(file), dir)

    # NOTE(review): defined inside the class but without 'self' — callers must
    # invoke it with a single path argument. Kept as-is for compatibility.
    def create_necessary_paths(filename):
        try:
            (path, name) = os.path.split(filename)
            os.makedirs(path)
        except:
            pass

    def _makedirs(self, directories, basedir):
        """Create each archive directory (already sorted) under *basedir*."""
        for dir in directories:
            curdir = os.path.join(basedir, dir)
            if not os.path.exists(curdir):
                os.mkdir(curdir)

    def _listdirs(self, file):
        """Return the sorted list of directory entries in the zip at *file*."""
        zf = zipfile.ZipFile(file)
        dirs = []
        for name in zf.namelist():
            if name.endswith('/'):
                dirs.append(name)
        dirs.sort()
        return dirs

View File

@@ -72,7 +72,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
else:
logger.info(" url duplicada=" + url)
patron = r"""(https?://(?:www\.)?(?:threadsphere\.bid|adf\.ly|q\.gs|j\.gs|u\.bb|ay\.gy|linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co|adfoc\.us|lnx\.lu|sh\.st|href\.li|anonymz\.com|shrink-service\.it|rapidcrypt\.net|ecleneue\.com)/[^"']+)"""#|bit\.ly)/[^"']+)"""
patron = r"""(https?://(?:www\.)?(?:threadsphere\.bid|adf\.ly|q\.gs|j\.gs|u\.bb|ay\.gy|linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co|adfoc\.us|lnx\.lu|sh\.st|href\.li|anonymz\.com|shrink-service\.it|rapidcrypt\.net|ecleneue\.com)/[^"']+)"""
logger.info(" find_videos #" + patron + "#")
matches = re.compile(patron).findall(page_url)

View File

@@ -8,7 +8,7 @@
"patterns": [
{
"pattern": "wstream.video/(?:embed-|videos/|video/)?([a-z0-9A-Z]+)",
"url": "http:\/\/wstream.video\/video\/\\1"
"url": "http:\/\/wstream.video\/\\1"
}
],
"ignore_urls": [ ]

View File

@@ -1,175 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Scritto per KOD -*-
# versione: 0.1
##import sys
##import os
##import json
##import xbmc
##import xbmcaddon, xbmcgui
import kdicc
##
##from core import channeltools, scrapertools
##from platformcode import logger, config, platformtools
##from core.item import Item
"""
Abbinato allo script kdicc permette di modificare gli url del file channels_active.json ( poi channels.json ):
1. riscrivere l'url in caso di redirect
2.
a. disattivare il canale in caso di problemi al sito
b. riattivare il canale quando l'url viene raggiunto nuovamente
quando far partire il check?
1. all'inizio per controllare i file nel channels_not_active (pochi)
2. durante l'apertura del canale
3. nelle varie ricerche globali
Problematiche:
1. i redirect sono gestiti in locale
2. se il canale viene disattivato bisogna scrivere nel json:
a. del canale per disattivarlo
b. inserire il canale nel channels_not_active
"""
class CheckHost(object):
    """Stub for a future channel-availability checker (see the commented-out
    design notes in this module). Not wired up anywhere yet."""

    def __init__(self):
        pass

    def http_code(self):
        # Fix: the original omitted 'self', so calling instance.http_code()
        # raised TypeError. Placeholder: intended to return the HTTP status
        # of a channel host — TODO implement.
        pass
def checkHost(item, itemlist):
    """Reset the channel's host setting to its default when a scrape yields
    no results (covers both a user-edited host and a stale host after a
    domain change).

    NOTE(review): this module's imports of channeltools/config are commented
    out at the top of the file, so calling this here raises NameError —
    presumably a copy of support.checkHost kept for reference; confirm.
    """
    # No results: the user may have changed the host manually, so restore
    # the default (this also fixes our own host changes going stale).
    if len(itemlist) == 0:
        # Find the default host in the channel's JSON settings.
        defHost = None
        for s in channeltools.get_channel_json(item.channel)['settings']:
            if s['id'] == 'channel_host':
                defHost = s['default']
                break
        # Compare it with the current value and reset if they differ.
        if config.get_setting('channel_host', item.channel) != defHost:
            config.set_setting('channel_host', defHost, item.channel)
##
##def host_check(itemlist, item=None):
## """
## se item == None allora siamo nello start()
##
## :param itemlist:
## :param item:
## :return:
## """
## #import web_pdb; web_pdb.set_trace()
## logger.info("host_check item, itemlist : %s %s " % (item, itemlist))
##
## if not item:
## logger.info("%s %s " % (item, itemlist))
## check_host(itemlist = '', item=None)
## else:
## pass
##
##
##def check_host(itemlist, item=None):
## #import web_pdb; web_pdb.set_trace()
## if not item:
## logger.info("ITEM : %s " % (item))
## channel_path = os.path.join(config.get_runtime_path(), "")
## file = 'channel_not_active.json'
## # leggo dal file channel_not_active
## with open(channel_path+file, 'r') as json_file:
## data = ''
## try:
## data = json.load(json_file)
## logger.info("Json data : %s " % (data))
## except:
## logger.info("Json except : %s " % (data))
## if len(data) !=0:
## logger.info("IF len(data) : %s " % len(data))
## for canale in data:
## url_org = str(data[canale])
##
##
## # X debug, da togliere
## else:
## logger.info("ELSE len(data) : %s " % len(data))
##
## else:
## logger.info()
##
## channel_path = os.path.join(config.get_runtime_path(), "")
## file = 'channels.json'
## #logger.info("canale Json data : %s " % canale)
## new_domain = []
## error_domain = []
## nonraggiungibile_domain = []
## code_error = []
## with open(channel_path+file, 'r') as json_file:
## data = json.load(json_file)
## logger.info("Json data : %s " % (data))#, data['settings']))
##
## for canale in data:
## url_org = str(data[canale])
## try:
## res = requests.get(url_org)
## url = str(res.url)
## logger.info("Canale : %s " % canale)
## logger.info("R = %s" % res.status_code)
## logger.info("siamo qui data[canale]: %s " % url_org) # contiene l'url nuovo se c'è link
## logger.info("siamo qui res.url: %s " % url) # contiene l'url nuovo se c'è link
##
## if url.endswith('/') == True:
## url = url[:-1]
##
## logger.info("url_org -> %s" % url_org)
## logger.info("url -> %s" % url)
## logger.info("################")
##
## if url_org != url:
## new_domain.append((canale, url_org, url))
## if res.status_code == 200:
## pass
## elif res.status_code == 301:
## code_error.append(res.status_code)
## elif str(res.status_code).startswith('5'):
## code_error.append(res.status_code)
## logger.info("res.code 5XX: %s " % res.status_code)
## ## elif res.status_code ==
## ## if res.code == 503:
## ## # disabilitare il canale
## ## logger.info("res.code non è 200: %s " % res.code)
## ## error_domain.append((canale, data[canale]))
## ## if res.code == 200 and 'link' in res.headers:
## ## logger.info("res.code è 200 and 'link': %s " % res.code)
## ## host_red = res.headers['link']
## ## pat = r'<([a-zA-Z0-9-\.\/:]+)/wp-json/>;'
## ## host_red = scrapertools.find_single_match(host_red, pat)
## ## logger.info("host_red: %s " % host_red)
## ## # confronto i 2 link, se non sono uguali c'è un redirect
## ## if host_red != data[canale]:
## ## logger.info("host_red host_link: %s - %s " % (host_red, data[canale]))
## ## new_domain.append((canale, host_red))
## ## logger.info("data modificato link: %s " % canale)
## ##
## ## logger.info("fuori dagli if': %s " % res.code)
## except requests.RequestException as e:
## """
## Il sito risulta non raggiungibile per qualche motivo
## quindi verrà disabilitato
## """
## logger.info("DICT: %s " % dict(error=e.message))
## logger.info("Canale : %s " % canale)
## logger.info("siti non raggiungibili: %s %s" % (url_org, res.status_code))
## logger.info("################")
## nonraggiungibile_domain.append((canale, url_org))
##
## logger.info("new_domain: %s " % new_domain)
## logger.info("code_error: %s " % code_error)
## logger.info("error_domain: %s " % error_domain)
## logger.info("nonraggiungibile_domain: %s " % nonraggiungibile_domain)