Merge pull request #1 from kodiondemand/master

update da kod master
This commit is contained in:
greko
2019-05-07 18:28:12 +02:00
committed by GitHub
17 changed files with 985 additions and 29 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.kod" name="Kodi on Demand" version="0.1.1" provider-name="KOD Team">
<addon id="plugin.video.kod" name="Kodi on Demand" version="0.2" provider-name="KOD Team">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>

View File

@@ -45,12 +45,12 @@ def mainlist(item):
def categories(item):
support.log(item)
itemlist = support.scrape(item,'<li><a href="([^"]+)">(.*?)</a></li>',['url','title'],headers,'Altadefinizione01',patron_block='<ul class="kategori_list">(.*?)</ul>',action='peliculas',url_host=host)
itemlist = support.scrape(item,'<li><a href="([^"]+)">(.*?)</a></li>',['url','title'],headers,'Altadefinizione01',patron_block='<ul class="kategori_list">(.*?)</ul>',action='peliculas')
return support.thumb(itemlist)
def AZlist(item):
support.log()
return support.scrape(item,r'<a title="([^"]+)" href="([^"]+)"',['title','url'],headers,patron_block=r'<div class="movies-letter">(.*?)<\/div>',action='peliculas_list',url_host=host)
return support.scrape(item,r'<a title="([^"]+)" href="([^"]+)"',['title','url'],headers,patron_block=r'<div class="movies-letter">(.*?)<\/div>',action='peliculas_list')
def newest(categoria):

70
channels/filmigratis.json Normal file
View File

@@ -0,0 +1,70 @@
{
"id": "filmigratis",
"name": "Filmi Gratis",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "filmigratis.png",
"banner": "filmigratis.png",
"categories": ["movie","tvshow"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "2", "5", "10" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
}

303
channels/filmigratis.py Normal file
View File

@@ -0,0 +1,303 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per Filmi Gratis
# ------------------------------------------------------------
import base64
import re
import urlparse
from channelselector import get_thumb
from channels import filtertools, support, autoplay
from core import scrapertools, servertools, httptools, tmdb
from platformcode import logger, config
from core.item import Item
channel = 'filmigratis'
host = 'https://filmigratis.net'
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango', 'vidoza', 'okru']
list_quality = ['1080p', '720p', '480p', '360']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'filmigratis')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'filmigratis')
headers = [['Referer', host]]
#-----------------------------------------------------------------------------------------------------------------------
def mainlist(item):
    """Build the channel root menu: movie/series listings, categories and search.

    :param item: the menu Item for this channel
    :return: list of menu Items
    """
    # Main options
    itemlist = []
    support.menu(itemlist, 'Al Cinema bold', 'carousel', host, contentType='movie')
    support.menu(itemlist, 'Film alta definizione bold', 'peliculas', host, contentType='movie', args='film')
    support.menu(itemlist, 'Categorie Film bold', 'categorias_film', host , contentType='movie', args='film')
    support.menu(itemlist, 'Categorie Serie bold', 'categorias_serie', host, contentType='episode', args='serie')
    support.menu(itemlist, '[COLOR blue]Cerca Film...[/COLOR] bold', 'search', host, contentType='movie', args='film')
    support.menu(itemlist, '[COLOR blue]Cerca Serie...[/COLOR] bold', 'search', host, contentType='episode', args='serie')
    # Register the configured servers/qualities and expose the autoplay toggle entry.
    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)
    return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def carousel(item):
    """Scrape the home-page "owl-carousel" slider into playable movie Items.

    :param item: Item whose url points at the site home page
    :return: list of Items with action "findvideos"
    """
    logger.info('[filmigratis.py] carousel')
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Restrict parsing to the slider markup only, up to the main content section.
    blocco = scrapertools.find_single_match(data, r'<div class="owl-carousel" id="postCarousel">(.*?)<section class="main-content">')
    # Groups: 1=thumbnail url (CSS background), 2=title, 3=detail-page url
    # (the second <a ...> in each slide carries the link we want).
    patron = r'background-image: url\((.*?)\).*?<h3.*?>(.*?)<.*?<a.*?<a href="(.*?)"'
    matches = re.compile(patron, re.DOTALL).findall(blocco)
    for scrapedthumb, scrapedtitle, scrapedurl, in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action = "findvideos",
                 contentType = item.contentType,
                 title = scrapedtitle,
                 fulltitle = scrapedtitle,
                 url = scrapedurl,
                 thumbnail = scrapedthumb,
                 args=item.args,
                 show = scrapedtitle,))
    return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def peliculas(item):
    """List the HD-film timeline of the current page, plus a "next page" entry.

    :param item: Item whose url points at a film listing page
    :return: list of Items (films with action "findvideos", optionally a pager)
    """
    logger.info('[filmigratis.py] peliculas')
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Only parse the timeline between the H1 header and the sidebar.
    blocco = scrapertools.find_single_match(data, r'<h1>Film streaming ita in alta definizione</h1>(.*?)<div class="content-sidebar">')
    # Groups: 1=detail-page url, 2=thumbnail, 3=title
    patron = r'<div class="timeline-left-wrapper">.*?<a href="(.*?)".*?src="(.*?)".*?<h3.*?>(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(blocco)
    for scrapedurl, scrapedthumb, scrapedtitle, in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action = "findvideos",
                 contentType = item.contentType,
                 title = scrapedtitle,
                 fulltitle = scrapedtitle,
                 url = scrapedurl,
                 thumbnail = scrapedthumb,
                 args=item.args,
                 show = scrapedtitle))
    # Pagination: the "nextpostslink" anchor is searched in the whole page,
    # not just in `blocco`, because it sits outside the timeline block.
    patron = r'class="nextpostslink".*?href="(.*?)"'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[B]" + config.get_localized_string(30992) + "[/B]",
                 args=item.args,
                 url=next_page))
    return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def categorias_film(item):
    """List the film categories taken from the sidebar CATEGORIES block.

    :param item: Item whose url points at the site home page
    :return: list of category Items with action "peliculas_categorias"
    """
    logger.info("[filmigratis.py] categorias_film")
    page = httptools.downloadpage(item.url, headers=headers).data
    # Only the <ul> that follows the CATEGORIES heading contains the links.
    section = scrapertools.find_single_match(page, 'CATEGORIES.*?<ul>(.*?)</ul>')
    links = re.compile('<a href="(.*?)">(.*?)<', re.DOTALL).findall(section)
    return [Item(channel=channel,
                 action="peliculas_categorias",
                 title="[COLOR azure]" + category_name + "[/COLOR]",
                 url=category_url,
                 args=item.args,
                 thumbnail="")
            for category_url, category_name in links]
#-----------------------------------------------------------------------------------------------------------------------
def categorias_serie(item):
    """List TV-series categories from the "SERIE TV" submenu.

    :param item: Item whose url points at the site home page
    :return: list of category Items with action "peliculas_serie"
    """
    logger.info("[filmigratis.py] categorias_serie")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Skip the first <li> of the submenu (the ".*?</li>" part) and capture the rest.
    bloque = scrapertools.find_single_match(data, 'class="material-button submenu-toggle"> SERIE TV.*?<ul>.*?</li>(.*?)</ul>')
    patron = '<a href="(.*?)">(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=channel,
                 contentType='episode',
                 action="peliculas_serie",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 args=item.args,
                 thumbnail=""))
    return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def peliculas_categorias(item):
    """List the films of a category page; TMDB-enrich the results.

    :param item: Item whose url points at a category listing
    :return: list of film Items with action "findvideos"
    """
    logger.info("[filmigratis.py] peliculas_categorias")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Groups: 1=thumbnail, 2=title (must start with a letter/digit), 3=detail url
    patron = r'<div class="cnt">.*?src="(.*?)".*?title="([A-Z|0-9].*?)".*?<a href="(.*?)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumb, scrapedtitle, scrapedurl in matches:
        if scrapedtitle == "":
            # NOTE(review): this fallback matches against the whole page, so every
            # card with an empty title gets the SAME first <small> match — confirm.
            scrapedtitle = scrapertools.find_single_match(data, r'<small>.*?([A-Z|0-9].*?) <')
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        # Repair mojibake (UTF-8 bytes decoded as Latin-1) and normalize punctuation.
        scrapedtitle = scrapedtitle.replace ("È","È")
        scrapedtitle = scrapedtitle.replace("–", "-")
        scrapedtitle = scrapedtitle.replace("’", "'")
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType=item.contentType,
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumb,
                 args=item.args,
                 show=scrapedtitle))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def peliculas_serie(item):
    """List the series of a category page; TMDB-enrich the results.

    Near-duplicate of peliculas_categorias, but items route to "episodios"
    and the title additionally has non-breaking spaces stripped.

    :param item: Item whose url points at a series category listing
    :return: list of series Items with action "episodios"
    """
    logger.info("[filmigratis.py] peliculas_serie")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = r'div class="cnt">.*?src="(.*?)".*?title="([A-Z|0-9].*?)".*?<a href="(.*?)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumb, scrapedtitle, scrapedurl in matches:
        if scrapedtitle == "":
            # NOTE(review): fallback searches the whole page — same caveat as
            # in peliculas_categorias; confirm it targets the right card.
            scrapedtitle = scrapertools.find_single_match(data, r'<small>.*?([A-Z|0-9].*?) <')
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        # Repair mojibake (UTF-8 read as Latin-1) and normalize punctuation;
        # the last replace strips non-breaking spaces (mojibake " ").
        scrapedtitle = scrapedtitle.replace ("È","È")
        scrapedtitle = scrapedtitle.replace("–", "-")
        scrapedtitle = scrapedtitle.replace("’", "'")
        scrapedtitle = scrapedtitle.replace(" ", "")
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 contentType='episode',
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumb,
                 args=item.args,
                 show=scrapedtitle))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def episodios(item):
    """List the episodes of a series detail page.

    Episode labels like "S01 - EP 02" are rewritten to "1x02".

    :param item: series Item whose url points at the detail page
    :return: list of episode Items with action "findvideos"
    """
    logger.info("[filmigratis.py] episodios")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertools.find_single_match(data, r'<div class="row">(.*?)<section class="main-content">')
    # Groups: 1=episode url, 2=label starting with "S" (e.g. "S01 - EP 02")
    patron = r'href="(.*?)".*?(S[^<]+) <'
    matches = re.compile(patron, re.DOTALL).findall(block)
    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        # "S01 - EP 02" -> "1x02".
        # NOTE(review): stripping "S0" only handles seasons 1-9; "S10" would be
        # left untouched — confirm whether the site ever lists 10+ seasons.
        scrapedtitle = scrapedtitle.replace ("S0", "")
        scrapedtitle = scrapedtitle.replace(" - EP ", "x")
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType='episode',
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumb,
                 args=item.args,
                 show=item.title))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    support.videolibrary(itemlist, item, 'color kod')
    return itemlist
#-----------------------------------------------------------------------------------------------------------------------
def search(item, texto):
    """Entry point for channel/global search.

    Series and films share the same result-card markup, so the regular
    listing parsers are reused; which one depends on item.args.

    :param item: search Item (item.args is 'serie' or 'film')
    :param texto: the user-typed query string
    :return: list of result Items, or [] on any scraping failure
    """
    logger.info('[filmigratis.py] search')
    item.url = host + '/search/?s=' + texto
    # Pick the listing parser once instead of duplicating the try/except
    # block per branch (the two bodies were previously copy-pasted).
    listado = peliculas_serie if item.args == 'serie' else peliculas_categorias
    try:
        return listado(item)
    except:
        # Search must never crash the global-search aggregator: log and
        # return an empty result set instead.
        import sys
        for line in sys.exc_info():
            logger.error('%s' % line)
        return []
#-----------------------------------------------------------------------------------------------------------------------
def findvideos(item):
    """Resolve the playable server links of a film/episode page.

    :param item: Item whose url points at the detail page
    :return: list of server Items ready for playback
    """
    logger.info('[filmigratis.py] findvideos')
    data = httptools.downloadpage(item.url, headers=headers).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title + '[COLOR green][B] - ' + videoitem.title + '[/B][/COLOR]'
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
        # Fix: was `item.content`, an attribute that is never set anywhere in
        # this channel — every other function propagates `item.contentType`.
        videoitem.contentType = item.contentType
    # Only films can be added to the video library (episodes go via episodios()).
    if item.args == "film":
        support.videolibrary(itemlist, item, 'color kod')
    autoplay.start(itemlist, item)
    return itemlist

View File

@@ -298,8 +298,9 @@ def submenu_tools(item):
itemlist.append(Item(channel='custom', action='mainlist', title='Custom Channel'))
itemlist.append(Item(channel=CHANNELNAME, action="check_quickfixes", folder=False,
title=config.get_localized_string(30001), plot="Versión actual: %s" % config.get_addon_version() ))
#Disabilitato il menu degli aggiornamenti
#itemlist.append(Item(channel=CHANNELNAME, action="check_quickfixes", folder=False,
# title=config.get_localized_string(30001), plot="Versión actual: %s" % config.get_addon_version() ))
itemlist.append(Item(channel=CHANNELNAME, action="update_quasar", folder=False,
title=config.get_localized_string(70569)))
itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False,

View File

@@ -7,6 +7,7 @@ import urllib
from lib import unshortenit
from platformcode import logger, config
from channelselector import thumb
from channels import autoplay
def hdpass_get_servers(item):
@@ -86,7 +87,7 @@ def color(text, color):
def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data="", patron_block="",
patronNext="", action="findvideos", url_host="", addVideolibrary = True):
patronNext="", action="findvideos", addVideolibrary = True):
# patron: the patron to use for scraping page, all capturing group must match with listGroups
# listGroups: a list containing the scraping info obtained by your patron, in order
# accepted values are: url, title, thumb, quality, year, plot, duration, genre, rating
@@ -144,8 +145,8 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
scraped = {}
for kk in known_keys:
val = match[listGroups.index(kk)] if kk in listGroups else ''
if kk == "url":
val = url_host + val
if val and (kk == "url" or kk == 'thumb') and 'http' not in val:
val = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
scraped[kk] = val
title = scrapertoolsV2.decodeHtmlentities(scraped["title"]).strip()
@@ -462,7 +463,9 @@ def nextPage(itemlist, item, data, patron, function_level=1):
# If the call is direct, leave it blank
next_page = scrapertoolsV2.find_single_match(data, patron)
log('NEXT= ',next_page)
if 'http' not in next_page:
next_page = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + next_page
log('NEXT= ', next_page)
if next_page != "":
itemlist.append(
@@ -477,7 +480,10 @@ def nextPage(itemlist, item, data, patron, function_level=1):
return itemlist
def server(item, data='', headers=''):
def server(item, data='', headers='', AutoPlay=True):
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', item.channel)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', item.channel)
if not data:
data = httptools.downloadpage(item.url, headers=headers).data
@@ -492,9 +498,23 @@ def server(item, data='', headers=''):
videoitem.channel = item.channel
videoitem.contentType = item.contentType
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
if AutoPlay == True:
autoplay.start(itemlist, item)
return itemlist
def aplay(item, itemlist, list_servers='', list_quality=''):
if inspect.stack()[1][3] == 'mainlist':
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
else:
autoplay.start(itemlist, item)
def log(stringa1="", stringa2="", stringa3="", stringa4="", stringa5=""):
# Function to simplify the log
# Automatically returns File Name and Function Name

65
channels/toonitalia.json Normal file
View File

@@ -0,0 +1,65 @@
{
"id": "toonitalia",
"name": "ToonItalia",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "toonitalia.png",
"banner": "toonitalia.png",
"categories": ["tvshow", "movie", "vosi", "anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero di link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Italiano"
]
}
]
}

332
channels/toonitalia.py Normal file
View File

@@ -0,0 +1,332 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per ToonItalia
# ------------------------------------------------------------
import re
import urlparse
from channels import autoplay, filtertools, support
from core import scrapertools, scrapertoolsV2, httptools, tmdb, servertools
from core.item import Item
from platformcode import logger, config
channel = "toonitalia"
host = "https://toonitalia.org"
headers = [['Referer', host]]
list_servers = ['wstream', 'openload', 'streamango']
list_quality = ['HD', 'default']
def mainlist(item):
    """Build the channel root menu: latest/most-viewed episodes, lists and search.

    :param item: the menu Item for this channel
    :return: list of menu Items
    """
    # Main options
    itemlist = []
    support.menu(itemlist, 'Ultimi episodi inseriti bold', 'insert', host, contentType='episode')
    support.menu(itemlist, 'Ultime novità bold', 'updates', host, contentType='episode')
    support.menu(itemlist, 'Episodi più visti bold', 'most_view', host, contentType='episode')
    support.menu(itemlist, 'Anime', 'list', host + '/lista-anime-2/', contentType='episode')
    support.menu(itemlist, 'Sub-Ita submenu', 'list', host + '/lista-anime-sub-ita/', contentType='episode')
    support.menu(itemlist, 'Serie TV bold', 'list', host + '/lista-serie-tv/', contentType='episode')
    # Animation films route to findvideos directly via args='film' (see list()).
    support.menu(itemlist, 'Film Animazione bold', 'list', host + '/lista-film-animazione/', contentType="episode", args="film")
    support.menu(itemlist, '[COLOR blue]Cerca anime e serie...[/COLOR] bold', 'search', host, contentType='episode')
    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)
    return itemlist
#----------------------------------------------------------------------------------------------------------------------------------------------
def insert(item):
    """List the latest inserted episodes with client-side pagination.

    The site page is not paginated by the scraper; instead the current virtual
    page number is appended to item.url after a '{}' marker and only `minpage`
    matches per page are emitted.

    :param item: Item whose url may carry a '{}<page>' suffix
    :return: list of episode Items plus an optional "next" pager Item
    """
    logger.info("[toonitalia.py] insert")
    itemlist = []
    minpage = 14  # entries shown per virtual page
    p = 1
    if '{}' in item.url:
        # Split off the page number appended by a previous pager Item.
        item.url, p = item.url.split('{}')
        p = int(p)
    data = httptools.downloadpage(item.url, headers=headers).data
    # Groups: 1=post url, 2=title, 3=thumbnail, 4=plot paragraph
    patron = r'<h2 class="entry-title"><a href="([^"]+)" rel="bookmark">([^<]+)</a></h2>.*?'
    patron += r'<p class[^>]+><a href="[^"]+"><img width[^>]+src="([^"]+)" class[^>]+>.*?'
    patron += r'<p>(.*?)<\/p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for i, (scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot) in enumerate(matches):
        # Skip matches before the current page window, stop after it.
        if (p - 1) * minpage > i: continue
        if i >= p * minpage: break
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=channel,
                 action="episodios",
                 contentType="episode",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 show=scrapedtitle,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))
    if len(matches) >= p * minpage:
        # More matches remain on this source page: add a pager for page p+1.
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=channel,
                 args=item.args,
                 action="insert",
                 title="[COLOR blue][B]Successivo >[/B][/COLOR]",
                 url=scrapedurl,
                 thumbnail="thumb_next.png",
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
#----------------------------------------------------------------------------------------------------------------------------------------------
def updates(item):
    """List the latest updated series from the home page "Aggiornamenti" box.

    :param item: Item whose url points at the site home page
    :return: list of series Items with action "episodios"
    """
    logger.info("[toonitalia.py] updates")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # First isolate the "Aggiornamenti" <ul> block(s), then pull each anchor.
    sections = re.compile(r'Aggiornamenti</h2>(.*?)</ul>', re.DOTALL).findall(data)
    anchor_re = re.compile(r'<a href="(.*?)">(.*?)</a>', re.DOTALL)
    for section in sections:
        for link_url, raw_title in anchor_re.findall(section):
            clean_title = scrapertools.decodeHtmlentities(raw_title)
            itemlist.append(
                Item(channel=channel,
                     action="episodios",
                     contentType="episode",
                     title=clean_title,
                     fulltitle=clean_title,
                     url=link_url,
                     show=clean_title,
                     plot=""))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
#----------------------------------------------------------------------------------------------------------------------------------------------
def most_view(item):
    """List the most-viewed episodes from the home page "I piu visti" box.

    :param item: Item whose url points at the site home page
    :return: list of series Items with action "episodios"
    """
    logger.info("[toonitalia.py] most_view")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Outer pass isolates the widget block; inner pass extracts its anchors.
    # Note: the loop variable `scrapedurl` here holds the BLOCK text and both
    # `blocco` and `matches` are rebound inside the loop (the outer iterator
    # keeps the original list, so this still behaves like two nested loops).
    blocco = r'I piu visti</h2>(.*?)</ul>'
    matches = re.compile(blocco, re.DOTALL).findall(data)
    for scrapedurl in matches:
        blocco = scrapedurl
        patron = r'<a href="([^"]+)" title="[^"]+" class="wpp-post-title" target="_self">([^<]+)</a>'
        matches = re.compile(patron, re.DOTALL).findall(blocco)
        for scrapedurl, scrapedtitle in matches:
            scrapedplot = ""
            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
            itemlist.append(
                Item(channel=channel,
                     action="episodios",
                     contentType="episode",
                     title=scrapedtitle,
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     show=scrapedtitle,
                     plot=scrapedplot))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
#----------------------------------------------------------------------------------------------------------------------------------------------
def list(item):
    """List an A-Z index page (anime/series/films) with client-side pagination.

    NOTE: the function name shadows the builtin `list`, but it cannot be
    renamed — the framework dispatches to it by the string action="list"
    set in mainlist().

    :param item: Item whose url may carry a '{}<page>' suffix; item.args=="film"
                 routes entries straight to findvideos instead of episodios
    :return: list of Items plus an optional "next" pager Item
    """
    logger.info("[toonitalia.py] list")
    itemlist = []
    minpage = 14  # entries shown per virtual page
    p = 1
    if '{}' in item.url:
        # Split off the page number appended by a previous pager Item.
        item.url, p = item.url.split('{}')
        p = int(p)
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = r'<li ><a href="([^"]+)" title="[^>]+">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        # Skip matches before the current page window, stop after it.
        if (p - 1) * minpage > i: continue
        if i >= p * minpage: break
        # Drop the list-header pseudo-entry present on the film page.
        if 'Film Animazione disponibili' not in scrapedtitle:
            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
            scrapedplot = ""
            itemlist.append(
                Item(channel=channel,
                     # Films are single videos: go straight to findvideos.
                     action = 'episodios' if not 'film' in item.args else 'findvideos',
                     contentType=item.contentType,
                     title=scrapedtitle,
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     show=scrapedtitle,
                     args=item.args,
                     plot=scrapedplot))
    if len(matches) >= p * minpage:
        # More matches remain on this source page: add a pager for page p+1.
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=channel,
                 args=item.args,
                 contentType=item.contentType,
                 action="list",
                 title="[COLOR blue][B]Successivo >[/B][/COLOR]",
                 url=scrapedurl))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
#----------------------------------------------------------------------------------------------------------------------------------------------
def peliculas(item):
    """Parse a search/archive results page with client-side pagination.

    The virtual page number is appended to item.url after a '{}' marker and
    only `minpage` matches per page are emitted.

    :param item: Item whose url may carry a '{}<page>' suffix
    :return: list of series Items plus an optional "next" pager Item
    """
    logger.info("[toonitalia] peliculas")
    itemlist = []
    minpage = 14  # entries shown per virtual page
    p = 1
    if '{}' in item.url:
        # Split off the page number appended by a previous pager Item.
        item.url, p = item.url.split('{}')
        p = int(p)
    data = httptools.downloadpage(item.url, headers=headers).data
    # Groups: 1=post url, 2=title, 3=plot paragraph
    patron = r'<h2 class="entry-title"><a href="([^"]+)" rel="bookmark">([^<]+)</a></h2>.*?<p>([^<]+)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    # Fix: removed a leftover debug `print data` that dumped the entire HTML
    # page to stdout on every call.
    for i, (scrapedurl, scrapedtitle, scrapedplot) in enumerate(matches):
        # Skip matches before the current page window, stop after it.
        if (p - 1) * minpage > i: continue
        if i >= p * minpage: break
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=channel,
                 action="episodios",
                 contentType="episode",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 show=scrapedtitle,
                 plot=scrapedplot))
    if len(matches) >= p * minpage:
        # More matches remain on this source page: add a pager for page p+1.
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=channel,
                 extra=item.extra,
                 action="peliculas",
                 title="[COLOR blue][B]Successivo >[/B][/COLOR]",
                 url=scrapedurl))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
#----------------------------------------------------------------------------------------------------------------------------------------------
def episodios(item):
    """List the episodes of a detail page, merging duplicate-episode mirrors.

    When the same episode (`puntata`) appears with several external links,
    the urls are space-concatenated on a single Item; findvideos() receives
    them all via item.url.

    :param item: series Item whose url points at the detail page
    :return: list of episode Items with action "findvideos"
    """
    logger.info("[toonitalia.py] episodios")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Groups: 1=external video url, 2=episode label
    patron = r'<br /> <a href="([^"]+)"\s*target="_blank"\s*rel[^>]+>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        # Wikipedia links share the same anchor markup; skip them.
        if 'Wikipedia' not in scrapedurl:
            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).replace("×", "x")
            scrapedtitle = scrapedtitle.replace("_", " ")
            scrapedtitle = scrapedtitle.replace(".mp4", "")
            # puntata = scrapertools.find_single_match(scrapedtitle, '[0-9]+x[0-9]+')
            puntata = scrapedtitle
            # for/else: the else runs only when no existing Item matched,
            # i.e. this episode has not been added yet.
            for i in itemlist:
                if i.args == puntata:  # already added: append mirror url
                    i.url += " " + scrapedurl
                    break
            else:
                itemlist.append(
                    Item(channel=channel,
                         action="findvideos",
                         contentType=item.contentType,
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                         thumbnail=item.thumbnail,
                         fulltitle=scrapedtitle,
                         url=scrapedurl,
                         args = puntata,
                         show=item.show,
                         plot=item.plot))
    support.videolibrary(itemlist, item, 'color kod')
    return itemlist
#----------------------------------------------------------------------------------------------------------------------------------------------
def search(item, texto):
    """Entry point for channel/global search; delegates to peliculas().

    :param item: search Item
    :param texto: the user-typed query string
    :return: list of result Items, or [] on any scraping failure
    """
    logger.info("[toonitalia.py] " + item.url + " search " + texto)
    item.url = host + "/?s=" + texto
    try:
        return peliculas(item)
    except:
        # Search must never crash the global-search aggregator: log and
        # return an empty result set instead.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
#----------------------------------------------------------------------------------------------------------------------------------------------
def findvideos(item):
    """Resolve playable server links for a film page or a pre-collected episode.

    Films (item.args == 'film') need the detail page downloaded first; for
    episodes, episodios() already packed the external urls into item.url, so
    the servers are detected from that string directly.

    :param item: Item to resolve
    :return: list of server Items ready for playback
    """
    logger.info("[toonitalia.py] findvideos")
    if item.args == 'film':
        data = httptools.downloadpage(item.url, headers=headers).data
        itemlist = servertools.find_video_items(data=data)
        for videoitem in itemlist:
            videoitem.channel = channel
            # Collapse the server label ("[openload]" etc.) to a bare name.
            server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
            # NOTE(review): this branch closes the color tag BEFORE the title,
            # the episode branch after it — presumably intentional styling,
            # but confirm before unifying the two near-identical loops.
            videoitem.title = "".join(['[COLOR blue] ' + "[[B]" + server + "[/B]][/COLOR] " + item.title])
            videoitem.thumbnail = item.thumbnail
            videoitem.plot = item.plot
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
    else:
        # item.url holds space-separated server urls collected by episodios().
        itemlist = servertools.find_video_items(data=item.url)
        for videoitem in itemlist:
            videoitem.channel = channel
            server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
            videoitem.title = "".join(['[COLOR blue] ' + "[[B]" + server + "[/B]] " + item.title + '[/COLOR]'])
            videoitem.thumbnail = item.thumbnail
            videoitem.plot = item.plot
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
    autoplay.start(itemlist, item)
    return itemlist

View File

@@ -103,13 +103,13 @@ def findvideos(item):
def generos(item):
findhost()
patron = '<a href="([^"#]+)">([a-zA-Z]+)'
return support.scrape(item, patron, ['url', 'title'], patron_block='<a href="#">Genere</a><ul class="sub-menu">.*?</ul>', action='peliculas', url_host=host)
return support.scrape(item, patron, ['url', 'title'], patron_block='<a href="#">Genere</a><ul class="sub-menu">.*?</ul>', action='peliculas')
def year(item):
findhost()
patron = r'<a href="([^"#]+)">(\d+)'
return support.scrape(item, patron, ['url', 'title'], patron_block='<a href="#">Anno</a><ul class="sub-menu">.*?</ul>', action='peliculas', url_host=host)
return support.scrape(item, patron, ['url', 'title'], patron_block='<a href="#">Anno</a><ul class="sub-menu">.*?</ul>', action='peliculas')
def play(item):

View File

@@ -714,7 +714,7 @@ def check_list_links(itemlist, numero='', timeout=3):
for it in itemlist:
if numero > 0 and it.server != '' and it.url != '':
verificacion = check_video_link(it.url, it.server, timeout)
it.title = verificacion + ', ' + it.title.strip()
it.title = verificacion + ' ' + it.title.strip()
it.alive = verificacion
numero -= 1
return itemlist
@@ -725,31 +725,37 @@ def check_video_link(url, server, timeout=3):
:param url, server: Link y servidor
:return: str(2) '??':No se ha podido comprobar. 'Ok':Parece que el link funciona. 'NO':Parece que no funciona.
"""
NK = "[COLOR 0xFFF9B613][B]" + u'\u25cf' + "[/B][/COLOR]"
OK = "[COLOR 0xFF00C289][B]" + u'\u25cf' + "[/B][/COLOR]"
KO = "[COLOR 0xFFC20000][B]" + u'\u25cf' + "[/B][/COLOR]"
try:
server_module = __import__('servers.%s' % server, None, None, ["servers.%s" % server])
except:
server_module = None
logger.info("[check_video_link] No se puede importar el servidor! %s" % server)
return "??"
return NK
if hasattr(server_module, 'test_video_exists'):
ant_timeout = httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT
httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = timeout # Limitar tiempo de descarga
try:
video_exists, message = server_module.test_video_exists(page_url=url)
if not video_exists:
logger.info("[check_video_link] No existe! %s %s %s" % (message, server, url))
resultado = "[COLOR red][B]NO[/B][/COLOR]"
resultado = KO
else:
logger.info("[check_video_link] comprobacion OK %s %s" % (server, url))
resultado = "[COLOR green][B]OK[/B][/COLOR]"
resultado = OK
except:
logger.info("[check_video_link] No se puede comprobar ahora! %s %s" % (server, url))
resultado = "??"
resultado = NK
finally:
httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = ant_timeout # Restaurar tiempo de descarga
return resultado
logger.info("[check_video_link] No hay test_video_exists para servidor: %s" % server)
return "??"
return NK

View File

@@ -7,7 +7,7 @@ import os
import sys
import urllib2
import time
import updater
from core import channeltools
from core import scrapertools
from core import servertools
@@ -32,7 +32,6 @@ def start():
def run(item=None):
logger.info()
if not item:
# Extract item from sys.argv
if sys.argv[2]:
@@ -60,7 +59,7 @@ def run(item=None):
from channels import side_menu
item= Item()
item = side_menu.check_user_home(item)
item.start = True;
item.start = True
else:
item = Item(channel="channelselector", action="getmainlist", viewmode="movie")
if not config.get_setting('show_once'):
@@ -75,6 +74,9 @@ def run(item=None):
if item.action == "":
logger.info("Item sin accion")
return
if item.action == "update":
updater.update()
# Action for main menu in channelselector
elif item.action == "getmainlist":
@@ -117,10 +119,9 @@ def run(item=None):
# Action in certain channel specified in "action" and "channel" parameters
else:
# Entry point for a channel is the "mainlist" action, so here we check parental control
if item.action == "mainlist":
#updater.checkforupdates() beta version checking for update, still disabled
# Parental control
# If it is an adult channel, and user has configured pin, asks for it
@@ -128,7 +129,6 @@ def run(item=None):
tecleado = platformtools.dialog_input("", config.get_localized_string(60334), True)
if tecleado is None or tecleado != config.get_setting("adult_password"):
return
# # Actualiza el canal individual
# if (item.action == "mainlist" and item.channel != "channelselector" and
# config.get_setting("check_for_channel_updates") == True):

View File

@@ -3,10 +3,14 @@
# Updater (kodi)
# --------------------------------------------------------------------------------
import os
import os, sys
import time
import threading
import traceback
import urllib
import json
import xbmc
import shutil
from platformcode import config, logger, platformtools
@@ -16,6 +20,9 @@ from core import downloadtools
from core import ziptools
from core import filetools
REMOTE_FILE = "https://github.com/kodiondemand/addon/archive/master.zip"
DESTINATION_FOLDER = xbmc.translatePath("special://home/addons") + "/plugin.video.kod"
REMOTE_VERSION_FILE = "https://raw.githubusercontent.com/kodiondemand/addon/master/version.json"
def check_addon_init():
logger.info()
@@ -68,8 +75,159 @@ def check_addon_init():
time.sleep(5) # Dejamos terminar la primera verificación...
return
def checkforupdates(plugin_mode=True):
    """Compare the installed add-on version with the published one.

    Downloads REMOTE_VERSION_FILE (a JSON document with an "update"
    object carrying "version" — digits only — and "tag" — dotted form,
    used for display), compares it against the local version.json and,
    when the remote version is newer, either adds a "download update"
    entry to the plugin directory listing (plugin_mode=True) or asks
    the user directly through a yes/no dialog (plugin_mode=False).
    """
    logger.info("kodiondemand.core.updater checkforupdates")

    # Fetch the remote version descriptor. Expected payload:
    # {
    #     "update": {
    #         "name": "Kodi on Demand",
    #         "tag": "1.0.0",
    #         "version": "1000",
    #         "date": "03/05/2019",
    #         "changes": "Added Updater"
    #     }
    # }
    response = urllib.urlopen(REMOTE_VERSION_FILE)
    data = json.loads(response.read())

    # "version" is the add-on version without dots; "tag" is the dotted
    # string shown to the user.
    remote_version = data["update"]["version"]
    tag_version = data["update"]["tag"]
    logger.info("kodiondemand.core.updater version remota=" + tag_version + " " + remote_version)

    # Read the locally installed version descriptor.
    path_local = xbmc.translatePath("special://home/addons/") + "plugin.video.kod/version.json"
    data = json.loads(open(path_local).read())
    version_local = data["update"]["version"]
    tag_local = data["update"]["tag"]
    logger.info("kodiondemand.core.updater version local=" + tag_local + " " + version_local)

    try:
        numero_remote_version = int(remote_version)
        numero_version_local = int(version_local)
    except:
        # Non-numeric "version" fields: fall back to tag comparison below.
        import traceback
        logger.info(traceback.format_exc())
        remote_version = ""
        version_local = ""

    if remote_version == "" or version_local == "":
        # Fallback: compare the dotted tags component by component
        # (lexicographic on the numeric parts).
        # BUGFIX: the old code kept scanning after a component where the
        # local value was larger, so local 2.8.0 vs remote 1.9.0 wrongly
        # triggered an update (9 > 8 at index 1). The comparison must be
        # decided at the first differing component.
        arraydescargada = tag_version.split(".")
        arraylocal = tag_local.split(".")
        hayqueactualizar = False
        for i in range(0, min(len(arraylocal), len(arraydescargada))):
            if int(arraydescargada[i]) > int(arraylocal[i]):
                hayqueactualizar = True
                break
            elif int(arraydescargada[i]) < int(arraylocal[i]):
                hayqueactualizar = False
                break
        else:
            # All shared components are equal: a longer remote tag
            # (local 2.8 vs remote 2.8.8) means a newer version.
            hayqueactualizar = len(arraydescargada) > len(arraylocal)
    else:
        hayqueactualizar = (numero_remote_version > numero_version_local)

    if hayqueactualizar:

        if plugin_mode:
            logger.info("kodiondemand.core.updater actualizacion disponible")

            # Add an "update" entry to the Kodi directory listing.
            import xbmcgui
            thumbnail = os.path.join(config.get_runtime_path(), "resources", "images", "service_update.png")
            logger.info("thumbnail=" + thumbnail)
            listitem = xbmcgui.ListItem("Scarica la versione " + tag_version, thumbnailImage=thumbnail)
            itemurl = '%s?action=update&version=%s' % (sys.argv[0], tag_version)
            import xbmcplugin
            xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=itemurl, listitem=listitem, isFolder=True)

            # Also notify the user with a popup.
            dialog = xbmcgui.Dialog()
            dialog.ok("Versione " + tag_version + " disponibile", "E' possibile scaricare la nuova versione del plugin\nattraverso l'opzione nel menù principale.")
        else:
            import xbmcgui
            yes_pressed = xbmcgui.Dialog().yesno("Versione " + tag_version + " disponibile", "Installarla?")

            if yes_pressed:
                # BUGFIX: update() takes no arguments; the old call
                # update({"version": tag_version}) raised a TypeError.
                update()
def update():
    """Download the add-on's master ZIP and unpack it over the install.

    Fetches REMOTE_FILE into the Kodi addons folder as
    plugin.video.kod.update.zip, extracts it over plugin.video.kod/ and
    deletes the downloaded archive afterwards. No return value.
    """
    logger.info("kodiondemand.core.updater update")
    remotefilename = REMOTE_FILE
    localfilename = xbmc.translatePath("special://home/addons/") + "plugin.video.kod.update.zip"
    logger.info("kodiondemand.core.updater remotefilename=%s" % remotefilename)
    logger.info("kodiondemand.core.updater localfilename=%s" % localfilename)

    # Download the archive.
    logger.info("kodiondemand.core.updater descarga fichero...")
    urllib.urlretrieve(remotefilename, localfilename)

    # Unpack it directly over the installed add-on folder.
    logger.info("kodiondemand.core.updater descomprime fichero...")
    unzipper = ziptools.ziptools()
    destpathname = xbmc.translatePath("special://home/addons/")
    logger.info("kodiondemand.core.updater destpathname=%s" % destpathname)
    unzipper.extract(localfilename, destpathname, os.path.join(destpathname, "plugin.video.kod/"))

    # BUGFIX: removed the leftover os.listdir() of the "addon-master"
    # temp folder — its result was never used (the shutil.move loop that
    # consumed it is commented out) and it raised OSError whenever the
    # folder did not exist.

    # Remove the downloaded archive.
    logger.info("kodiondemand.core.updater borra fichero...")
    os.remove(localfilename)
    logger.info("kodiondemand.core.updater ...fichero borrado")
'''
def check_addon_updates(verbose=False):
logger.info()
@@ -158,3 +316,4 @@ def check_addon_updates(verbose=False):
if verbose:
platformtools.dialog_notification(config.get_localized_string(70674), config.get_localized_string(70675))
return False
'''

Binary file not shown.

After

Width:  |  Height:  |  Size: 173 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 145 KiB

View File

@@ -4,8 +4,8 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "https://upvid.co/embed-([a-z0-9]+).html",
"url": "https://upvid.co/embed-\\1.html"
"pattern": "upvid.(?:co|live)/(?:embed-)?([a-z0-9]+).html",
"url": "https://upvid.live/\\1.html"
}
]
},

View File

@@ -29,7 +29,7 @@ def get_video_url(page_url, premium = False, user = "", password = "", video_pas
if '゚ω゚ノ' in data:
break
else:
page_url = scrapertools.find_single_match(data, '"iframe" src="([^"]+)')
page_url = scrapertools.find_single_match(data, '\'<iframe src="([^"]+)')
if not page_url:
page_url = scrapertools.find_single_match(data, '<input type="hidden" id="link" value="([^"]+)')
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)