Merge remote-tracking branch 'origin/master'

Committed by marco on 2020-05-21 21:07:40 +02:00
12 changed files with 109 additions and 93 deletions

View File

@@ -12,15 +12,9 @@ from platformcode import logger, config
def findhost():
page = httptools.downloadpage('https://cb01.uno/')
# permUrl = page.headers
# if 'location' in permUrl:
# if 'google' in permUrl['location']:
# host = permUrl['location'].replace('https://www.google.it/search?q=site:', '')
# else:
# host = permUrl['location']
# else:
host = support.match(page.data, patron=r'<a href="([^"]+)').match
host = httptools.downloadpage('https://cb01.uno/', follow_redirect=True).url
if host == 'https://cb01.uno/':
host = support.match(host, patron=r'<a href="([^"]+)', debug=True).match
return host
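
The rewritten findhost simply asks the old domain where it redirects to, and only falls back to scraping an anchor when no redirect happens. A minimal sketch of the same idea using only the standard library (the channel itself goes through the project's httptools wrapper, as shown above):

    import re
    from urllib.request import urlopen

    def findhost_sketch(seed='https://cb01.uno/'):
        # urlopen follows 3xx redirects by default, so the final URL is the live host
        resp = urlopen(seed)
        host = resp.geturl()
        if host.rstrip('/') == seed.rstrip('/'):
            # no redirect happened: fall back to the first anchor on the landing page
            html = resp.read().decode('utf-8', 'ignore')
            m = re.search(r'<a href="([^"]+)', html)
            host = m.group(1) if m else seed
        return host
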
@@ -118,7 +112,7 @@ def peliculas(item):
action = 'findvideos'
else:
patronBlock = r'Ultime SerieTv aggiornate(?P<block>.*?)Lista'
patron = r'src="(?P<thumb>[^"]+)" alt="(?P<title>.*?)(?: &#8211; \d+&#215;\d+)?(?:"| &#8211; )(?:(?P<lang>Sub-ITA|ITA))?[^>]*>[^>]+>[^>]+><a href="(?P<url>[^"]+)".*?<div class="rpwe-summary">.*?\((?P<year>\d{4})[^\)]*\) (?P<plot>[^<]+)<'
patron = r'src=(?:")?(?P<thumb>[^ "]+)(?:")? alt=(?:")?(?P<title>.*?)(?: &#8211; \d+&#215;\d+)?(?:>|"| &#8211; )(?:(?P<lang>Sub-ITA|ITA))?[^>]*>.*?<a href=(?:")?(?P<url>[^" ]+)(?:")?.*?rpwe-summary[^>]*>(?P<genre>[^\(]*)\((?P<year>\d{4})[^\)]*\) (?P<plot>[^<]+)<'
action = 'episodios'
elif '/serietv/' not in item.url:
@@ -126,7 +120,7 @@ def peliculas(item):
action = 'findvideos'
else:
patron = r'div class="card-image">.*?<img src="(?P<thumb>[^ ]+)" alt.*?<a href="(?P<url>[^ >]+)">(?P<title>.*?)(?: &#8211;\s*(?:[SS]tagione \d|\d).*?)?(?P<lang>(?:[Ss][Uu][Bb]-)?[Ii][Tt][Aa])?<\/a>.*?(?:<strong><span style="[^"]+">(?P<genre>[^<>0-9(]+)\((?P<year>[0-9]{4}).*?</(?:p|div)>(?P<plot>.*?))?</div'
patron = r'card-image[^>]*>\s*<a href=(?:")?(?P<url>[^" >]+)(?:")?\s*>\s*<img src=(?:")?(?P<thumb>[^" ]+)(?:")? alt="(?P<title>.*?)(?: &#8211; \d+&#215;\d+)?(?:"| &#8211; )(?:(?P<lang>Sub-ITA|ITA))?[^>]*>[^>]+>[^>]+>[^>]*>[^>]+>[^>]+>[^>]*>[^>]+>[^>]+>[^>]*>[^>]+>[^>]+>[^>]*>(?P<genre>[^\(]+)\((?P<year>\d{4})[^>]*>[^>]+>[^>]+>[^>]+>(?:<p>)?(?P<plot>[^<]+)'
action = 'episodios'
item.contentType = 'tvshow'
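
Both new patron strings rely on named groups, so every scraped field (thumb, title, lang, url, genre, year, plot) is picked out by name instead of by position, and optional quotes around attributes are tolerated. A rough sketch of how such a pattern is consumed; support.match presumably does something equivalent internally, so the helper below is illustrative, not the project's API:

    import re

    # Shortened patron in the same named-group style as above (illustrative).
    patron = r'src="(?P<thumb>[^"]+)" alt="(?P<title>[^"]+)".*?<a href="(?P<url>[^"]+)"'

    def scrape_sketch(html):
        # One dict per card, keyed by the group names the scraper expects.
        return [m.groupdict() for m in re.finditer(patron, html, re.DOTALL)]
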
@@ -137,10 +131,9 @@ def peliculas(item):
@support.scrape
def episodios(item):
data = httptools.downloadpage(item.url, headers=headers).data
data = data.replace("'", '"')
data = re.sub('\n|\t', ' ', data)
data = re.sub(r'>\s+<', '> <', data)
# support.dbg()
data = support.match(item.url, headers=headers).data
support.log(data)
if 'TUTTA LA ' in data:
folderUrl = scrapertools.find_single_match(data, 'TUTTA LA \w+\s+(?:&#8211;|-)\s+<a href="([^"]+)')
data = httptools.downloadpage(folderUrl).data
@@ -150,7 +143,7 @@ def episodios(item):
item.serieFolder = True
return item
else:
patronBlock = r'(?P<block><div class="sp-head[a-z ]*?" title="Espandi">\s*(?:STAGION[EI]\s*(?:DA\s*[0-9]+\s*A)?\s*[0-9]+|MINISERIE) - (?P<lang>[^-<]+)(?:- (?P<quality>[^-<]+))?.*?[^<>]*?<\/div>.*?)<div class="spdiv">\[riduci\]<\/div>'
patronBlock = r'(?P<block>sp-head[^>]+>\s*(?:STAGION[EI]\s*(?:DA\s*[0-9]+\s*A)?\s*[0-9]+|MINISERIE) - (?P<lang>[^-<]+)(?:- (?P<quality>[^-<]+))?.*?<\/div>.*?)spdiv[^>]*>'
patron = r'(?:/>|<p>|<strong>)(?P<url>.*?(?P<episode>[0-9]+(?:&#215;|×)[0-9]+)\s*(?P<title2>.*?)?(?:\s*&#8211;|\s*-|\s*<).*?)(?:<\/p>|<br)'
def itemlistHook(itemlist):
title_dict = {}
@@ -236,7 +229,7 @@ def findvid_serie(item):
def load_vid_series(html, item, itemlist, blktxt):
support.log('HTML',html)
# Estrae i contenuti
matches = support.match(html, patron=r'<a href="([^"]+)"[^=]+="_blank"[^>]+>(?!<!--)(.*?)(?:</a>|<img)').matches
matches = support.match(html, patron=r'<a href=(?:")?([^ "]+)[^>]+>(?!<!--)(.*?)(?:</a>|<img)').matches
for url, server in matches:
item = Item(channel=item.channel,
action="play",

View File

@@ -214,8 +214,12 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
if scraped['season']:
stagione = scraped['season']
item.infoLabels['season'] = int(scraped['season'])
item.infoLabels['episode'] = int(scraped['episode'])
episode = str(int(scraped['season'])) +'x'+ str(int(scraped['episode'])).zfill(2)
elif item.season:
item.infoLabels['season'] = int(item.season)
item.infoLabels['episode'] = int(scrapertools.find_single_match(scraped['episode'], r'(\d+)'))
episode = item.season +'x'+ scraped['episode']
elif item.contentType == 'tvshow' and (scraped['episode'] == '' and scraped['season'] == '' and stagione == ''):
item.news = 'season_completed'
@@ -225,6 +229,8 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
if 'x' in episode:
ep = episode.split('x')
episode = str(int(ep[0])).zfill(1) + 'x' + str(int(ep[1])).zfill(2)
item.infoLabels['season'] = int(ep[0])
item.infoLabels['episode'] = int(ep[1])
second_episode = scrapertools.find_single_match(episode, r'x\d+x(\d+)')
if second_episode: episode = re.sub(r'(\d+x\d+)x\d+',r'\1-', episode) + second_episode.zfill(2)
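
The two added infoLabels assignments expose season and episode as integers alongside the zero-padded display string. A standalone example of the normalization (helper name is illustrative):

    def split_episode(episode):
        # '01x7' -> ('1x07', 1, 7): padded label plus integer season/episode.
        ep = episode.split('x')
        season, number = int(ep[0]), int(ep[1])
        return '%dx%02d' % (season, number), season, number
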
@@ -240,14 +246,16 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
if item.infoLabels["title"] == scraped["title"]:
infolabels = item.infoLabels
else:
infolabels = {}
if function == 'episodios':
infolabels = item.infoLabels
else:
infolabels = {}
if scraped['year']:
infolabels['year'] = scraped['year']
if scraped["plot"]:
infolabels['plot'] = plot
if scraped['duration']:
matches = scrapertools.find_multiple_matches(scraped['duration'],
r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)')
matches = scrapertools.find_multiple_matches(scraped['duration'],r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)')
for h, m in matches:
scraped['duration'] = int(h) * 60 + int(m)
if not matches:
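
The duration patron, now joined onto a single line, accepts 'h', ':', '.', ',' and similar separators between hours and minutes and converts the pair to minutes. For example (regex copied from the diff, wrapper name illustrative):

    import re

    DURATION_PATRON = r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)'

    def duration_minutes(text):
        # '1h 40', '1:40' and '1.40' all yield 100 minutes.
        for h, m in re.findall(DURATION_PATRON, text):
            return int(h) * 60 + int(m)
        return None
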

View File

@@ -196,10 +196,7 @@ def get_trakt_watched(id_type, mediatype, update=False):
def trakt_check(itemlist):
from core.support import typo
id_result = ''
# check = u'\u221a'
check = typo(u'\u221a','color kod bold')+' '
synced = False
try:
for item in itemlist:
@@ -221,7 +218,7 @@ def trakt_check(itemlist):
id_result = get_trakt_watched(id_type, mediatype)
if info['mediatype'] == 'movie':
if info[id_type + '_id'] in id_result:
item.title = check + item.title
item.infoLabels['playcount'] = 1
elif info['mediatype'] == 'episode':
if info[id_type + '_id'] in id_result:
@@ -236,7 +233,7 @@ def trakt_check(itemlist):
season_watched = id_result[id][season]
if episode in season_watched:
item.title = check + item.title
item.infoLabels['playcount'] = 1
else:
break
except:
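
Besides switching the check mark to the themed typo(u'\u221a', 'color kod bold'), the change now also sets playcount, so Kodi shows its own watched overlay in addition to the prefixed symbol. Reduced to its essence (typo is the project's text-formatting helper; the plain fallback below is an assumption for the example):

    def mark_as_watched(item, typo=None):
        # Prefix the themed check mark and flag the item as watched for the skin.
        check = typo(u'\u221a', 'color kod bold') + ' ' if typo else u'\u221a '
        item.title = check + item.title
        item.infoLabels['playcount'] = 1
        return item
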

View File

@@ -1,9 +1,10 @@
import base64
import json
import random
import struct
import time
import urllib
import base64, json, random, struct, time, sys, traceback
if sys.version_info[0] >= 3:
import urllib.request as urllib
xrange = range
else:
import urllib
from core import httptools
from threading import Thread
@@ -102,7 +103,7 @@ class Client(object):
return files
except:
print(traceback.format_exc())
logger.info(traceback.format_exc())
pass
return files
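
The same compatibility idiom recurs in the files below: gate the imports on sys.version_info so the module runs under both Python 2 and 3, aliasing the py3 modules to the old names. The general shape, for reference:

    import sys, traceback

    if sys.version_info[0] >= 3:
        import urllib.request as urllib   # py3: request functions moved to urllib.request
        xrange = range                    # py3 has no xrange
    else:
        import urllib                     # py2 names kept as-is

Note that py2's urllib.quote/urlencode live in urllib.parse on py3, which is why the handler below also aliases urllib.parse as urlparse.
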

View File

@@ -1,5 +1,10 @@
import urllib2
import traceback
import sys, traceback
from platformcode import logger
if sys.version_info[0] >= 3:
from urllib.request import Request, urlopen
else:
from urllib2 import Request, urlopen
class Cursor(object):
def __init__(self, file):
@@ -18,14 +23,14 @@ class Cursor(object):
file = self._file._client.api_req({'a': 'g', 'g': 1, 'p': self._file.file_id})
self._file.url= file["g"]
req = urllib2.Request(self._file.url)
req = Request(self._file.url)
req.headers['Range'] = 'bytes=%s-' % (offset)
try:
self.conn = urllib2.urlopen(req)
self.conn = urlopen(req)
try:
self.prepare_decoder(offset)
except:
print(traceback.format_exc())
logger.error(traceback.format_exc())
except:
self.mega_request(offset, True)
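
With Request/urlopen aliased, the cursor issues an open-ended byte-range request so decoding can resume from an arbitrary offset. A self-contained sketch of that request pattern (standard library only, error handling trimmed):

    from urllib.request import Request, urlopen   # py2 would import these from urllib2

    def open_from_offset(url, offset):
        # Ask the server for everything from 'offset' to the end of the file.
        req = Request(url)
        req.add_header('Range', 'bytes=%s-' % offset)
        return urlopen(req)   # servers that honour the header answer 206 Partial Content
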

View File

@@ -28,8 +28,3 @@ class File(object):
self.cursor = True
self.cursors.append(c)
return c

View File

@@ -1,13 +1,16 @@
import BaseHTTPServer
import urlparse
import time
import urllib
import types
import os
import re
import time, os, re, sys
if sys.version_info[0] >= 3:
from http.server import BaseHTTPRequestHandler
import urllib.request as urllib
import urllib.parse as urlparse
else:
from BaseHTTPServer import BaseHTTPRequestHandler
import urlparse
import urllib
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
class Handler(BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
def log_message(self, format, *args):
@@ -68,19 +71,19 @@ class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
if self.server._client.file and urllib.unquote(url)[1:].decode("utf-8") == self.server._client.file.name:
range = False
self.offset=0
self.offset = 0
size, mime = self._file_info()
start, end = self.parse_range(self.headers.get('Range', ""))
self.size = size
if start <> None:
if start != None:
if end == None: end = size - 1
self.offset=int(start)
self.size=int(end) - int(start) + 1
range=(int(start), int(end), int(size))
else:
range = None
self.send_resp_header(mime, size, range)
return True
@@ -98,7 +101,7 @@ class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_resp_header(self, cont_type, size, range=False):
if range:
self.send_response(206, 'Partial Content')
else:
@@ -108,15 +111,14 @@ class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
self.send_header('Accept-Ranges', 'bytes')
if range:
if isinstance(range, (types.TupleType, types.ListType)) and len(range)==3:
if isinstance(range, (tuple, list)) and len(range)==3:
self.send_header('Content-Range', 'bytes %d-%d/%d' % range)
self.send_header('Content-Length', range[1]-range[0]+1)
else:
raise ValueError('Invalid range value')
else:
self.send_header('Content-Length', size)
self.send_header('Connection', 'close')
self.end_headers()
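
On the serving side the handler mirrors that contract: a request carrying a Range header gets a 206 with a matching Content-Range, anything else gets the full size. The isinstance check replaces types.TupleType/types.ListType, which no longer exist on Python 3. A condensed sketch of the response path (Content-Type handling simplified):

    def send_resp_header_sketch(handler, cont_type, size, range=None):
        # 'range' is either None or a (start, end, total) tuple.
        if range:
            handler.send_response(206, 'Partial Content')
        else:
            handler.send_response(200, 'OK')
        handler.send_header('Content-Type', cont_type)
        handler.send_header('Accept-Ranges', 'bytes')
        if isinstance(range, (tuple, list)) and len(range) == 3:
            handler.send_header('Content-Range', 'bytes %d-%d/%d' % tuple(range))
            handler.send_header('Content-Length', range[1] - range[0] + 1)
        else:
            handler.send_header('Content-Length', size)
        handler.send_header('Connection', 'close')
        handler.end_headers()
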

View File

@@ -1,14 +1,18 @@
import traceback
import BaseHTTPServer
import sys, traceback
if sys.version_info[0] >= 3:
from http.server import HTTPServer
else:
from BaseHTTPServer import HTTPServer
from SocketServer import ThreadingMixIn
from threading import Thread
from platformcode import logger
class Server(ThreadingMixIn, BaseHTTPServer.HTTPServer):
class Server(ThreadingMixIn, HTTPServer):
daemon_threads = True
timeout = 1
def __init__(self, address, handler, client):
BaseHTTPServer.HTTPServer.__init__(self,address,handler)
HTTPServer.__init__(self,address,handler)
self._client = client
self.running=True
self.request = None
@@ -21,7 +25,7 @@ class Server(ThreadingMixIn, BaseHTTPServer.HTTPServer):
try:
self.handle_request()
except:
print traceback.format_exc()
logger.error(traceback.format_exc())
def run(self):
t=Thread(target=self.serve, name='HTTP Server')
@@ -30,4 +34,4 @@ class Server(ThreadingMixIn, BaseHTTPServer.HTTPServer):
def handle_error(self, request, client_address):
if not "socket.py" in traceback.format_exc():
print traceback.format_exc()
logger.error(traceback.format_exc())
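
The server piece is the standard ThreadingMixIn composition, now importing HTTPServer from http.server on Python 3 (where the mix-in lives in socketserver) and logging exceptions instead of using py2 print statements. A minimal standalone equivalent, with BaseHTTPRequestHandler as a placeholder handler:

    import sys
    from threading import Thread

    if sys.version_info[0] >= 3:
        from http.server import HTTPServer, BaseHTTPRequestHandler
        from socketserver import ThreadingMixIn
    else:
        from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
        from SocketServer import ThreadingMixIn

    class ThreadedServer(ThreadingMixIn, HTTPServer):
        daemon_threads = True   # worker threads die together with the main thread

    def run_sketch(address=('127.0.0.1', 0)):
        server = ThreadedServer(address, BaseHTTPRequestHandler)
        t = Thread(target=server.serve_forever, name='HTTP Server')
        t.daemon = True
        t.start()
        return server
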

View File

@@ -6011,6 +6011,10 @@ msgctxt "#70806"
msgid "Changing this parameter permanently overwrites the Elementum settings.\nDo you want to continue?"
msgstr ""
msgctxt "#70807"
msgid "Elementum does not support network folder downloads, do you want to change the download location?"
msgstr "Elementum non supporta i download su cartella di rete, vuoi cambiare il percorso di download?"
# DNS start [ settings and declaration ]
msgctxt "#707401"
msgid "Enable DNS check alert"

View File

@@ -6011,6 +6011,10 @@ msgctxt "#70806"
msgid "Changing this parameter permanently overwrites the Elementum settings.\nDo you want to continue?"
msgstr "Modificando questo parametro vengono sovrascritte permanentemente le impostazioni di Elementum.\nVuoi continuare?"
msgctxt "#70807"
msgid "Elementum does not support network folder downloads, do you want to change the download location?"
msgstr "Elementum non supporta i download su cartella di rete, vuoi cambiare il percorso di download?"
# DNS start [ settings and declaration ]
msgctxt "#707401"
msgid "Enable DNS check alert"

View File

@@ -12,9 +12,8 @@ from platformcode import platformtools, logger
files = None
def test_video_exists(page_url):
types= "Archivo"
gen = "o"
msg = "El link tiene algún problema."
types= "File"
msg = "The link has a problem."
id_video = None
get = ""
seqno = random.randint(0, 0xFFFFFFFF)
@@ -29,25 +28,24 @@ def test_video_exists(page_url):
get = "&n=" + f_id
post = {"a":"f","c":1,"r":0}
isfolder = True
types= "Carpeta"
gen = "a"
types= "Folder"
if id_video:
#Aqui ya para hacer un check se complica, no hay una manera directa aún teniendo la id del video dentro de la carpeta
return True, ""
codes = {-1: 'Se ha producido un error interno en Mega.nz',
-2: 'Error en la petición realizada, Cod -2',
-3: 'Un atasco temporal o malfuncionamiento en el servidor de Mega impide que se procese su link',
-4: 'Ha excedido la cuota de transferencia permitida. Vuelva a intentarlo más tarde',
-6: types + ' no encontrad' + gen + ', cuenta eliminada',
-9: types + ' no encontrad'+ gen,
-11: 'Acceso restringido',
-13: 'Está intentando acceder a un archivo incompleto',
-14: 'Una operación de desencriptado ha fallado',
-15: 'Sesión de usuario expirada o invalida, logueese de nuevo',
-16: types + ' no disponible, la cuenta del uploader fue baneada',
-17: 'La petición sobrepasa su cuota de transferiencia permitida',
-18: types + ' temporalmente no disponible, intentelo de nuevo más tarde'
codes = {-1: 'An internal error has occurred in Mega.nz',
-2: 'Error in the request made, Cod -2',
-3: 'A temporary jam or malfunction in the Mega server prevents your link from being processed',
-4: 'You have exceeded the allowed transfer fee. Try it again later',
-6: types + ' not find deleted account',
-9: types + ' not find',
-11: 'Restricted access',
-13: 'You are trying to access an incomplete file',
-14: 'Decryption operation failed',
-15: 'User session expired or invalid, log in again',
-16: types + ' not available, the uploader account was banned',
-17: 'The request exceeds your allowable transfer fee',
-18: types + ' temporarily unavailable, please try again later'
}
api = 'https://g.api.mega.co.nz/cs?id=%d%s' % (seqno, get)
req_api = httptools.downloadpage(api, post=json.dumps([post])).data
@@ -65,16 +63,16 @@ def test_video_exists(page_url):
return False, msg
else:
#Comprobación limite cuota restante
from megaserver import Client
from lib.megaserver import Client
global c
c = Client(url=page_url, is_playing_fnc=platformtools.is_playing)
global files
files = c.get_files()
if files == 509:
msg1 = "[B][COLOR tomato]El video excede el limite de visionado diario que Mega impone a los usuarios Free."
msg1 += " Prueba en otro servidor o canal.[/B][/COLOR]"
msg1 = "The video exceeds the daily viewing limit."
return False, msg1
elif isinstance(files, (int, long)):
return False, "Error codigo %s" % str(files)
return False, "Error code %s" % str(files)
return True, ""
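
test_video_exists posts a JSON command to https://g.api.mega.co.nz/cs and, when Mega answers with a bare negative code, maps it to one of the (now English) messages above. The lookup itself boils down to a dict access; a toy version with a couple of the codes from the table (helper name illustrative, messages abbreviated):

    def mega_error_message(code, types='File'):
        # Subset of the codes dict shown above.
        codes = {-1: 'An internal error has occurred in Mega.nz',
                 -9: types + ' not found',
                 -18: types + ' temporarily unavailable, please try again later'}
        return codes.get(code, 'Error code %s' % code)
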

View File

@@ -73,13 +73,18 @@ def elementum_download(item):
if elementum_setting:
set_elementum(True)
time.sleep(3)
TorrentName = match(item.url, patron=r'btih(?::|%3A)([^&%]+)', string=True).match
post = 'uri=%s&file=null&all=1' % urllib.quote_plus(item.url)
match(elementum_host + 'add', post=post, timeout=5, alfa_s=True, ignore_response_code=True)
while not filetools.isfile(filetools.join(elementum_setting.getSetting('torrents_path'), TorrentName + '.torrent')):
time.sleep(1)
if config.get_setting('downloadpath').startswith('smb'):
select = platformtools.dialog_yesno('Elementum', config.get_localized_string(70807))
if select:
xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?eyJjaGFubmVsIjoic2hvcnRjdXRzIiwgImFjdGlvbiI6IlNldHRpbmdPblBvc2l0aW9uIiwgImNhdGVnb3J5Ijo2LCAic2V0dGluZyI6MX0=)")
else:
TorrentName = match(item.url, patron=r'btih(?::|%3A)([^&%]+)', string=True).match
post = 'uri=%s&file=null&all=1' % urllib.quote_plus(item.url)
match(elementum_host + 'add', post=post, timeout=5, alfa_s=True, ignore_response_code=True)
while not filetools.isfile(filetools.join(elementum_setting.getSetting('torrents_path'), TorrentName + '.torrent')):
time.sleep(1)
monitor_update(TorrentPath, TorrentName)
monitor_update(TorrentPath, TorrentName)
def elementum_monitor():
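
The new branch refuses to hand a download to Elementum when the configured path is a network share (smb://), offering to jump to the download-path setting instead; only local paths fall through to the usual hand-off: extract the info hash from the magnet, POST it to Elementum's add endpoint, then wait for the .torrent file to appear. A sketch of that hand-off with the same patron and endpoint as above (post_fn stands in for the project's match(..., post=...) call and is illustrative):

    import os, re, time
    try:
        from urllib.parse import quote_plus   # py3
    except ImportError:
        from urllib import quote_plus         # py2

    def add_to_elementum(magnet, torrents_path, post_fn):
        # Pull the info hash out of the magnet URI (same 'btih' patron as above).
        torrent_name = re.search(r'btih(?::|%3A)([^&%]+)', magnet).group(1)
        post_fn('uri=%s&file=null&all=1' % quote_plus(magnet))   # POST to <elementum_host>/add
        # Block until Elementum has written the .torrent into its torrents_path.
        while not os.path.isfile(os.path.join(torrents_path, torrent_name + '.torrent')):
            time.sleep(1)
        return torrent_name
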