From 36f9e691933c6551e000ec61fa5f95216e4cc993 Mon Sep 17 00:00:00 2001 From: Intel1 Date: Wed, 21 Nov 2018 09:22:00 -0500 Subject: [PATCH 01/24] Actualizados MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit cinetux: fix enlaces danimados: fix enlaces de películas planetadocumental: Eliminados, solo tiene archivos .rar xms: fix enlaces descargacineclasico: fix enlaces bitp: fix enlaces dostream: fix enlaces --- plugin.video.alfa/channels/cinetux.py | 56 +++---- plugin.video.alfa/channels/danimados.py | 33 ++-- .../channels/planetadocumental.json | 22 --- .../channels/planetadocumental.py | 142 ------------------ plugin.video.alfa/channels/xms.py | 6 +- plugin.video.alfa/channels/yespornplease.py | 20 +-- plugin.video.alfa/lib/unshortenit.py | 2 +- plugin.video.alfa/servers/bitp.py | 2 +- plugin.video.alfa/servers/dostream.py | 24 ++- plugin.video.alfa/servers/streamplay.json | 2 +- 10 files changed, 62 insertions(+), 247 deletions(-) delete mode 100644 plugin.video.alfa/channels/planetadocumental.json delete mode 100644 plugin.video.alfa/channels/planetadocumental.py diff --git a/plugin.video.alfa/channels/cinetux.py b/plugin.video.alfa/channels/cinetux.py index b770a215..303616d2 100644 --- a/plugin.video.alfa/channels/cinetux.py +++ b/plugin.video.alfa/channels/cinetux.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -import re from channels import autoplay from channels import filtertools from core import httptools @@ -176,11 +175,11 @@ def destacadas(item): item.text_color = color2 data = httptools.downloadpage(item.url).data bloque = scrapertools.find_single_match(data, 'peliculas_destacadas.*?class="letter_home"') - patron = '(?s)title="([^"]+)".*?' - patron += 'href="([^"]+)".*?' + patron = '(?s)href="([^"]+)".*?' + patron += 'alt="([^"]+)".*?' 
patron += 'src="([^"]+)' matches = scrapertools.find_multiple_matches(bloque, patron) - for scrapedtitle, scrapedurl, scrapedthumbnail in matches: + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: scrapedurl = CHANNEL_HOST + scrapedurl itemlist.append(item.clone(action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, @@ -224,11 +223,12 @@ def findvideos(item): logger.info() itemlist=[] data = httptools.downloadpage(item.url).data - patron = 'class="title">.*?src.*?/>([^>]+).*?data-type="([^"]+).*?data-post="(\d+)".*?data-nume="(\d+)' - matches = re.compile(patron, re.DOTALL).findall(data) - #logger.info("Intel66") - #scrapertools.printMatches(matches) - for language, tp, pt, nm in matches: + patron = 'tooltipctx.*?data-type="([^"]+).*?' + patron += 'data-post="(\d+)".*?' + patron += 'data-nume="(\d+).*?' + patron += 'class="title">.*?src.*?/>([^<]+)' + matches = scrapertools.find_multiple_matches(data, patron) + for tp, pt, nm, language in matches: language = language.strip() post = {'action':'doo_player_ajax', 'post':pt, 'nume':nm, 'type':tp} post = urllib.urlencode(post) @@ -242,17 +242,12 @@ def findvideos(item): else: title = '' url = scrapertools.find_single_match(new_data, "src='([^']+)'") - #logger.info("Intel33 %s" %url) - url = get_url(url) - if "mega" not in url and "mediafire" not in url: + url = get_url(url.replace('\\/', '/')) + if url: itemlist.append(Item(channel=item.channel, title ='%s'+title, url=url, action='play', quality=item.quality, language=IDIOMAS[language], infoLabels=item.infoLabels)) - #logger.info("Intel44") - #scrapertools.printMatches(itemlist) patron = "([^<]+)<" - matches = re.compile(patron, re.DOTALL).findall(data) - #logger.info("Intel66a") - #scrapertools.printMatches(matches) + matches = scrapertools.find_multiple_matches(data, patron) for hidden_url, quality, language in matches: if not config.get_setting('unify'): title = ' [%s][%s]' % (quality, 
IDIOMAS[language]) @@ -260,27 +255,32 @@ def findvideos(item): title = '' new_data = httptools.downloadpage(hidden_url).data url = scrapertools.find_single_match(new_data, 'id="link" href="([^"]+)"') - url = url.replace('\\/', '/') - url = get_url(url) - if "mega" not in url and "mediafire" not in url: + url = get_url(url.replace('\\/', '/')) + if url: itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', quality=quality, language=IDIOMAS[language], infoLabels=item.infoLabels)) - #logger.info("Intel55") - #scrapertools.printMatches(itemlist) itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) + itemlist.sort(key=lambda it: (it.language, it.server, it.quality)) + tmdb.set_infoLabels(itemlist, __modo_grafico__) return itemlist def get_url(url): + logger.info() if "cinetux.me" in url: d1 = httptools.downloadpage(url).data - if "mail" in url: - id = scrapertools.find_single_match(d1, ' 0 and item.contentType=="movie" and item.contentChannel!='videolibrary': diff --git a/plugin.video.alfa/channels/planetadocumental.json b/plugin.video.alfa/channels/planetadocumental.json deleted file mode 100644 index ecc6eef7..00000000 --- a/plugin.video.alfa/channels/planetadocumental.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "id": "planetadocumental", - "name": "Planeta documental", - "language": ["*"], - "active": true, - "adult": false, - "thumbnail": "https://s8.postimg.cc/r6njedwdt/planeta_documental1.png", - "banner": "https://s8.postimg.cc/6za3m36m9/planeta_documental2.png", - "categories": [ - "documentary" - ], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": true, - "enabled": true, - "visible": true - } - ] -} diff --git a/plugin.video.alfa/channels/planetadocumental.py b/plugin.video.alfa/channels/planetadocumental.py deleted file mode 100644 index 52f973ae..00000000 --- a/plugin.video.alfa/channels/planetadocumental.py 
+++ /dev/null @@ -1,142 +0,0 @@ -# -*- coding: utf-8 -*- -# -*- Channel Planeta Documental -*- -# -*- Created for Alfa-addon -*- -# -*- By the Alfa Develop Group -*- - -from core import httptools -from core import jsontools -from core import scrapertools -from core import servertools -from core import tmdb -from core.item import Item -from channelselector import get_thumb -from platformcode import config, logger -from channels import autoplay -from channels import filtertools - - -IDIOMAS = {"Latino": "LAT"} -list_language = IDIOMAS.values() - -list_quality = [] - -list_servers = ['gvideo'] - -host = "https://www.planetadocumental.com" - -def mainlist(item): - logger.info() - itemlist = [] - autoplay.init(item.channel, list_servers, list_quality) - itemlist.append(item.clone(title="Últimos documentales", action="lista", - url= host, - thumbnail=get_thumb('lastest', auto=True))) - itemlist.append(item.clone(title="Por genero", action="generos", - url= host, thumbnail=get_thumb('genres', auto=True))) - itemlist.append(item.clone(title="", action="")) - itemlist.append(item.clone(title="Buscar...", action="search", thumbnail=get_thumb('search', auto=True))) - - return itemlist - - - -def generos(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - bloque = scrapertools.find_single_match(data, 'sub-menu elementor-nav-menu--dropdown(.*?)/search[^\"']+).*?>(?P[^<>]+).*?badge[^>]+>(?P\d+)", data, re.DOTALL | re.MULTILINE) for url, name, counter in categories: result.append(item.clone(action = "links", title = "%s (%s videos)" % (name, counter), url = urljoin(item.url, url))) - return result + def get_page(url): page = re.search("p=(\d+)", url) if page: return int(page.group(1)) return 1 + def get_page_url(url, page): logger.debug("URL: %s to page %d" % (url, page)) resultURL = re.sub("([&\?]p=)(?:\d+)", "\g<1>%d" % page, url) if resultURL == url: resultURL += ("&" if "?" 
in url else "?") + "p=%d" % (page) - logger.debug("Result: %s" % (resultURL)) return resultURL @@ -64,21 +62,15 @@ def get_page_url(url, page): def links(item): logger.info() data = httptools.downloadpage(item.url).data - reExpr = "[^'\"]+)[^>]+(?:title|alt)[^'\"]*['\"](?P[^\"]+)[^>]+id[^'\"]*['\"](?P<id>[^'\"]+)[^>]*>(?:[^<]*<[^>]+>(?P<quality>[^<]+)<)?[^<]*<[^>]*duration[^>]*>(?P<duration>[^<]+)" reResults = re.findall(reExpr, data, re.MULTILINE | re.DOTALL) result = [] - for img, title, vID, quality, duration in reResults: - logger.info("[link] %(title)s [%(quality)s] [%(duration)s]: %(vid)s (%(img)s" % ({"title": title, "duration": duration, "vid": vID, "img": img, "quality": quality if quality else "--"})) - formattedQuality = "" if quality: formattedQuality += " [%s]" % (quality) - titleFormatted = "%(title)s%(quality)s [%(duration)s]" % ({"title": title, "quality": formattedQuality, "duration": duration}) result.append(item.clone(action = "play", title = titleFormatted, url = urljoin(item.url, "/view/%s" % (vID)), thumbnail = urljoin(item.url, img), vID = vID)) - # Has pagination paginationOccurences = data.count('class="prevnext"') if paginationOccurences: @@ -86,13 +78,11 @@ def links(item): logger.info("Page " + str(page) + " Ocurrences: " + str(paginationOccurences)) if page > 1: result.append(item.clone(action = "links", title = "<< Anterior", url = get_page_url(item.url, page - 1))) - if paginationOccurences > 1 or page == 1: result.append(item.clone(action = "links", title = "Siguiente >>", url = get_page_url(item.url, page + 1))) - - return result + def play(item): logger.info(item) embededURL = urljoin(item.url, "/view/%s" % (item.vID)) diff --git a/plugin.video.alfa/lib/unshortenit.py b/plugin.video.alfa/lib/unshortenit.py index 664e3bfe..9e12f629 100755 --- a/plugin.video.alfa/lib/unshortenit.py +++ b/plugin.video.alfa/lib/unshortenit.py @@ -26,7 +26,7 @@ def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL): class UnshortenIt(object): - 
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net' + _adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net' _linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co' _adfocus_regex = r'adfoc\.us' _lnxlu_regex = r'lnx\.lu' diff --git a/plugin.video.alfa/servers/bitp.py b/plugin.video.alfa/servers/bitp.py index 2070f04c..45d2a2f2 100644 --- a/plugin.video.alfa/servers/bitp.py +++ b/plugin.video.alfa/servers/bitp.py @@ -23,7 +23,7 @@ def get_video_url(page_url, user="", password="", video_password=""): logger.info("(page_url='%s')" % page_url) video_urls = [] data = httptools.downloadpage(page_url).data - videourl = scrapertools.find_multiple_matches(data, '<source src="(http[^"]+).*?data-res="([^"]+)') + videourl = scrapertools.find_multiple_matches(data, '<source src="(http[^"]+).*?title="([^"]+)') scrapertools.printMatches(videourl) for scrapedurl, scrapedquality in videourl: if "loadthumb" in scrapedurl: diff --git a/plugin.video.alfa/servers/dostream.py b/plugin.video.alfa/servers/dostream.py index 4d23a236..7184d286 100644 --- a/plugin.video.alfa/servers/dostream.py +++ b/plugin.video.alfa/servers/dostream.py @@ -3,33 +3,27 @@ # Conector DoStream By Alfa development Group # -------------------------------------------------------- -import re from core import httptools +from core import scrapertools from platformcode import logger def test_video_exists(page_url): logger.info("(page_url='%s')" % page_url) - data = httptools.downloadpage(page_url) - if data.code == 404: - return False, "[Dostream] El archivo no existe o ha sido borrado" - + return False, 
"[Dostream] El archivo no existe o ha sido borrado" return True, "" def get_video_url(page_url, premium=False, user="", password="", video_password=""): logger.info("url=" + page_url) - video_urls = [] - data = httptools.downloadpage(page_url).data - data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) - logger.debug(data) - patron = "(?:'src'|'url'):'(http.*?)'" - matches = re.compile(patron, re.DOTALL).findall(data) - - for url in matches: - video_urls.append(['dostream',url]) - + data = httptools.downloadpage(page_url, headers={"Referer":page_url}).data + patron = '"label":"([^"]+)".*?' + patron += '"src":"(http.*?)".*?' + matches = scrapertools.find_multiple_matches(data, patron) + for label, url in matches: + video_urls.append(['%s [dostream]' %label, url]) + video_urls.sort(key=lambda it: int(it[0].split("p ")[0])) return video_urls diff --git a/plugin.video.alfa/servers/streamplay.json b/plugin.video.alfa/servers/streamplay.json index eec0fb52..22cf998a 100755 --- a/plugin.video.alfa/servers/streamplay.json +++ b/plugin.video.alfa/servers/streamplay.json @@ -4,7 +4,7 @@ "ignore_urls": [], "patterns": [ { - "pattern": "streamplay.to/(?:embed-|player-|)([a-z0-9]+)(?:.html|)", + "pattern": "streamplay.(?:to|me)/(?:embed-|player-|)([a-z0-9]+)(?:.html|)", "url": "http://streamplay.to/player-\\1.html" } ] From 86a4c16180ed48d97ffe04e492f6f5066101ea31 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Wed, 21 Nov 2018 10:35:40 -0500 Subject: [PATCH 02/24] unshortenit: actualizado --- plugin.video.alfa/lib/unshortenit.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugin.video.alfa/lib/unshortenit.py b/plugin.video.alfa/lib/unshortenit.py index 9e12f629..7c25e755 100755 --- a/plugin.video.alfa/lib/unshortenit.py +++ b/plugin.video.alfa/lib/unshortenit.py @@ -14,7 +14,7 @@ import urllib from base64 import b64decode from core import httptools -from platformcode import config +from platformcode import 
config, logger def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL): @@ -355,7 +355,6 @@ class UnshortenIt(object): try: r = httptools.downloadpage(uri, timeout=self._timeout) html = r.data - session_id = re.findall(r'sessionId\:(.*?)\"\,', html) if len(session_id) > 0: session_id = re.sub(r'\s\"', '', session_id[0]) @@ -366,8 +365,9 @@ class UnshortenIt(object): http_header["Referer"] = uri http_header["Origin"] = "http://sh.st" http_header["X-Requested-With"] = "XMLHttpRequest" - + if config.is_xbmc(): + import xbmc xbmc.sleep(5 * 1000) else: time.sleep(5 * 1000) From 13ad03beca72914485d4b4d70de124689ce381ff Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Wed, 21 Nov 2018 10:43:01 -0500 Subject: [PATCH 03/24] cinecalidad y pelisplusco: fix --- plugin.video.alfa/channels/cinecalidad.py | 13 +++++++--- plugin.video.alfa/channels/pelisplusco.py | 29 +++++++++++++---------- 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/plugin.video.alfa/channels/cinecalidad.py b/plugin.video.alfa/channels/cinecalidad.py index 912048a7..ec18bc95 100644 --- a/plugin.video.alfa/channels/cinecalidad.py +++ b/plugin.video.alfa/channels/cinecalidad.py @@ -433,7 +433,14 @@ def newest(categoria): def search(item, texto): logger.info() + itemlist = [] texto = texto.replace(" ", "-") - item.url = item.host + '?s=' + texto - if texto != '': - return peliculas(item) + if item.host != '': + host_list = [item.host] + else: + host_list = ['http://www.cinecalidad.to', 'http://cinecalidad.to/espana/'] + for host_name in host_list: + item.url = host_name + '?s=' + texto + if texto != '': + itemlist.extend(peliculas(item)) + return itemlist \ No newline at end of file diff --git a/plugin.video.alfa/channels/pelisplusco.py b/plugin.video.alfa/channels/pelisplusco.py index 744de2ca..570a4082 100644 --- a/plugin.video.alfa/channels/pelisplusco.py +++ b/plugin.video.alfa/channels/pelisplusco.py @@ -143,10 +143,12 @@ def series_menu(item): 
return itemlist -def get_source(url): - +def get_source(url, referer=None): logger.info() - data = httptools.downloadpage(url).data + if referer is None: + data = httptools.downloadpage(url).data + else: + data = httptools.downloadpage(url, headers={'Referer':referer, 'x-requested-with': 'XMLHttpRequest'}).data data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data) return data @@ -173,7 +175,7 @@ def list_all (item): matches = scrapertools.find_multiple_matches(data, patron) for scrapedurl, scrapedthumbnail, scrapedyear, scrapedtitle in matches: - url = host+scrapedurl+'p001/' + url = host+scrapedurl thumbnail = scrapedthumbnail contentTitle=scrapedtitle title = contentTitle @@ -349,16 +351,15 @@ def season_episodes(item): def get_links_by_language(item, data): logger.info() - video_list = [] - language = scrapertools.find_single_match(data, 'ul id=level\d_(.*?)\s*class=') - patron = 'data-source=(.*?)data.*?srt=(.*?)data-iframe.*?Opci.*?<.*?hidden>[^\(]\((.*?)\)' + language = scrapertools.find_single_match(data, 'ul id="level\d_([^"]+)"\s*class=') + patron = 'data-source="([^"]+)"data-quality="([^"]+)"data-srt="([^"]+)"' matches = re.compile(patron, re.DOTALL).findall(data) if language in IDIOMAS: language = IDIOMAS[language] - for url, sub, quality in matches: + for url, quality, sub in matches: if 'http' not in url: new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \ @@ -391,15 +392,19 @@ def findvideos(item): logger.info() itemlist = [] video_list = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + if item.contentType == 'movie': + new_url = new_url = item.url.replace('/pelicula/', '/player/%s/' % item.contentType) + else: + base_url = scrapertools.find_single_match(item.url, '(.*?)/temporada') + new_url = base_url.replace('/serie/', '/player/serie/') + new_url += '|%s|%s/' % (item.contentSeason, item.contentEpisodeNumber) + data = get_source(new_url, 
referer=item.url) - patron_language ='(<ul id=level\d_.*?\s*class=.*?ul>)' + patron_language ='(<ul id="level\d_.*?"*class=.*?ul>)' matches = re.compile(patron_language, re.DOTALL).findall(data) for language in matches: video_list.extend(get_links_by_language(item, language)) - video_list = servertools.get_servers_itemlist(video_list, lambda i: i.title % (i.server.capitalize(), i.language, i.quality) ) # Requerido para FilterTools From fc04c143ecc00953936f6226cbeb8e0c8936a416 Mon Sep 17 00:00:00 2001 From: Kingbox <37674310+lopezvg@users.noreply.github.com> Date: Wed, 21 Nov 2018 18:48:40 +0100 Subject: [PATCH 04/24] Newpct1: mejora en la disponibilidad --- plugin.video.alfa/channels/newpct1.py | 18 +++++++++++++---- plugin.video.alfa/channels/videolibrary.py | 3 --- plugin.video.alfa/lib/generictools.py | 23 ++++++++++++++-------- 3 files changed, 29 insertions(+), 15 deletions(-) diff --git a/plugin.video.alfa/channels/newpct1.py b/plugin.video.alfa/channels/newpct1.py index 1bdba2d3..e456b356 100644 --- a/plugin.video.alfa/channels/newpct1.py +++ b/plugin.video.alfa/channels/newpct1.py @@ -133,8 +133,11 @@ def mainlist(item): thumbnail=thumb_docus, category=item.category, channel_host=item.channel_host)) itemlist.append( Item(channel=item.channel, action="search", title="Buscar", url=item.channel_host + "buscar", thumbnail=thumb_buscar, category=item.category, channel_host=item.channel_host)) - - itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador, category=item.category, channel_host=item.channel_host)) + + clone_act = 'Clone: ' + if config.get_setting('clonenewpct1_channel_default', channel_py) == 0: + clone_act = 'Aleatorio: ' + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR] (" + clone_act + item.category + ")", folder=False, thumbnail=thumb_separador, category=item.category, channel_host=item.channel_host)) 
itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configurar canal", thumbnail=thumb_settings, category=item.category, channel_host=item.channel_host)) @@ -243,9 +246,8 @@ def submenu_novedades(item): item.extra2 = '' #Renombramos el canal al nombre de clone inicial desde la URL - host = scrapertools.find_single_match(item.url, '(http.?\:\/\/(?:www.)?\w+\.\w+\/)') item.channel_host = host - item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize() + item.category = channel_clone_name.capitalize() data = '' timeout_search=timeout * 2 #Más tiempo para Novedades, que es una búsqueda @@ -2051,7 +2053,15 @@ def episodios(item): if match['episode'] is None: match['episode'] = "0" try: match['season'] = int(match['season']) + season_alt = match['season'] match['episode'] = int(match['episode']) + if match['season'] > max_temp: + logger.error("ERROR 07: EPISODIOS: Error en número de Temporada o Episodio: " + " / TEMPORADA/EPISODIO: " + str(match['season']) + " / " + str(match['episode']) + " / NUM_TEMPORADA: " + str(max_temp) + " / " + str(season) + " / MATCHES: " + str(matches)) + match['season'] = scrapertools.find_single_match(item_local.url, '\/[t|T]emp\w+-*(\d+)\/') + if not match['season']: + match['season'] = season_alt + else: + match['season'] = int(match['season']) except: logger.error("ERROR 07: EPISODIOS: Error en número de Temporada o Episodio: " + " / TEMPORADA/EPISODIO: " + str(match['season']) + " / " + str(match['episode']) + " / NUM_TEMPORADA: " + str(max_temp) + " / " + str(season) + " / MATCHES: " + str(matches)) diff --git a/plugin.video.alfa/channels/videolibrary.py b/plugin.video.alfa/channels/videolibrary.py index 1212ec4d..e2edd5c1 100644 --- a/plugin.video.alfa/channels/videolibrary.py +++ b/plugin.video.alfa/channels/videolibrary.py @@ -62,7 +62,6 @@ def list_movies(item, silent=False): for canal_org in new_item.library_urls: canal = generictools.verify_channel(canal_org) - 
logger.error(canal) try: channel_verify = __import__('channels.%s' % canal, fromlist=["channels.%s" % canal]) logger.debug('El canal %s parece correcto' % channel_verify) @@ -286,8 +285,6 @@ def list_tvshows(item): # logger.debug("item_tvshow:\n" + item_tvshow.tostring('\n')) ## verifica la existencia de los canales ## - - logger.debug(item_tvshow) if len(item_tvshow.library_urls) > 0: itemlist.append(item_tvshow) diff --git a/plugin.video.alfa/lib/generictools.py b/plugin.video.alfa/lib/generictools.py index 5647fb0d..eb549794 100644 --- a/plugin.video.alfa/lib/generictools.py +++ b/plugin.video.alfa/lib/generictools.py @@ -863,6 +863,7 @@ def post_tmdb_episodios(item, itemlist): #Componemos el título final, aunque con Unify usará infoLabels['episodio_titulo'] item_local.infoLabels['title'] = item_local.infoLabels['episodio_titulo'] + item_local.title = item_local.title.replace("[", "-").replace("]", "-") item_local.title = '%s [%s] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.title, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language)) #Quitamos campos vacíos @@ -871,7 +872,7 @@ def post_tmdb_episodios(item, itemlist): item_local.title = item_local.title.replace(" []", "").strip() item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?-?\s?\]?\]\[\/COLOR\]', '', item_local.title).strip() item_local.title = re.sub(r'\s?\[COLOR \w+\]-?\s?\[\/COLOR\]', '', item_local.title).strip() - item_local.title = item_local.title.replace("[", "-").replace("]", "-").replace(".", ",").replace("GB", "G B").replace("Gb", "G b").replace("gb", "g b").replace("MB", "M B").replace("Mb", "M b").replace("mb", "m b") + item_local.title = item_local.title.replace(".", ",").replace("GB", "G B").replace("Gb", "G b").replace("gb", "g b").replace("MB", "M B").replace("Mb", "M b").replace("mb", "m b") #Si la información de num. 
total de episodios de TMDB no es correcta, tratamos de calcularla if num_episodios < item_local.contentEpisodeNumber: @@ -1161,7 +1162,8 @@ def post_tmdb_findvideos(item, itemlist): if item.channel_alt: title_gen = '[COLOR yellow]%s [/COLOR][ALT]: %s' % (item.category.capitalize(), title_gen) - elif (config.get_setting("quit_channel_name", "videolibrary") == 1 or item.channel == channel_py) and item.contentChannel == "videolibrary": + #elif (config.get_setting("quit_channel_name", "videolibrary") == 1 or item.channel == channel_py) and item.contentChannel == "videolibrary": + else: title_gen = '%s: %s' % (item.category.capitalize(), title_gen) #Si intervención judicial, alerto!!! @@ -1814,6 +1816,9 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F if item.channel_host: #y se borran resto de pasadas anteriores del item.channel_host + if it.emergency_urls: + item.emergency_urls = it.emergency_urls #Refrescar desde el .nfo + #Analizamos si hay series o películas que migrar, debido a que se ha activado en el .json del canal la opción "guardar" #"emergency_urls = 1", y hay que calcularla para todos los episodios y película existentes en la Videoteca. #Si "emergency_urls" está activada para uno o más canales, se verifica en el .nfo del vídeo si ya se ha realizado @@ -1824,10 +1829,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F #automáticamente. En el caso de peliculas, se general aquí el json actualizado y se marca el .nfo como actualizado. 
#Cuando en el .json se activa "Borrar", "emergency_urls = 2", se borran todos los enlaces existentes #Cuando en el .json se activa "Actualizar", "emergency_urls = 3", se actualizan todos los enlaces existentes - - if it.emergency_urls: - item.emergency_urls = it.emergency_urls #Refrescar desde el .nfo - + """ verify_cached_torrents() #TEMPORAL: verificamos si los .torrents son correctos try: #Si ha habido errores, vemos la lista y los reparamos json_error_path = filetools.join(config.get_runtime_path(), 'error_cached_torrents.json') @@ -1875,7 +1877,12 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F if not encontrado: logger.error('REGENERANDO: ' + str(item.emergency_urls)) item.emergency_urls.pop(channel_alt, None) - + except: + logger.error('Error en el proceso de RECARGA de URLs de Emergencia') + logger.error(traceback.format_exc()) + """ + + try: if item.url: #Viene de actualización de videoteca de series #Analizamos si el canal ya tiene las urls de emergencia: guardar o borrar if (config.get_setting("emergency_urls", item.channel) == 1 and (not item.emergency_urls or (item.emergency_urls and not item.emergency_urls.get(channel_alt, False)))) or (config.get_setting("emergency_urls", item.channel) == 2 and item.emergency_urls.get(channel_alt, False)) or config.get_setting("emergency_urls", item.channel) == 3 or emergency_urls_force: @@ -1894,7 +1901,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F except: logger.error('Error en el proceso de ALMACENAMIENTO de URLs de Emergencia') logger.error(traceback.format_exc()) - + #Ahora tratamos las webs intervenidas, tranformamos la url, el nfo y borramos los archivos obsoletos de la serie if channel not in intervencion and channel_py_alt not in intervencion and category not in intervencion and channel_alt != 'videolibrary': #lookup return (item, it, overwrite) #... 
el canal/clone está listado From eaa2339c1600cc78c5377e33f1c6f3612dd64013 Mon Sep 17 00:00:00 2001 From: Kingbox <37674310+lopezvg@users.noreply.github.com> Date: Wed, 21 Nov 2018 18:49:42 +0100 Subject: [PATCH 05/24] Enlaces de Emergencia MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adaptación de canales: - Mejortorrent - Mejortorrent1 --- plugin.video.alfa/channels/mejortorrent.py | 106 ++++++++++++++---- plugin.video.alfa/channels/mejortorrent1.json | 22 ++++ plugin.video.alfa/channels/mejortorrent1.py | 89 +++++++++++---- plugin.video.alfa/core/videolibrarytools.py | 2 +- 4 files changed, 172 insertions(+), 47 deletions(-) diff --git a/plugin.video.alfa/channels/mejortorrent.py b/plugin.video.alfa/channels/mejortorrent.py index 722c559e..63e25330 100755 --- a/plugin.video.alfa/channels/mejortorrent.py +++ b/plugin.video.alfa/channels/mejortorrent.py @@ -265,8 +265,8 @@ def listado(item): del item.next_page #logger.debug(data) - logger.debug("PATRON1: " + patron + " / ") - logger.debug(matches) + #logger.debug("PATRON1: " + patron + " / ") + #logger.debug(matches) # Primera pasada # En la primera pasada se obtiene una información básica del título a partir de la url @@ -360,8 +360,8 @@ def listado(item): cnt_pag += cnt_tot cnt_pag_num += 1 - logger.debug("PATRON2: " + patron_title) - logger.debug(matches) + #logger.debug("PATRON2: " + patron_title) + #logger.debug(matches) cnt = 0 for scrapedtitle, notused, scrapedinfo in matches: item_local = itemlist[cnt] #Vinculamos item_local con la entrada de la lista itemlist (más fácil de leer) @@ -763,6 +763,12 @@ def findvideos(item): itemlist_f = [] #Itemlist de enlaces filtrados if not item.language: item.language = ['CAST'] #Castellano por defecto + matches = [] + + #Si es un lookup para cargar las urls de emergencia en la Videoteca... 
+ if item.videolibray_emergency_urls: + item.emergency_urls = [] + item.emergency_urls.append([]) #Reservamos el espacio para los .torrents locales #Bajamos los datos de la página data = '' @@ -775,24 +781,47 @@ def findvideos(item): data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data) patron = "<a href='(secciones.php\?sec\=descargas&ap=contar&tabla=[^']+)'" except: + pass + + if not data: logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url + " / DATA: " + data) itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log')) - return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + + if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia? + matches = item.emergency_urls[1] #Restauramos matches + item.armagedon = True #Marcamos la situación como catastrófica + else: + if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca... 
+ return item #Devolvemos el Item de la llamada + else: + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos - matches = re.compile(patron, re.DOTALL).findall(data) + if not item.armagedon: #Si es un proceso normal, seguimos + matches = re.compile(patron, re.DOTALL).findall(data) if not matches: item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada if item.intervencion: #Sí ha sido clausurada judicialmente item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Llamamos al método para el pintado del error - return itemlist #Salimos - - logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data) - itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log')) - return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + elif not item.armagedon: + logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data) + itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log')) + + if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia? + matches = item.emergency_urls[1] #Restauramos matches + item.armagedon = True #Marcamos la situación como catastrófica + else: + if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca... 
+ return item #Devolvemos el Item de la llamada + else: + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos #logger.debug(data) #logger.debug("PATRON: " + patron) #logger.debug(matches) + + #Si es un lookup para cargar las urls de emergencia en la Videoteca... + if item.videolibray_emergency_urls: + item.emergency_urls.append(matches) #Salvamnos matches... #Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) @@ -803,41 +832,69 @@ def findvideos(item): url = urlparse.urljoin(item.url, scrapedurl) # Localiza el .torrent en el siguiente link - if not item.post: # Si no es llamada con Post, hay que bajar un nivel más + if not item.post and not item.armagedon: # Si no es llamada con Post, hay que bajar un nivel más try: torrent_data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(url).data) - except: #error + except: #error + pass + + if not torrent_data: logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / URL: " + url + " / DATA: " + data) itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log')) - return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia? + if len(item.emergency_urls[0]): + item_local.url = item.emergency_urls[0][0] #Restauramos la primera url + item.armagedon = True #Marcamos la situación como catastrófica + else: + if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca... 
+ return item #Devolvemos el Item de la llamada + else: + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + #logger.debug(torrent_data) - item_local.url = scrapertools.get_match(torrent_data, ">Pincha.*?<a href='(.*?\/uploads\/torrents\/\w+\/.*?\.torrent)'") - item_local.url = urlparse.urljoin(url, item_local.url) - else: + if not item.armagedon: + item_local.url = scrapertools.get_match(torrent_data, ">Pincha.*?<a href='(.*?\/uploads\/torrents\/\w+\/.*?\.torrent)'") + item_local.url = urlparse.urljoin(url, item_local.url) + + elif not item.armagedon: item_local.url = url # Ya teníamos el link desde el primer nivel (documentales) item_local.url = item_local.url.replace(" ", "%20") + + if item.armagedon and item.emergency_urls and not item.videolibray_emergency_urls: + if len(item.emergency_urls[0]): + item_local.url = item.emergency_urls[0][0] #Guardamos la primera url del .Torrent + if len(item.emergency_urls[0]) > 1: + del item.emergency_urls[0][0] + if not item.armagedon and item.emergency_urls and not item.videolibray_emergency_urls: + if len(item.emergency_urls[0]): + item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la primera url del .Torrent ALTERNATIVA + + if item.videolibray_emergency_urls: + item.emergency_urls[0].append(item_local.url) #Salvamnos la url... 
# Poner la calidad, si es necesario if not item_local.quality: if "hdtv" in item_local.url.lower() or "720p" in item_local.url.lower() or "1080p" in item_local.url.lower() or "4k" in item_local.url.lower(): item_local.quality = scrapertools.find_single_match(item_local.url, '.*?_([H|7|1|4].*?)\.torrent') item_local.quality = item_local.quality.replace("_", " ") + if item.armagedon: #Si es catastrófico, lo marcamos + item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % item_local.quality # Extrae la dimensión del vídeo size = scrapertools.find_single_match(item_local.url, '(\d{1,3},\d{1,2}?\w+)\.torrent') - size = size.upper().replace(".", ",").replace("G", " G ").replace("M", " M ") #sustituimos . por , porque Unify lo borra - if not size: + size = size.upper().replace(".", ",").replace("G", " G ").replace("M", " M ") #sustituimos . por , porque Unify lo borra + if not size and not item.armagedon: size = generictools.get_torrent_size(item_local.url) #Buscamos el tamaño en el .torrent if size: item_local.title = re.sub('\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía item_local.title = '%s [%s]' % (item_local.title, size) #Agregamos size al final del título item_local.quality = re.sub('\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.quality) #Quitamos size de calidad, si lo traía - item_local.quality = '%s [%s]' % (item.quality, size) #Agregamos size al final de calidad + item_local.quality = '%s [%s]' % (item.quality, size) #Agregamos size al final de calidad #Ahora pintamos el link del Torrent, si lo hay - if item_local.url: # Hay Torrent ? - item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent - + if item_local.url: # Hay Torrent ? 
+ item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent + #Preparamos título y calidad, quitamos etiquetas vacías item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title) item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title) @@ -858,6 +915,9 @@ def findvideos(item): #logger.debug("title=[" + item.title + "], torrent=[ " + item_local.url + " ], url=[ " + url + " ], post=[" + item.post + "], thumbnail=[ " + item.thumbnail + " ]" + " size: " + size) + if item.videolibray_emergency_urls: + return item + if len(itemlist_f) > 0: #Si hay entradas filtradas... itemlist.extend(itemlist_f) #Pintamos pantalla filtrada else: diff --git a/plugin.video.alfa/channels/mejortorrent1.json b/plugin.video.alfa/channels/mejortorrent1.json index c17a3407..c5d2aa2f 100644 --- a/plugin.video.alfa/channels/mejortorrent1.json +++ b/plugin.video.alfa/channels/mejortorrent1.json @@ -54,6 +54,28 @@ "VOSE" ] }, + { + "id": "emergency_urls", + "type": "list", + "label": "Se quieren guardar Enlaces de Emergencia por si se cae la Web?", + "default": 1, + "enabled": true, + "visible": true, + "lvalues": [ + "No", + "Guardar", + "Borrar", + "Actualizar" + ] + }, + { + "id": "emergency_urls_torrents", + "type": "bool", + "label": "Se quieren guardar Torrents de Emergencia por si se cae la Web?", + "default": true, + "enabled": true, + "visible": "!eq(-1,'No')" + }, { "id": "seleccionar_ult_temporadda_activa", "type": "bool", diff --git a/plugin.video.alfa/channels/mejortorrent1.py b/plugin.video.alfa/channels/mejortorrent1.py index 6814d79e..23062782 100644 --- a/plugin.video.alfa/channels/mejortorrent1.py +++ b/plugin.video.alfa/channels/mejortorrent1.py @@ -165,8 +165,8 @@ def listado(item): item.contentType = "movie" pag = False #No hay paginación elif (item.extra == "peliculas" 
or item.extra == "varios") and not item.tipo: #Desde Menú principal - patron = '<a href="([^"]+)">?<img src="([^"]+)"[^<]+<\/a>' - patron_enlace = '\/\/.*?\/(.*?)\/$' + patron = '<a href="([^"]+)"[^>]+>?<img src="([^"]+)"[^<]+<\/a>' + patron_enlace = '\/\/.*?\/(8.*?)\/$' patron_title = '<a href="[^"]+">([^<]+)<\/a>(\s*<b>([^>]+)<\/b>)?' item.action = "findvideos" item.contentType = "movie" @@ -184,7 +184,7 @@ def listado(item): pag = False cnt_tot = 10 # Se reduce el numero de items por página porque es un proceso pesado elif item.extra == "series" and not item.tipo: - patron = '<a href="([^"]+)">?<img src="([^"]+)"[^<]+<\/a>' + patron = '<a href="([^"]+)"[^>]+>?<img src="([^"]+)"[^<]+<\/a>' patron_enlace = '\/\/.*?\/(.*?)-[temporada]?\d+[-|x]' patron_title = '<a href="[^"]+">([^<]+)<\/a>(\s*<b>([^>]+)<\/b>)?' patron_title_ep = '\/\/.*?\/(.*?)-(\d{1,2})x(\d{1,2})(?:-al-\d{1,2}x\d{1,2})?-?(\d+p)?\/$' @@ -203,7 +203,7 @@ def listado(item): item.contentType = "tvshow" pag = False else: - patron = '<a href="([^"]+)">?<img src="([^"]+)"[^<]+<\/a>' + patron = '<a href="([^"]+)"[^>]+>?<img src="([^"]+)"[^<]+<\/a>' patron_enlace = '\/\/.*?\/(.*?)-[temporada]?\d+[-|x]' patron_title = '<a href="[^"]+">([^<]+)<\/a>(\s*<b>([^>]+)<\/b>)?' patron_title_ep = '\/\/.*?\/(.*?)-(\d{1,2})x(\d{1,2})(?:-al-\d{1,2}x\d{1,2})?-?(\d+p)?\/$' @@ -813,6 +813,7 @@ def findvideos(item): itemlist_f = [] #Itemlist de enlaces filtrados if not item.language: item.language = ['CAST'] #Castellano por defecto + matches = [] #logger.debug(item) @@ -823,6 +824,11 @@ def findvideos(item): #Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) + #Si es un lookup para cargar las urls de emergencia en la Videoteca... 
+ if item.videolibray_emergency_urls: + item.emergency_urls = [] + item.emergency_urls.append([]) #Reservamos el espacio para los .torrents locales + #Bajamos los datos de la página de todo menos de Documentales y Varios if not item.post: try: @@ -836,34 +842,54 @@ def findvideos(item): if not data: logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url + " / DATA: " + data) itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log')) - return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia? + matches = item.emergency_urls[1] #Restauramos matches + item.armagedon = True #Marcamos la situación como catastrófica + else: + if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca... + return item #Devolvemos el Item de la llamada + else: + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos - matches = re.compile(patron, re.DOTALL).findall(data) + if not item.armagedon: #Si es un proceso normal, seguimos + matches = re.compile(patron, re.DOTALL).findall(data) if not matches: item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada if item.intervencion: #Sí ha sido clausurada judicialmente item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Llamamos al método para el pintado del error - return itemlist #Salimos + else: + logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data) + itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. 
Verificar en la Web y reportar el error con el log')) - logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data) - itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log')) - return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia? + matches = item.emergency_urls[1] #Restauramos matches + item.armagedon = True #Marcamos la situación como catastrófica + else: + if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca... + return item #Devolvemos el Item de la llamada + else: + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos #logger.debug("PATRON: " + patron) #logger.debug(matches) #logger.debug(data) - for scrapedurl, name1, value1, value2, name2 in matches: #Hacemos el FOR aunque solo habrá un item + #Si es un lookup para cargar las urls de emergencia en la Videoteca... + if item.videolibray_emergency_urls: + item.emergency_urls.append(matches) #Salvamnos matches... + + for scrapedurl, name1, value1, value2, name2 in matches: #Hacemos el FOR aunque solo habrá un item #Generamos una copia de Item para trabajar sobre ella item_local = item.clone() url = scrapedurl # Localiza el .torrent en el siguiente link con Post post = '%s=%s&%s=%s' % (name1, value1, name2, value2) - try: - torrent_data = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=False) - except: #error - pass + if not item.armagedon: + try: + torrent_data = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=False) + except: #error + pass else: #Viene de SERIES y DOCUMENTALES. 
Generamos una copia de Item para trabajar sobre ella @@ -874,19 +900,34 @@ def findvideos(item): except: pass - if not torrent_data: + if not torrent_data or not 'location' in torrent_data.headers or not torrent_data.headers['location']: item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada if item.intervencion: #Sí ha sido clausurada judicialmente item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Llamamos al método para el pintado del error - return itemlist #Salimos + elif not item.armagedon: + logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / URL: " + url + " / DATA: " + data) + itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log')) - logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / URL: " + url + " / DATA: " + data) - itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log')) - return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia? + item_local.url = item.emergency_urls[0][0] #Restauramos la url del .torrent + item.armagedon = True #Marcamos la situación como catastrófica + else: + if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca... 
+ return item #Devolvemos el Item de la llamada + else: + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos #Capturamos la url del .torrent desde el Header - item_local.url = torrent_data.headers['location'] if 'location' in torrent_data.headers else item.url_post - item_local.url = item_local.url.replace(" ", "%20") #Quitamos espacios + if not item.armagedon: + item_local.url = torrent_data.headers['location'] if 'location' in torrent_data.headers else item.url_post + item_local.url = item_local.url.replace(" ", "%20") #Quitamos espacios + if item.emergency_urls: + item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la url del .Torrent ALTERNATIVA + + #Si es un lookup para cargar las urls de emergencia en la Videoteca... + if item.videolibray_emergency_urls: + item.emergency_urls[0].append(item_local.url) #Salvamnos la url... + return item #... y nos vamos # Poner la calidad, si es necesario if not item_local.quality: @@ -896,6 +937,8 @@ def findvideos(item): elif "hdtv" in item_local.url.lower() or "720p" in item_local.url.lower() or "1080p" in item_local.url.lower() or "4k" in item_local.url.lower(): item_local.quality = scrapertools.find_single_match(item_local.url, '.*?_([H|7|1|4].*?)\.torrent') item_local.quality = item_local.quality.replace("_", " ") + if item.armagedon: #Si es catastrófico, lo marcamos + item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % item_local.quality # Extrae el tamaño del vídeo if scrapertools.find_single_match(data, '<b>Tama.*?:<\/b>&\w+;\s?([^<]+B)<?'): @@ -903,7 +946,7 @@ def findvideos(item): else: size = scrapertools.find_single_match(item_local.url, '(\d{1,3},\d{1,2}?\w+)\.torrent') size = size.upper().replace(".", ",").replace("G", " G ").replace("M", " M ") #sustituimos . 
por , porque Unify lo borra - if not size: + if not size and not item.armagedon: size = generictools.get_torrent_size(item_local.url) #Buscamos el tamaño en el .torrent if size: item_local.title = re.sub('\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía diff --git a/plugin.video.alfa/core/videolibrarytools.py b/plugin.video.alfa/core/videolibrarytools.py index a11c9674..b4964de2 100644 --- a/plugin.video.alfa/core/videolibrarytools.py +++ b/plugin.video.alfa/core/videolibrarytools.py @@ -801,7 +801,7 @@ def caching_torrents(url, torrents_path=None, timeout=10, lookup=False, data_tor torrents_path += '.torrent' #path para dejar el .torrent torrents_path_encode = filetools.encode(torrents_path) #encode utf-8 del path - if url.endswith(".rar"): #No es un archivo .torrent + if url.endswith(".rar") or url.startswith("magnet:"): #No es un archivo .torrent logger.error('No es un archivo Torrent: ' + url) torrents_path = '' if data_torrent: From 5a8be6902d887564099f8ece04e8ad25e7026371 Mon Sep 17 00:00:00 2001 From: Alfa <30527549+alfa-addon@users.noreply.github.com> Date: Thu, 22 Nov 2018 08:25:44 -0500 Subject: [PATCH 06/24] 2.7.14 --- plugin.video.alfa/addon.xml | 56 ++++++++++++++++++++++++++++++------- 1 file changed, 46 insertions(+), 10 deletions(-) diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml index 1c319169..be86f063 100755 --- a/plugin.video.alfa/addon.xml +++ b/plugin.video.alfa/addon.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="UTF-8" standalone="yes"?> -<addon id="plugin.video.alfa" name="Alfa" version="2.7.13" provider-name="Alfa Addon"> +<addon id="plugin.video.alfa" name="Alfa" version="2.7.14" provider-name="Alfa Addon"> <requires> <import addon="xbmc.python" version="2.1.0"/> <import addon="script.module.libtorrent" optional="true"/> @@ -19,15 +19,51 @@ </assets> <news>[B]Estos son los cambios para esta versión:[/B] [COLOR green][B]Arreglos[/B][/COLOR] - ¤ cinetux ¤ porntrex ¤ repelis - ¤ 
fembed ¤ uptobox ¤ vivo - ¤ seriesmetro ¤ DivxTotal ¤ EliteTorrent - ¤ EstrenosGo ¤ GranTorrent - - [COLOR green][B]Novedades[/B][/COLOR] - ¤ Pack canales +18 - - Agradecimientos a @paeznet por colaborar en ésta versión + ¤ cinetux ¤ danimados ¤ xms + ¤ bitp ¤ descargacineclasico ¤ dostream + ¤ cinecalidad ¤ pelisplus ¤ Mejortorrent + ¤ Mejortorrent1 ¤ Newpc1 + + </news> + <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description> + <summary lang="en">Browse web pages using Kodi</summary> + <description lang="en">Browse web pages using Kodi, you can easily watch their video content.</description> + <disclaimer>[COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR]</disclaimer> + <platform>all</platform> + <license>GNU GPL v3</license> + <forum>foro</forum> + <website>web</website> + <email>my@email.com</email> + <source>https://github.com/alfa-addon/addon</source> + </extension> + <extension point="xbmc.service" library="videolibrary_service.py" start="login|startup"> + </extension> +</addon> +<?xml version="1.0" encoding="UTF-8" standalone="yes"?> +<addon id="plugin.video.alfa" name="Alfa" version="2.7.14" provider-name="Alfa Addon"> + <requires> + <import addon="xbmc.python" version="2.1.0"/> + <import addon="script.module.libtorrent" optional="true"/> + </requires> + <extension point="xbmc.python.pluginsource" library="default.py"> + <provides>video</provides> + </extension> + <extension point="xbmc.addon.metadata"> + <summary lang="es">Navega con Kodi por páginas web.</summary> + <assets> + <icon>logo-cumple.png</icon> + <fanart>fanart.jpg</fanart> + <screenshot>resources/media/themes/ss/1.jpg</screenshot> + <screenshot>resources/media/themes/ss/2.jpg</screenshot> + <screenshot>resources/media/themes/ss/3.jpg</screenshot> + <screenshot>resources/media/themes/ss/4.jpg</screenshot> + 
</assets> + <news>[B]Estos son los cambios para esta versión:[/B] + [COLOR green][B]Arreglos[/B][/COLOR] + ¤ cinetux ¤ danimados ¤ xms + ¤ bitp ¤ descargacineclasico ¤ dostream + ¤ cinecalidad ¤ pelisplus ¤ Mejortorrent + ¤ Mejortorrent1 ¤ Newpc1 </news> <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description> From a48dec32d740c0ecccfe99472b4de00876126742 Mon Sep 17 00:00:00 2001 From: Alfa <30527549+alfa-addon@users.noreply.github.com> Date: Thu, 22 Nov 2018 13:04:16 -0500 Subject: [PATCH 07/24] v2.7.14.1 --- plugin.video.alfa/addon.xml | 41 ------------------------------------- 1 file changed, 41 deletions(-) diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml index be86f063..61b48f44 100755 --- a/plugin.video.alfa/addon.xml +++ b/plugin.video.alfa/addon.xml @@ -39,44 +39,3 @@ <extension point="xbmc.service" library="videolibrary_service.py" start="login|startup"> </extension> </addon> -<?xml version="1.0" encoding="UTF-8" standalone="yes"?> -<addon id="plugin.video.alfa" name="Alfa" version="2.7.14" provider-name="Alfa Addon"> - <requires> - <import addon="xbmc.python" version="2.1.0"/> - <import addon="script.module.libtorrent" optional="true"/> - </requires> - <extension point="xbmc.python.pluginsource" library="default.py"> - <provides>video</provides> - </extension> - <extension point="xbmc.addon.metadata"> - <summary lang="es">Navega con Kodi por páginas web.</summary> - <assets> - <icon>logo-cumple.png</icon> - <fanart>fanart.jpg</fanart> - <screenshot>resources/media/themes/ss/1.jpg</screenshot> - <screenshot>resources/media/themes/ss/2.jpg</screenshot> - <screenshot>resources/media/themes/ss/3.jpg</screenshot> - <screenshot>resources/media/themes/ss/4.jpg</screenshot> - </assets> - <news>[B]Estos son los cambios para esta versión:[/B] - [COLOR green][B]Arreglos[/B][/COLOR] - ¤ cinetux ¤ danimados ¤ xms - ¤ bitp ¤ descargacineclasico ¤ dostream - ¤ cinecalidad ¤ pelisplus ¤ Mejortorrent - 
¤ Mejortorrent1 ¤ Newpc1 - - </news> - <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description> - <summary lang="en">Browse web pages using Kodi</summary> - <description lang="en">Browse web pages using Kodi, you can easily watch their video content.</description> - <disclaimer>[COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR]</disclaimer> - <platform>all</platform> - <license>GNU GPL v3</license> - <forum>foro</forum> - <website>web</website> - <email>my@email.com</email> - <source>https://github.com/alfa-addon/addon</source> - </extension> - <extension point="xbmc.service" library="videolibrary_service.py" start="login|startup"> - </extension> -</addon> From 97663563a4172334c7cdeaa2cbaf31c5a01a411b Mon Sep 17 00:00:00 2001 From: Alfa <30527549+alfa-addon@users.noreply.github.com> Date: Thu, 22 Nov 2018 13:10:27 -0500 Subject: [PATCH 08/24] v2.7.15 --- plugin.video.alfa/addon.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml index 61b48f44..2f0d41b5 100755 --- a/plugin.video.alfa/addon.xml +++ b/plugin.video.alfa/addon.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="UTF-8" standalone="yes"?> -<addon id="plugin.video.alfa" name="Alfa" version="2.7.14" provider-name="Alfa Addon"> +<addon id="plugin.video.alfa" name="Alfa" version="2.7.15" provider-name="Alfa Addon"> <requires> <import addon="xbmc.python" version="2.1.0"/> <import addon="script.module.libtorrent" optional="true"/> From 9e493fb526a297c87ac8d514427fd7cf1695de4c Mon Sep 17 00:00:00 2001 From: paezner <jsaezu@wanadoo.es> Date: Sat, 24 Nov 2018 02:19:19 +0100 Subject: [PATCH 09/24] Correcciones y nuevos --- plugin.video.alfa/channels/LIKUOO.py | 2 - plugin.video.alfa/channels/TXXX.json | 16 +++ plugin.video.alfa/channels/TXXX.py | 135 
++++++++++++++++++ plugin.video.alfa/channels/absoluporn.py | 1 - plugin.video.alfa/channels/alsoporn.py | 2 - plugin.video.alfa/channels/analdin.py | 3 - plugin.video.alfa/channels/bravoporn.py | 1 - plugin.video.alfa/channels/cliphunter.py | 2 - plugin.video.alfa/channels/coomelonitas.py | 3 - plugin.video.alfa/channels/eroticage.py | 7 +- plugin.video.alfa/channels/fapality.py | 4 - plugin.video.alfa/channels/fetishshrine.py | 1 - plugin.video.alfa/channels/filmoviXXX.py | 15 +- plugin.video.alfa/channels/freeporn.py | 1 - plugin.video.alfa/channels/freepornstreams.py | 27 ++-- plugin.video.alfa/channels/hclips.py | 1 - plugin.video.alfa/channels/hdzog.py | 1 - plugin.video.alfa/channels/hellporno.py | 1 - plugin.video.alfa/channels/hotmovs.py | 3 +- plugin.video.alfa/channels/javlin.py | 5 +- plugin.video.alfa/channels/justporn.py | 3 - plugin.video.alfa/channels/mporno.py | 13 -- plugin.video.alfa/channels/muchoporno.py | 1 - plugin.video.alfa/channels/pandamovie.py | 2 - .../channels/peliculaseroticas.py | 2 +- plugin.video.alfa/channels/perfectgirls.py | 2 - plugin.video.alfa/channels/porn300.py | 6 +- plugin.video.alfa/channels/porneq.py | 2 - plugin.video.alfa/channels/pornhub.py | 50 ++----- plugin.video.alfa/channels/qwertty.py | 5 +- plugin.video.alfa/channels/redtube.py | 2 - plugin.video.alfa/channels/sexgalaxy.py | 3 - plugin.video.alfa/channels/sexofilm.py | 7 +- plugin.video.alfa/channels/sunporno.py | 1 - plugin.video.alfa/channels/tabooshare.py | 5 - plugin.video.alfa/channels/tnaflix.json | 16 +++ plugin.video.alfa/channels/tnaflix.py | 116 +++++++++++++++ plugin.video.alfa/channels/tryboobs.json | 16 +++ plugin.video.alfa/channels/tryboobs.py | 104 ++++++++++++++ plugin.video.alfa/channels/videosXYZ.json | 16 +++ plugin.video.alfa/channels/videosXYZ.py | 72 ++++++++++ plugin.video.alfa/channels/vintagetube.json | 16 +++ plugin.video.alfa/channels/vintagetube.py | 95 ++++++++++++ plugin.video.alfa/channels/vintagexxxsex.json | 16 +++ 
plugin.video.alfa/channels/vintagexxxsex.py | 101 +++++++++++++ plugin.video.alfa/channels/vporn.json | 16 +++ plugin.video.alfa/channels/vporn.py | 109 ++++++++++++++ plugin.video.alfa/channels/watchpornfree.json | 16 +++ plugin.video.alfa/channels/watchpornfree.py | 75 ++++++++++ .../channels/webpeliculasporno.json | 16 +++ .../channels/webpeliculasporno.py | 74 ++++++++++ plugin.video.alfa/channels/woodrocket.json | 16 +++ plugin.video.alfa/channels/woodrocket.py | 75 ++++++++++ plugin.video.alfa/channels/xozilla.json | 16 +++ plugin.video.alfa/channels/xozilla.py | 107 ++++++++++++++ plugin.video.alfa/channels/xtapes.json | 16 +++ plugin.video.alfa/channels/xtapes.py | 121 ++++++++++++++++ plugin.video.alfa/channels/xxxparodyhd.json | 16 +++ plugin.video.alfa/channels/xxxparodyhd.py | 74 ++++++++++ plugin.video.alfa/channels/xxxstreams.json | 16 +++ plugin.video.alfa/channels/xxxstreams.py | 94 ++++++++++++ plugin.video.alfa/channels/youjizz.json | 16 +++ plugin.video.alfa/channels/youjizz.py | 88 ++++++++++++ plugin.video.alfa/channels/youporn.json | 16 +++ plugin.video.alfa/channels/youporn.py | 110 ++++++++++++++ plugin.video.alfa/channels/yuuk.json | 17 +++ plugin.video.alfa/channels/yuuk.py | 69 +++++++++ 67 files changed, 1940 insertions(+), 136 deletions(-) create mode 100644 plugin.video.alfa/channels/TXXX.json create mode 100644 plugin.video.alfa/channels/TXXX.py create mode 100644 plugin.video.alfa/channels/tnaflix.json create mode 100644 plugin.video.alfa/channels/tnaflix.py create mode 100644 plugin.video.alfa/channels/tryboobs.json create mode 100644 plugin.video.alfa/channels/tryboobs.py create mode 100644 plugin.video.alfa/channels/videosXYZ.json create mode 100644 plugin.video.alfa/channels/videosXYZ.py create mode 100644 plugin.video.alfa/channels/vintagetube.json create mode 100644 plugin.video.alfa/channels/vintagetube.py create mode 100644 plugin.video.alfa/channels/vintagexxxsex.json create mode 100644 
plugin.video.alfa/channels/vintagexxxsex.py create mode 100644 plugin.video.alfa/channels/vporn.json create mode 100644 plugin.video.alfa/channels/vporn.py create mode 100644 plugin.video.alfa/channels/watchpornfree.json create mode 100644 plugin.video.alfa/channels/watchpornfree.py create mode 100644 plugin.video.alfa/channels/webpeliculasporno.json create mode 100644 plugin.video.alfa/channels/webpeliculasporno.py create mode 100644 plugin.video.alfa/channels/woodrocket.json create mode 100644 plugin.video.alfa/channels/woodrocket.py create mode 100644 plugin.video.alfa/channels/xozilla.json create mode 100644 plugin.video.alfa/channels/xozilla.py create mode 100644 plugin.video.alfa/channels/xtapes.json create mode 100644 plugin.video.alfa/channels/xtapes.py create mode 100644 plugin.video.alfa/channels/xxxparodyhd.json create mode 100644 plugin.video.alfa/channels/xxxparodyhd.py create mode 100644 plugin.video.alfa/channels/xxxstreams.json create mode 100644 plugin.video.alfa/channels/xxxstreams.py create mode 100644 plugin.video.alfa/channels/youjizz.json create mode 100644 plugin.video.alfa/channels/youjizz.py create mode 100644 plugin.video.alfa/channels/youporn.json create mode 100644 plugin.video.alfa/channels/youporn.py create mode 100644 plugin.video.alfa/channels/yuuk.json create mode 100644 plugin.video.alfa/channels/yuuk.py diff --git a/plugin.video.alfa/channels/LIKUOO.py b/plugin.video.alfa/channels/LIKUOO.py index a293fed5..20925e3c 100644 --- a/plugin.video.alfa/channels/LIKUOO.py +++ b/plugin.video.alfa/channels/LIKUOO.py @@ -42,7 +42,6 @@ def categorias(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<div class="item_p">.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,scrapedthumbnail in matches: scrapedplot = "" scrapedthumbnail = "https:" + scrapedthumbnail @@ -78,7 +77,6 @@ def play(item): logger.info() 
data = scrapertools.cachePage(item.url) itemlist = servertools.find_video_items(data=data) - for videoitem in itemlist: videoitem.title = item.fulltitle videoitem.fulltitle = item.fulltitle diff --git a/plugin.video.alfa/channels/TXXX.json b/plugin.video.alfa/channels/TXXX.json new file mode 100644 index 00000000..ee4be410 --- /dev/null +++ b/plugin.video.alfa/channels/TXXX.json @@ -0,0 +1,16 @@ +{ + "id": "TXXX", + "name": "TXXX", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "http://www.txxx.com/images/desktop-logo.png", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/TXXX.py b/plugin.video.alfa/channels/TXXX.py new file mode 100644 index 00000000..976d1d4c --- /dev/null +++ b/plugin.video.alfa/channels/TXXX.py @@ -0,0 +1,135 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys + +from core import jsontools as json +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import config, logger +from core import httptools +from core import tmdb + +host = 'http://www.txxx.com' + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Ultimas" , action="peliculas", url=host + "/latest-updates/")) + itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="peliculas", url=host + "/top-rated/")) + itemlist.append( Item(channel=item.channel, title="Mas popular" , action="peliculas", url=host + "/most-popular/")) + itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels-list/")) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + return itemlist + + +def search(item, texto): + 
logger.info() + texto = texto.replace(" ", "+") + item.url = host + "/search/s=%s" % texto + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def catalogo(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<div class="channel-thumb">.*?<a href="([^"]+)" title="([^"]+)".*?<img src="([^"]+)".*?<span>(.*?)</span>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedtitle,scrapedthumbnail,num in matches: + scrapedplot = "" + scrapedurl = host + scrapedurl + title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]" + itemlist.append( Item(channel=item.channel, action="peliculas", title=title , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + next_page_url = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<div class="c-thumb">.*?<a href="([^"]+)".*?<img src="([^"]+)".*?<div class="c-thumb--overlay c-thumb--overlay-title">([^"]+)</div>.*?<span>(.*?)</span>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,scrapedtitle,num in matches: + scrapedplot = "" + title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]" + itemlist.append( Item(channel=item.channel, action="peliculas", title=title , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = 
httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = 'data-video-id="\d+">.*?<a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)".*?<span class="thumb__duration">(.*?)</span>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,scrapedtitle,time in matches: + contentTitle = scrapedtitle + title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle + thumbnail = scrapedthumbnail + plot = "" + year = "" + itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} )) + next_page_url = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"') + video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"') + partes = video_url.split('||') + video_url = decode_url(partes[0]) + video_url = re.sub('/get_file/\d+/[0-9a-z]{32}/', partes[1], video_url) + video_url += '&' if '?' in video_url else '?' + video_url += 'lip=' + partes[2] + '<=' + partes[3] + itemlist.append(item.clone(action="play", title=item.title, url=video_url)) + return itemlist + + +def decode_url(txt): + _0x52f6x15 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~' + reto = ''; n = 0 + # En las dos siguientes líneas, ABCEM ocupan 2 bytes cada letra! El replace lo deja en 1 byte. 
!!!!: АВСЕМ (10 bytes) ABCEM (5 bytes) + txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt) + txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M') + + while n < len(txt): + a = _0x52f6x15.index(txt[n]) + n += 1 + b = _0x52f6x15.index(txt[n]) + n += 1 + c = _0x52f6x15.index(txt[n]) + n += 1 + d = _0x52f6x15.index(txt[n]) + n += 1 + + a = a << 2 | b >> 4 + b = (b & 15) << 4 | c >> 2 + e = (c & 3) << 6 | d + reto += chr(a) + if c != 64: reto += chr(b) + if d != 64: reto += chr(e) + + return urllib.unquote(reto) + diff --git a/plugin.video.alfa/channels/absoluporn.py b/plugin.video.alfa/channels/absoluporn.py index 31b7a1bf..cb79c516 100644 --- a/plugin.video.alfa/channels/absoluporn.py +++ b/plugin.video.alfa/channels/absoluporn.py @@ -47,7 +47,6 @@ def categorias(item): data = httptools.downloadpage(item.url).data patron = ' <a href="([^"]+)" class="link1">([^"]+)</a>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" diff --git a/plugin.video.alfa/channels/alsoporn.py b/plugin.video.alfa/channels/alsoporn.py index 3cbc2a9d..d388775b 100644 --- a/plugin.video.alfa/channels/alsoporn.py +++ b/plugin.video.alfa/channels/alsoporn.py @@ -43,7 +43,6 @@ def catalogo(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<li><a href="([^"]+)" title="">.*?<span class="videos-count">([^"]+)</span><span class="title">([^"]+)</span>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,cantidad,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" @@ -59,7 +58,6 @@ def categorias(item): patron = '<a href="([^"]+)">.*?' 
patron += '<img src="([^"]+)" alt="([^"]+)" />' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedthumbnail,scrapedtitle in matches: scrapedplot = "" scrapedurl = scrapedurl.replace("top", "new") diff --git a/plugin.video.alfa/channels/analdin.py b/plugin.video.alfa/channels/analdin.py index c5c2a66a..0eff8b13 100644 --- a/plugin.video.alfa/channels/analdin.py +++ b/plugin.video.alfa/channels/analdin.py @@ -45,7 +45,6 @@ def catalogo(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<li><a class="item" href="([^"]+)" title="([^"]+)">' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" @@ -66,7 +65,6 @@ def categorias(item): patron += 'src="([^"]+)".*?' patron += '<div class="videos">([^"]+)</div>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: scrapedplot = "" scrapedtitle = scrapedtitle + " (" + cantidad + ")" @@ -105,7 +103,6 @@ def play(item): data = httptools.downloadpage(item.url).data patron = 'video_url: \'([^\']+)\'' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl in matches: url = scrapedurl itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url)) diff --git a/plugin.video.alfa/channels/bravoporn.py b/plugin.video.alfa/channels/bravoporn.py index 64e25364..6d69dd60 100644 --- a/plugin.video.alfa/channels/bravoporn.py +++ b/plugin.video.alfa/channels/bravoporn.py @@ -44,7 +44,6 @@ def categorias(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<a href="([^"]+)" class="th">.*?<img src="([^"]+)".*?<span>([^"]+)</span>\s*(\d+) movies.*?</strong>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for 
scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: scrapedplot = "" scrapedtitle = scrapedtitle + " (" + cantidad + ")" diff --git a/plugin.video.alfa/channels/cliphunter.py b/plugin.video.alfa/channels/cliphunter.py index a97e0b70..52ec1d56 100644 --- a/plugin.video.alfa/channels/cliphunter.py +++ b/plugin.video.alfa/channels/cliphunter.py @@ -44,7 +44,6 @@ def catalogo(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<a href="([^"]+)">\s*<img src=\'([^\']+)\'/>.*?<span>([^"]+)</span>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedthumbnail,scrapedtitle in matches: scrapedplot = "" scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/movies" @@ -63,7 +62,6 @@ def categorias(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)"/>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,scrapedthumbnail in matches: scrapedplot = "" scrapedtitle = scrapedtitle diff --git a/plugin.video.alfa/channels/coomelonitas.py b/plugin.video.alfa/channels/coomelonitas.py index e641ec02..8745600c 100644 --- a/plugin.video.alfa/channels/coomelonitas.py +++ b/plugin.video.alfa/channels/coomelonitas.py @@ -36,11 +36,8 @@ def search(item, texto): def categorias(item): itemlist = [] data = scrapertools.cache_page(item.url) - #data = scrapertools.get_match(data,'<div class="sidetitle">Categorías</div>(.*?)</ul>') - #<li class="cat-item cat-item-203077"><a href="http://www.coomelonitas.com/Categoria/asiaticas">ASIÁTICAS</a> patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" diff --git a/plugin.video.alfa/channels/eroticage.py b/plugin.video.alfa/channels/eroticage.py index 
2949a4f8..a0c283c9 100644 --- a/plugin.video.alfa/channels/eroticage.py +++ b/plugin.video.alfa/channels/eroticage.py @@ -43,7 +43,6 @@ def catalogo(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<a class=""\s+title="([^"]+)"\s+href="([^"]+)">' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedtitle,scrapedurl in matches: scrapedplot = "" scrapedthumbnail = "" @@ -59,7 +58,6 @@ def categorias(item): data = scrapertools.get_match(data,'<h2>TAGS</h2>(.*?)<div class="sideitem"') patron = '<a href="(.*?)".*?>(.*?)</a>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" @@ -92,9 +90,8 @@ def findvideos(item): itemlist = [] data = httptools.downloadpage(item.url).data data = scrapertools.get_match(data,'<div id="wrapper" class="ortala">(.*?)<div class="butonlar">') - patron = '<iframe.*?src="([^"]+)"' + patron = '<iframe\s+src="([^"]+)"' matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl in matches: itemlist.append( Item(action="play", title=scrapedurl, fulltitle = item.title, url=scrapedurl)) return itemlist @@ -104,7 +101,7 @@ def play(item): data = scrapertools.cachePage(item.url) itemlist = servertools.find_video_items(data=data) for videoitem in itemlist: - videoitem.title = item.fulltitle + videoitem.title = item.title videoitem.fulltitle = item.fulltitle videoitem.thumbnail = item.thumbnail videochannel=item.channel diff --git a/plugin.video.alfa/channels/fapality.py b/plugin.video.alfa/channels/fapality.py index 737fa0a0..fc850405 100644 --- a/plugin.video.alfa/channels/fapality.py +++ b/plugin.video.alfa/channels/fapality.py @@ -45,7 +45,6 @@ def categorias(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<div class="item"><a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)">.*?<div class="right">([^"]+)</div>' matches = 
re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: scrapedplot = "" scrapedtitle = scrapedtitle.replace("movies", "") + " (" + cantidad + ")" @@ -79,9 +78,6 @@ def play(item): logger.info() itemlist = [] data = scrapertools.cachePage(item.url) - #<source id="video_source_1" src="https://fapality.com/get_file/1/f82551e4920151d30f5cf7ffd0307e44/23000/23869/23869.mp4/?br=2052" type="video/mp4" data-is-hd="true" title="720p"><source id="video_source_2" src="https://fapality.com/get_file/1/2bcbf0efc756b6abfbf999c0a788c743/23000/23869/23869_480p.mp4/?br=1271" type="video/mp4" title="480p"><source id="video_source_3" src="https://fapality.com/get_file/1/d7be2e24a8a6bd6d1e10237c10adefb0/23000/23869/23869_240p.mp4/?br=516" type="video/mp4" title="240p"></video><div class="fluid-end-related" id="main_video_fluid_end_related" style='display:none'><div class="fluid-end-related-bg"><div class="fluid-end-tabs"><ul class="fluid-end-tabs-container"><li class="r_share">Share</li><li class="r_related active" onclick="myFP.getRelated('related','/related_videos_html.php?video_id=23869&mode_related=3');">Related Videos</li><li class="r_toprated" onclick="myFP.getRelated('toprated','/related_videos_html.php?video_id=23869&sort_by=rating&mode_related=3');">Related Top Rated Videos</li><li class="r_mostviewed" onclick="myFP.getRelated('mostviewed','/related_videos_html.php?video_id=23869&sort_by=video_viewed&mode_related=3');">Related Popular Videos</li><li class="r_channels" onclick="myFP.getRelated('channels','/related_videos_html.php?video_id=23869&mode_related=1&sort_by=last_time_view_date');">More from Nubiles-Porn</li><li class="r_models" onclick="myFP.getRelated('models','/related_videos_html.php?video_id=23869&mode_related=4&sort_by=last_time_view_date');">More Models Videos</li></ul></div><div class="fluid-end-related-body"><span class="fluid-end-related-close-btn" 
onclick="myFP.hideRelated(true);"><i class="fa fa-times"></i></span><div class="fluid-end-tab-content"><div id="r_share" class="fluid-end-tab-page"><div class="fluid-end-share-row"><div class="fluid-end-share-title">Share</div><div class="fluid-end-share-content"><div id="share_social" class="fluid-end-social" data-url="https://fapality.com/23869/" data-title="Amia Miley fucks GYM buddy and jiggles her big fake boobs" data-description="Amia Miley is a sporty brunette with ripped six pack and silicone implants in ass and titties. She works out at the GYM with her beefy boy, getting her pussy licked and fucked missionary style on the bench. Her big fake boobs and her round booty jiggle during doggystyle pounding. Amia rides it like a cowgirl and takes cum on face." data-media="https://i.fapality.com/videos_screenshots/23000/23869/preview_480p.mp4.jpg"></div></div></div><div class="fluid-end-share-row"><div class="fluid-end-share-title">Video Url:</div><div class="fluid-end-share-content"><input type="text" onclick="this.select();" id="current_url" readonly="readonly" value="https://fapality.com/23869/"/></div></div><div class="fluid-end-share-row"><div class="fluid-end-share-title">Embed code:</div><div class="fluid-end-share-content"><textarea onclick="this.select();" readonly="readonly" id="share_embed" rows="3"></textarea></div></div></div><div id="r_related" class="fluid-end-tab-page fluid-end-video-thumbs active-page"></div><div id="r_toprated" class="fluid-end-tab-page fluid-end-video-thumbs"></div><div id="r_mostviewed" class="fluid-end-tab-page fluid-end-video-thumbs"></div><div id="r_channels" class="fluid-end-tab-page fluid-end-video-thumbs"></div><div id="r_models" class="fluid-end-tab-page fluid-end-video-thumbs"></div></div></div></div></div></div><script src="/player/fplayers/fplayer.js"></script><link href="/player/fplayers/fplayer.css" rel="stylesheet" type="text/css"><script>var is_playlist = document.querySelector('.watch-list') ? 
true : false;var allow_download = is_playlist ? false : true;video_id = 23869;if(document.getElementById('video_source_2')) {document.getElementById('video_source_2').setAttribute('data-default','true');} else {document.getElementById('video_source_1').setAttribute('data-default','true');}function getEmbed() {var embedCode = '<iframe width="924" height="521" src="https://fapality.com/embed/23869" frameborder="0" allowfullscreen webkitallowfullscreen mozallowfullscreen oallowfullscreen msallowfullscreen>';embedCode += '</iframe>';return embedCode;}document.getElementById('share_embed').value = getEmbed();var adList = [];if(fluidPlayerClass.getCookie('pb_vast')==1) {adList.push({roll: 'preRoll',vastTag: "https://syndication.exosrv.com/splash.php?idzone=3180702",});} else {adList.push({roll: 'preRoll',vastTag: "https://syndication.exosrv.com/splash.php?idzone=2946968",});}adList.push({roll: 'midRoll',vastTag: 'https://syndication.exosrv.com/splash.php?idzone=2947082',size : '728x90',cookieTime : 1,timer:10,});htmlOnPauseBlock = {html : '<div class="fluid-b"><div class="fluid-b-title">Advertisement</div><div class="fluid-b-content"><iframe src="/b/inplayer.html" width="300" height="250" frameborder="0" scrolling="no" allowtransparency="true" marginheight="0" marginwidth="0"></iframe></div><div class="fluid-b-footer"><span onclick="myFP.play()" class="fluid-b-btn">Continue Play</span></div><span onclick="myFP.play()" class="fluid-b-close-btn"><i class="fa fa-times"></i></span></div>',height: 277,width:304,onBeforeShow : function() {}};var myFP = fluidPlayer('main_video',{layoutControls: {timelinePreview: {file: 'https://fapality.com/thumbnails.vtt?id=23869&i=5&d=420&format=inplayer',type: 'VTT'},allowTheatre: true,allowDownload: allow_download,primaryColor:'#fb5350',shareCs:{url : 'https://fapality.com/cs/nubiles-porn/',title : 'View full video at Nubiles-Porn',},playbackRateEnabled: true,htmlOnPauseBlock: htmlOnPauseBlock,controlBar: {autoHide: true,autoHideTimeout: 
3,animated: true,},},vastOptions: {adList: adList,vastAdvanced: {vastLoadedCallback: (function() {if(fluidPlayerClass.getCookie('pb_source')=="1") {fluidPlayerClass.setCookie('pb_vast',1,6);}}),noVastVideoCallback: (function() {}),vastVideoSkippedCallback: (function() {}),vastVideoEndedCallback: (function() {})},adCTAText: false,vastTimeout: 5000,}});var player_obj = myFP;</script><div class="cs"><noindex><a href="https://fapality.com/cs/nubiles-porn/" rel="nofollow" target="_blank" title="Nubiles-Porn"><img src="https://fapality.com/contents/content_sources/112/c1_nubiles-porn.png" alt="Nubiles-Porn"></a></noindex></div></div><meta itemprop="duration" content="T7M00S" /><meta itemprop="thumbnailUrl" content="https://i.fapality.com/videos_screenshots/23000/23869/preview_480p.mp4.jpg" /><meta itemprop="embedUrl" content="https://fapality.com/embed/23869/"><meta itemprop="requiresSubscription" content="false"><meta itemprop="uploadDate" content="2018-11-01"><meta itemprop="encodingFormat" content="mpeg4"><div class="simple-toolbar"><div class="btn-group right"><a class="btn toggle" data-toggle-id="#shares"><i class="fa fa-share-alt"></i> Share or Embed <i class="fa fa-caret-down"></i></a></div><div class="btn-group right"><span data-tooltip aria-haspopup="true" title="Report Inappropriate Content" id="report_button" data-dropdown="report" aria-controls="report" aria-expanded="false" class="btn radius"><i class="fa fa-flag"></i></span></div><form id="report" data-abide class="f-dropdown small" data-dropdown-content class="f-dropdown" aria-hidden="true" tabindex="-1"><div class="box"><div class="title">Report Inappropriate Content</div><div class="content"><label for="flag-1"><input type="radio" id="flag-1" value="flag_inappropriate_video" name="flag_id"> Inappropriate Video</label><label for="flag-2"><input type="radio" id="flag-2" value="flag_error" name="flag_id"> Error (no video, no sound)</label><label for="flag-3"><input type="radio" id="flag-3" 
value="copyrighted_video" name="flag_id"> Copyright material</label><textarea required name="flag_message" id="report_text" rows="3" placeholder="Please tell us the reason"></textarea><small class="error">The field is required</small><input type="hidden" name="action" value="flag"><input type="hidden" name="mode" value="async"><input type="hidden" name="video_id" value="23869"></div><div class="bottom"><span id="send_report" class="btn">Send Report</span></div></div></form><div class="btn-group right"><a href="https://fapality.com/login.php?action=not_allowed" data-reveal-id="login_wrapper" data-reveal-ajax="/login.php?m=1&action=not_allowed" class="btn"><i class="fa fa-plus-circle"></i> Add to Faplist</a></div><div class="btn-group right"><a data-tooltip aria-haspopup="true" href="https://fapality.com/login.php?action=not_allowed" title="Save in favourites" data-reveal-id="login_wrapper" data-reveal-ajax="/login.php?m=1&action=not_allowed" class="btn"><i class="fa fa-heart"></i><i class="fa fa-plus"></i></a><span class="btn btn-content">0</span></div><div class="btn-group likes" itemprop="aggregateRating" itemscope itemtype="http://schema.org/AggregateRating"><span class="btn" id="like" data-tooltip aria-haspopup="true" title="I Like It"><i class="fa fa-thumbs-up"></i> <span class="g_hidden">I Like It</span></span><meta itemprop="bestRating" content="100"><meta itemprop="worstRating" content="0"><span class="btn btn-content"><span class="rating-info"><span id="rating_value" itemprop="ratingValue">94</span>% (<span class="tiny" id="rating_amount" itemprop="ratingCount">3</span>)</span><span class="rating-line"><span style="width:93.334px"></span></span></span><span class="btn" id="dislike" data-tooltip aria-haspopup="true" title="I Dislike It"><i class="fa fa-thumbs-down"></i> <span class="g_hidden">I Dislike It</span></span></div><div class="btn-group"><a href="https://fapality.com/login.php?action=not_allowed" data-reveal-id="login_wrapper" 
data-reveal-ajax="/login.php?m=1&action=not_allowed" class="btn downloadVideos"><i class="fa fa-download"></i> Download (105.22 Mb)</a></div><i class="fa fa-spin fa-spinner ajax-loader g_hidden"></i><div class="share toolbox row" id="shares"><div class="columns large-6"><label>Embed code:</label><textarea id="embed_code" readonly class="copy_click" rows="3"></textarea></div><div class="columns large-6"><label>Share video:</label><div class="addthis_sharing_toolbox"></div><input type="text" readonly id="current_url" class="copy_click" value="https://fapality.com/23869/"></div></div></div><div class="simple-footer"><div class="description" itemprop="description">Amia Miley is a sporty brunette with ripped six pack and silicone implants in ass and titties. She works out at the GYM with her beefy boy, getting her pussy licked and fucked missionary style on the bench. Her big fake boobs and her round booty jiggle during doggystyle pounding. Amia rides it like a cowgirl and takes cum on face.</div><div class="right content_source" itemprop="productionCompany" itemscope itemtype="http://schema.org/Organization"><a data-view="/ajax.php?mode=async&function=get_block&block_id=content_source_view_get_channel&cs_id=112" data-dropdown-width="small" class="button" href="https://fapality.com/channels/nubiles-porn/" title="Nubiles-Porn videos"><i class="fa fa-video-camera"></i> Nubiles-Porn</a><meta itemprop="name" content="Nubiles-Porn"><meta itemprop="url" content="https://fapality.com/channels/nubiles-porn/"><script>var addonView = {'name' : 'channel','url' : '/ajax.php?mode=async&function=get_block&block_id=list_content_sources_channel&q=Nubiles-Porn',}</script></div><div class="right content_source"><a class="button info" data-view="/ajax.php?mode=async&function=get_block&block_id=model_view_get_model&model_id=929" data-dropdown-width="small" href="https://fapality.com/pornstars/amia-miley/" title="Amia Miley videos" itemscope itemprop="actor" 
itemtype="http://schema.org/Person"><i class="fa fa-female"></i> Amia Miley<meta itemprop="name" content="Amia Miley"><meta itemprop="url" content="https://fapality.com/pornstars/amia-miley/"></a><script>var addonView = {'name' : 'model','url' : '/ajax.php?mode=async&function=get_block&block_id=list_models_model&model_id=929&q=Amia+Miley',}</script></div><ul class="meta"><li>Added: <span>4 days ago</span> <span>by</span> <a href='/users/1/' data-view='/ajax.php?mode=async&function=get_block&block_id=member_profile_view_get_profile&user_id=1'>Admin</a></li><li>Duration: <span>7:00</span></li><li>Viewed: <span>1,499</span></li></ul><meta itemprop="genre" content="Big tits"><ul class="tags_list"><li>Categories</li><li><a class="main" href="https://fapality.com/categories/big-tits/" title="Big tits videos">Big tits</a></li><li><a href="https://fapality.com/categories/big-tits/workout/" title="Big tits workout videos">workout</a></li><li><a href="https://fapality.com/categories/big-tits/gym/" title="Big tits GYM videos">GYM</a></li><li><a href="https://fapality.com/categories/big-tits/sporty/" title="Big tits sporty videos">sporty</a></li><li><a href="https://fapality.com/categories/big-tits/tanned/" title="Big tits tanned videos">tanned</a></li><li><a href="https://fapality.com/categories/big-tits/athletic/" title="Big tits athletic videos">athletic</a></li><li><a href="https://fapality.com/categories/big-tits/fake-tits/" title="Big tits fake tits videos">fake tits</a></li><li><a href="https://fapality.com/categories/big-tits/fake-ass/" title="Big tits fake ass videos">fake ass</a></li><li><a href="https://fapality.com/categories/big-tits/pussy-licking/" title="Big tits pussy licking videos">pussy licking</a></li><li><a href="https://fapality.com/categories/big-tits/missionary/" title="Big tits missionary videos">missionary</a></li><li><a href="https://fapality.com/categories/big-tits/orgasm/" title="Big tits orgasm videos">orgasm</a></li><li><a 
href="https://fapality.com/categories/big-tits/doggystyle/" title="Big tits doggystyle videos">doggystyle</a></li><li><a href="https://fapality.com/categories/big-tits/riding/" title="Big tits riding videos">riding</a></li><li><a href="https://fapality.com/categories/big-tits/cumshot/" title="Big tits cumshot videos">cumshot</a></li><li><a href="https://fapality.com/categories/big-tits/tattoos/" title="Big tits tattoos videos">tattoos</a></li></ul></div></div></div> - - patron = '<source id="video_source_1" src="([^"]+)"' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl in matches: diff --git a/plugin.video.alfa/channels/fetishshrine.py b/plugin.video.alfa/channels/fetishshrine.py index 7f06b2a1..48e45937 100644 --- a/plugin.video.alfa/channels/fetishshrine.py +++ b/plugin.video.alfa/channels/fetishshrine.py @@ -44,7 +44,6 @@ def categorias(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<a href="([^"]+)" title="([^"]+) porn tube" class="thumb">.*?<img src="([^"]+)".*?<span class="total">([^"]+)</span>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: scrapedplot = "" scrapedtitle = scrapedtitle + " (" + cantidad + ")" diff --git a/plugin.video.alfa/channels/filmoviXXX.py b/plugin.video.alfa/channels/filmoviXXX.py index 5bf7eb4c..44b952c4 100644 --- a/plugin.video.alfa/channels/filmoviXXX.py +++ b/plugin.video.alfa/channels/filmoviXXX.py @@ -27,10 +27,23 @@ def mainlist(item): thumbnail = scrapedthumbnail plot = "" year = "" - itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} )) + itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} )) next_page_url = 
scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">') if next_page_url!="": next_page_url = urlparse.urljoin(item.url,next_page_url) itemlist.append( Item(channel=item.channel , action="mainlist" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) return itemlist + + +def play(item): + logger.info() + data = scrapertools.cachePage(item.url) + itemlist = servertools.find_video_items(data=data) + + for videoitem in itemlist: + videoitem.title = item.title + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + videoitem.channel = item.channel + return itemlist diff --git a/plugin.video.alfa/channels/freeporn.py b/plugin.video.alfa/channels/freeporn.py index 43f149d0..0f60bbba 100644 --- a/plugin.video.alfa/channels/freeporn.py +++ b/plugin.video.alfa/channels/freeporn.py @@ -42,7 +42,6 @@ def categorias(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<li class="thumb thumb-category">.*?<a href="([^"]+)">.*?<img class="lazy" data-original="([^"]+)">.*?<div class="name">([^"]+)</div>.*?<div class="count">(\d+)</div>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: scrapedplot = "" scrapedtitle = scrapedtitle + " (" + cantidad + ")" diff --git a/plugin.video.alfa/channels/freepornstreams.py b/plugin.video.alfa/channels/freepornstreams.py index a6d5e464..c1cee64b 100644 --- a/plugin.video.alfa/channels/freepornstreams.py +++ b/plugin.video.alfa/channels/freepornstreams.py @@ -46,7 +46,6 @@ def catalogo(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<li id="menu-item-\d+".*?u=([^"]+)">(.*?)</a>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" @@ -59,7 +58,6 @@ def categorias(item): data = httptools.downloadpage(item.url).data 
patron = '<li><a href="([^"]+)" rel="nofollow">(.*?)</a>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" @@ -75,25 +73,26 @@ def peliculas(item): patron = '<article id="post-\d+".*?<a href="([^"]+)" rel="bookmark">(.*?)</a>.*?<img src="([^"]+)"' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl,scrapedtitle,scrapedthumbnail in matches: - contentTitle = scrapedtitle title = scrapedtitle thumbnail = scrapedthumbnail.replace("jpg#", "jpg") plot = "" year = "" - itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} )) - + itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, fulltitle=title, infoLabels={'year':year} )) next_page_url = scrapertools.find_single_match(data, '<div class="nav-previous"><a href="([^"]+)"') if next_page_url!="": next_page_url = urlparse.urljoin(item.url,next_page_url) itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) - - - # else: - # patron = '<div class="nav-previous"><a href="(.*?)"' - # next_page = re.compile(patron,re.DOTALL).findall(data) - #next_page = scrapertools.find_single_match(data,'class="last" title=.*?<a href="([^"]+)">') - # next_page = next_page[0] - #next_page = host + next_page - # itemlist.append( Item(channel=item.channel, action="peliculas", title=next_page , text_color="blue", url=next_page ) ) return itemlist + +def play(item): + logger.info() + data = scrapertools.cachePage(item.url) + itemlist = servertools.find_video_items(data=data) + for videoitem in itemlist: + videoitem.title = item.fulltitle + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + 
videochannel=item.channel + return itemlist + \ No newline at end of file diff --git a/plugin.video.alfa/channels/hclips.py b/plugin.video.alfa/channels/hclips.py index ce16f24f..b071e9c2 100644 --- a/plugin.video.alfa/channels/hclips.py +++ b/plugin.video.alfa/channels/hclips.py @@ -43,7 +43,6 @@ def categorias(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<a href="([^"]+)" class="thumb">.*?src="([^"]+)".*?<strong class="title">([^"]+)</strong>.*?<b>(.*?)</b>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedthumbnail,scrapedtitle,vidnum in matches: scrapedplot = "" title = scrapedtitle + " \(" + vidnum + "\)" diff --git a/plugin.video.alfa/channels/hdzog.py b/plugin.video.alfa/channels/hdzog.py index 93d6e11b..9e3e7532 100644 --- a/plugin.video.alfa/channels/hdzog.py +++ b/plugin.video.alfa/channels/hdzog.py @@ -43,7 +43,6 @@ def categorias(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<li>.*?<a href="([^"]+)".*?<img class="thumb" src="([^"]+)" alt="([^"]+)".*?<span class="videos-count">(\d+)</span>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedthumbnail,scrapedtitle,vidnum in matches: scrapedplot = "" title = scrapedtitle + " \(" + vidnum + "\)" diff --git a/plugin.video.alfa/channels/hellporno.py b/plugin.video.alfa/channels/hellporno.py index bad030cc..abc63bf4 100644 --- a/plugin.video.alfa/channels/hellporno.py +++ b/plugin.video.alfa/channels/hellporno.py @@ -41,7 +41,6 @@ def categorias(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+) - Porn videos">.*?<span>(\d+) videos</span>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: scrapedplot = "" scrapedtitle = scrapedtitle + " (" + cantidad + ")" diff --git 
a/plugin.video.alfa/channels/hotmovs.py b/plugin.video.alfa/channels/hotmovs.py index 9ff847eb..691de51a 100644 --- a/plugin.video.alfa/channels/hotmovs.py +++ b/plugin.video.alfa/channels/hotmovs.py @@ -45,7 +45,6 @@ def catalogo(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<a class="thumbnail" href="([^"]+)">.*?<img src="([^"]+)".*?<span class="thumbnail__info__right">\s+([^"]+)\s+</span>.*?<h5>([^"]+)</h5>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches: scrapedplot = "" cantidad = cantidad.replace(" ", "") @@ -66,7 +65,6 @@ def categorias(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<a class="thumbnail" href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)".*?<i class="mdi mdi-video"></i>([^"]+)</div>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: scrapedplot = "" cantidad = cantidad.replace(" ", "") @@ -100,6 +98,7 @@ def peliculas(item): itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) return itemlist + def play(item): logger.info() itemlist = [] diff --git a/plugin.video.alfa/channels/javlin.py b/plugin.video.alfa/channels/javlin.py index e70da654..562c102e 100644 --- a/plugin.video.alfa/channels/javlin.py +++ b/plugin.video.alfa/channels/javlin.py @@ -42,7 +42,6 @@ def categorias(item): data = httptools.downloadpage(item.url).data patron = '<option class="level-0" value="([^"]+)">([^"]+)  \((.*?)\)<' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,number in matches: scrapedplot = "" scrapedthumbnail = "" @@ -59,11 +58,10 @@ def peliculas(item): data = httptools.downloadpage(item.url).data patron = '<div class="featured-wrap clearfix">.*?<a 
href="([^"]+)" title="([^"]+)".*?src="([^"]+)"' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,scrapedthumbnail in matches: scrapedplot = "" itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) - next_page_url = scrapertools.find_single_match(data,'<li><a rel=\'nofollow\' href=\'([^\']+)\' class=\'inactive\'>Next') + next_page_url = scrapertools.find_single_match(data,'<span class=\'currenttext\'>.*?href=\'([^\']+)\' class=\'inactive\'>') if next_page_url!="": next_page_url = urlparse.urljoin(item.url,next_page_url) itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) @@ -75,7 +73,6 @@ def play(item): logger.info() data = scrapertools.cachePage(item.url) itemlist = servertools.find_video_items(data=data) - for videoitem in itemlist: videoitem.title = item.title videoitem.fulltitle = item.fulltitle diff --git a/plugin.video.alfa/channels/justporn.py b/plugin.video.alfa/channels/justporn.py index 38e424e7..4a1dbccd 100644 --- a/plugin.video.alfa/channels/justporn.py +++ b/plugin.video.alfa/channels/justporn.py @@ -44,7 +44,6 @@ def categorias(item): data = httptools.downloadpage(item.url).data patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?<div class="videos">(\d+) video.*?</div>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,numero in matches: scrapedplot = "" scrapedthumbnail = "" @@ -61,7 +60,6 @@ def peliculas(item): data = httptools.downloadpage(item.url).data patron = '<a href="http://xxx.justporno.tv/videos/(\d+)/.*?" 
title="([^"]+)" >.*?data-original="([^"]+)".*?<div class="duration">(.*?)</div>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches: scrapedplot = "" scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle @@ -81,7 +79,6 @@ def play(item): data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) patron = 'video_url: \'([^\']+)\'' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl in matches: scrapedplot = "" itemlist.append(item.clone(channel=item.channel, action="play", title=scrapedurl , url=scrapedurl , plot="" , folder=True) ) diff --git a/plugin.video.alfa/channels/mporno.py b/plugin.video.alfa/channels/mporno.py index 3e8d165c..492b77f9 100644 --- a/plugin.video.alfa/channels/mporno.py +++ b/plugin.video.alfa/channels/mporno.py @@ -44,7 +44,6 @@ def categorias(item): data = httptools.downloadpage(item.url).data patron = '<h3><a href="([^"]+)">(.*?)</a> <small>(.*?)</small></h3>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,cantidad in matches: scrapedplot = scrapedurl.replace("http://mporno.unblckd.org/", "").replace("page1.html", "") scrapedthumbnail = "" @@ -58,9 +57,6 @@ def peliculas(item): itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| |<br>", "", data) - - - patron = '<img class="content_image" src="([^"]+).mp4/.*?" 
alt="([^"]+)".*?this.src="(.*?)"' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl,scrapedtitle,scrapedthumbnail in matches: @@ -76,14 +72,5 @@ def peliculas(item): next_page_url = urlparse.urljoin(item.url,next_page_url) itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) - - # else: - # patron = '<a href=\'([^\']+)\' class="next">Next >></a>' - # next_page = re.compile(patron,re.DOTALL).findall(data) - # next_page = scrapertools.find_single_match(data,'class="last" title=.*?<a href="([^"]+)">') - # plot = item.plot - # next_page = next_page[0] - # next_page = host + plot + next_page - # itemlist.append( Item(channel=item.channel, action="peliculas", title=next_page , text_color="blue", url=next_page, plot=plot ) ) return itemlist diff --git a/plugin.video.alfa/channels/muchoporno.py b/plugin.video.alfa/channels/muchoporno.py index b4c74ab7..9f81d700 100644 --- a/plugin.video.alfa/channels/muchoporno.py +++ b/plugin.video.alfa/channels/muchoporno.py @@ -41,7 +41,6 @@ def categorias(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<a class="muestra-escena muestra-categoria" href="([^"]+)" title="([^"]+)">.*?src="([^"]+)"' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,scrapedthumbnail in matches: scrapedplot = "" scrapedtitle = scrapedtitle diff --git a/plugin.video.alfa/channels/pandamovie.py b/plugin.video.alfa/channels/pandamovie.py index 582d084a..da454282 100644 --- a/plugin.video.alfa/channels/pandamovie.py +++ b/plugin.video.alfa/channels/pandamovie.py @@ -19,7 +19,6 @@ def mainlist(item): itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/list-movies")) itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/list-movies")) itemlist.append( Item(channel=item.channel, 
title="Canal" , action="categorias", url=host + "/list-movies")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) return itemlist @@ -47,7 +46,6 @@ def categorias(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<li><a title=".*?" href="([^"]+)">([^<]+)</a>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" diff --git a/plugin.video.alfa/channels/peliculaseroticas.py b/plugin.video.alfa/channels/peliculaseroticas.py index 63d2ddb8..4291ad2f 100755 --- a/plugin.video.alfa/channels/peliculaseroticas.py +++ b/plugin.video.alfa/channels/peliculaseroticas.py @@ -46,6 +46,6 @@ def mainlist(item): next_page_url = "http://www.peliculaseroticas.net/cine-erotico/" + str(next_page) + ".html" itemlist.append( - Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=next_page_url, folder=True)) + Item(channel=item.channel, action="mainlist", title=">> Página siguiente", url=next_page_url, folder=True)) return itemlist diff --git a/plugin.video.alfa/channels/perfectgirls.py b/plugin.video.alfa/channels/perfectgirls.py index cc7056a9..1d8d6609 100644 --- a/plugin.video.alfa/channels/perfectgirls.py +++ b/plugin.video.alfa/channels/perfectgirls.py @@ -41,7 +41,6 @@ def categorias(item): # data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<li class="additional_list__item"><a href="([^"]+)">([^"]+)</a>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" @@ -79,7 +78,6 @@ def findvideos(item): data = httptools.downloadpage(item.url).data patron = '<source src="([^"]+)" res="\d+" label="([^"]+)"' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: itemlist.append(item.clone(action="play", 
title=scrapedtitle, fulltitle = item.title, url=scrapedurl)) return itemlist diff --git a/plugin.video.alfa/channels/porn300.py b/plugin.video.alfa/channels/porn300.py index b512ffdd..37f5970f 100644 --- a/plugin.video.alfa/channels/porn300.py +++ b/plugin.video.alfa/channels/porn300.py @@ -43,9 +43,8 @@ def catalogo(item): itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a itemprop="url" href="([^"]+)".*?title="([^"]+)">.*?<img itemprop="image" src=([^"]+) alt=.*?</svg> ([^"]+)</li>' + patron = '<a itemprop="url" href="([^"]+)".*?title="([^"]+)">.*?<img itemprop="image" src=([^"]+) alt=.*?</svg>\s+([^"]+) </li>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: scrapedplot = "" scrapedtitle = scrapedtitle + " (" + cantidad +")" @@ -65,11 +64,10 @@ def categorias(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<a itemprop="url" href="([^"]+)".*?title="([^"]+)">.*?<img itemprop="image" src="([^"]+)".*?</svg>([^"]+) </small>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: scrapedplot = "" scrapedtitle = scrapedtitle + " (" + cantidad +")" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) + scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/?sort=latest" itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) next_page_url = scrapertools.find_single_match(data,'<a class="btn btn-primary--light btn-pagination" itemprop="name" href="([^"]+)" title="Siguiente">') if next_page_url!="": diff --git a/plugin.video.alfa/channels/porneq.py b/plugin.video.alfa/channels/porneq.py index 33de1d96..527f83d8 100644 --- a/plugin.video.alfa/channels/porneq.py +++ 
b/plugin.video.alfa/channels/porneq.py @@ -45,7 +45,6 @@ def peliculas(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<a class="clip-link" data-id="\d+" title="([^"]+)" href="([^"]+)">.*?<img src="([^"]+)".*?<span class="timer">(.*?)</span></div>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedtitle,scrapedurl,scrapedthumbnail,scrapedtime in matches: scrapedplot = "" scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle @@ -62,7 +61,6 @@ def play(item): data = httptools.downloadpage(item.url).data patron = '"video-setup".*?file: "(.*?)",' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl in matches: scrapedurl = str(scrapedurl) itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl, diff --git a/plugin.video.alfa/channels/pornhub.py b/plugin.video.alfa/channels/pornhub.py index 7f10160e..22699893 100755 --- a/plugin.video.alfa/channels/pornhub.py +++ b/plugin.video.alfa/channels/pornhub.py @@ -2,7 +2,6 @@ import re import urlparse - from core import httptools from core import scrapertools from core.item import Item @@ -38,27 +37,22 @@ def search(item, texto): def categorias(item): logger.info() itemlist = [] - - # Descarga la página data = httptools.downloadpage(item.url).data data = scrapertools.find_single_match(data, '<div id="categoriesStraightImages">(.*?)</ul>') - - # Extrae las categorias + + patron = '<li class="cat_pic" data-category=".*?' patron += '<a href="([^"]+)".*?' - patron += '<img src="([^"]+)".*?' + patron += 'src="([^"]+)".*?' patron += 'alt="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedthumbnail, scrapedtitle in matches: if "?" 
in scrapedurl: url = urlparse.urljoin(item.url, scrapedurl + "&o=cm") else: url = urlparse.urljoin(item.url, scrapedurl + "?o=cm") - itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, fanart=item.fanart, thumbnail=scrapedthumbnail)) - itemlist.sort(key=lambda x: x.title) return itemlist @@ -66,30 +60,21 @@ def categorias(item): def peliculas(item): logger.info() itemlist = [] - - # Descarga la página data = httptools.downloadpage(item.url).data videodata = scrapertools.find_single_match(data, 'videos search-video-thumbs">(.*?)<div class="reset"></div>') - - # Extrae las peliculas patron = '<div class="phimage">.*?' patron += '<a href="([^"]+)" title="([^"]+).*?' patron += '<var class="duration">([^<]+)</var>(.*?)</div>.*?' patron += 'data-mediumthumb="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(videodata) - for url, scrapedtitle, duration, scrapedhd, thumbnail in matches: - title = scrapedtitle.replace("&amp;", "&") + " (" + duration + ")" - + title = "(" + duration + ") " + scrapedtitle.replace("&amp;", "&") scrapedhd = scrapertools.find_single_match(scrapedhd, '<span class="hd-thumbnail">(.*?)</span>') if scrapedhd == 'HD': title += ' [HD]' - url = urlparse.urljoin(item.url, url) itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, fanart=item.fanart, thumbnail=thumbnail)) - if itemlist: # Paginador patron = '<li class="page_next"><a href="([^"]+)"' @@ -99,29 +84,16 @@ def peliculas(item): itemlist.append( Item(channel=item.channel, action="peliculas", title=">> Página siguiente", fanart=item.fanart, url=url)) - return itemlist - def play(item): logger.info() itemlist = [] - - # Descarga la página - data = httptools.downloadpage(item.url).data - - quality = scrapertools.find_multiple_matches(data, '"id":"quality([^"]+)"') - for q in quality: - match = scrapertools.find_single_match(data, 'var quality_%s=(.*?);' % q) - match = re.sub(r'(/\*.*?\*/)', '', match).replace("+", "") - 
url = "" - for s in match.split(): - val = scrapertools.find_single_match(data, 'var %s=(.*?);' % s.strip()) - if "+" in val: - values = scrapertools.find_multiple_matches(val, '"([^"]+)"') - val = "".join(values) - - url += val.replace('"', "") - itemlist.append([".mp4 %s [directo]" % q, url]) - + data = scrapertools.cachePage(item.url) + patron = '"defaultQuality":true,"format":"","quality":"\d+","videoUrl":"(.*?)"' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl in matches: + url = scrapedurl.replace("\/", "/") + itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url)) return itemlist + diff --git a/plugin.video.alfa/channels/qwertty.py b/plugin.video.alfa/channels/qwertty.py index 16374422..8113ad60 100644 --- a/plugin.video.alfa/channels/qwertty.py +++ b/plugin.video.alfa/channels/qwertty.py @@ -42,7 +42,6 @@ def categorias(item): data = httptools.downloadpage(item.url).data patron = '<li><a href="([^<]+)">(.*?)</a></li>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" @@ -58,7 +57,6 @@ def peliculas(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<article id="post-\d+".*?<a href="([^"]+)" title="([^"]+)">.*?<img data-src="(.*?)".*?<span class="duration"><i class="fa fa-clock-o"></i>([^<]+)</span>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches: scrapedplot = "" title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle @@ -85,4 +83,5 @@ def play(item): scrapedurl = scrapedurl.replace("\/", "/") itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl, thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) - return itemlist \ No newline at end of file + return itemlist 
+ diff --git a/plugin.video.alfa/channels/redtube.py b/plugin.video.alfa/channels/redtube.py index 7cdf10c3..c26185df 100644 --- a/plugin.video.alfa/channels/redtube.py +++ b/plugin.video.alfa/channels/redtube.py @@ -42,7 +42,6 @@ def catalogo(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<a class="pornstar_link js_mpop js-pop" href="([^"]+)".*?"([^"]+)"\s+title="([^"]+)".*?<div class="ps_info_count">\s+([^"]+)\s+Videos' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: scrapedplot = "" scrapedtitle = scrapedtitle + " [COLOR yellow]" + cantidad + "[/COLOR] " @@ -61,7 +60,6 @@ def categorias(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<div class="category_item_wrapper">.*?<a href="([^"]+)".*?data-thumb_url="([^"]+)".*?alt="([^"]+)".*?<span class="category_count">\s+([^"]+) Videos' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: scrapedplot = "" scrapedtitle = scrapedtitle + " (" + cantidad + ")" diff --git a/plugin.video.alfa/channels/sexgalaxy.py b/plugin.video.alfa/channels/sexgalaxy.py index 44ad1113..0967ce96 100644 --- a/plugin.video.alfa/channels/sexgalaxy.py +++ b/plugin.video.alfa/channels/sexgalaxy.py @@ -42,7 +42,6 @@ def canales (item): data = scrapertools.get_match(data,'Top Networks</a>(.*?)</ul>') patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" @@ -59,7 +58,6 @@ def categorias(item): data = scrapertools.get_match(data,'More Categories</a>(.*?)</ul>') patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: scrapedplot 
= "" scrapedthumbnail = "" @@ -75,7 +73,6 @@ def peliculas(item): data = httptools.downloadpage(item.url).data patron = '<div class="post-img small-post-img">.*?<a href="(.*?)" title="(.*?)">.*?<img src="(.*?)"' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,scrapedthumbnail in matches: scrapedplot = "" itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , fulltitle=scrapedtitle , plot=scrapedplot , folder=True) ) diff --git a/plugin.video.alfa/channels/sexofilm.py b/plugin.video.alfa/channels/sexofilm.py index a55589a5..f11d758f 100644 --- a/plugin.video.alfa/channels/sexofilm.py +++ b/plugin.video.alfa/channels/sexofilm.py @@ -47,7 +47,6 @@ def categorias(item): data = scrapertools.get_match(data,'<div class="tagcloud">(.*?)<p>') patron = '<a href="(.*?)".*?>(.*?)</a>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , folder=True) ) return itemlist @@ -60,7 +59,6 @@ def catalogo(item): data = scrapertools.get_match(data,'>Best Porn Studios</a>(.*?)</ul>') patron = '<a href="(.*?)">(.*?)</a>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , folder=True) ) return itemlist @@ -71,7 +69,6 @@ def anual(item): data = httptools.downloadpage(item.url).data patron = '<li><a href="([^<]+)">([^<]+)</a>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" @@ -85,16 +82,14 @@ def peliculas(item): data = httptools.downloadpage(item.url).data patron = '<div 
class="post-thumbnail.*?<a href="([^"]+)" title="(.*?)".*?src="([^"]+)"' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,scrapedthumbnail in matches: scrapedplot = "" - scrapedtitle = scrapedtitle.replace(" Porn DVD", "") + scrapedtitle = scrapedtitle.replace(" Porn DVD", "").replace("Permalink to ", "").replace(" Porn Movie", "") itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">»</a>') if next_page_url!="": next_page_url = urlparse.urljoin(item.url,next_page_url) itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) - return itemlist diff --git a/plugin.video.alfa/channels/sunporno.py b/plugin.video.alfa/channels/sunporno.py index 565f9224..fb062d74 100644 --- a/plugin.video.alfa/channels/sunporno.py +++ b/plugin.video.alfa/channels/sunporno.py @@ -45,7 +45,6 @@ def categorias(item): data = re.sub(r"\n|\r|\t| |<br>", "", data) patron = '<a href="([^"]+)">\s*(.*?)\s*<' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" diff --git a/plugin.video.alfa/channels/tabooshare.py b/plugin.video.alfa/channels/tabooshare.py index aedc7c93..d4ecb8b8 100644 --- a/plugin.video.alfa/channels/tabooshare.py +++ b/plugin.video.alfa/channels/tabooshare.py @@ -18,8 +18,6 @@ def mainlist(item): itemlist = [] itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host)) itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - -# itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) return itemlist @@ 
-30,7 +28,6 @@ def categorias(item): data = scrapertools.get_match(data,'<h3>Categories</h3>(.*?)</ul>') patron = '<li class="cat-item cat-item-\d+"><a href="(.*?)" >(.*?)</a>' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" @@ -45,7 +42,6 @@ def peliculas(item): data = httptools.downloadpage(item.url).data patron = '<div class="post" id="post-\d+">.*?<a href="([^"]+)" title="(.*?)"><img src="(.*?)"' matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) for scrapedurl,scrapedtitle,scrapedthumbnail in matches: scrapedplot = "" scrapedtitle = scrapedtitle.replace(" – Free Porn Download", "") @@ -63,7 +59,6 @@ def play(item): logger.info() data = scrapertools.cachePage(item.url) itemlist = servertools.find_video_items(data=data) - for videoitem in itemlist: videoitem.title = item.title videoitem.fulltitle = item.fulltitle diff --git a/plugin.video.alfa/channels/tnaflix.json b/plugin.video.alfa/channels/tnaflix.json new file mode 100644 index 00000000..58caddcd --- /dev/null +++ b/plugin.video.alfa/channels/tnaflix.json @@ -0,0 +1,16 @@ +{ + "id": "tnaflix", + "name": "tnaflix", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "https://www.tnaflix.com/images/favicons/tnaflix/android-icon-192x192.png", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/tnaflix.py b/plugin.video.alfa/channels/tnaflix.py new file mode 100644 index 00000000..92707e14 --- /dev/null +++ b/plugin.video.alfa/channels/tnaflix.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys +from platformcode import config, logger +from core import scrapertools +from core.item import Item +from core import servertools +from core import httptools +from core import 
tmdb +from core import jsontools + +host = 'https://www.tnaflix.com' + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/new/1")) + itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/popular/?period=month&d=all")) + itemlist.append( Item(channel=item.channel, title="Mejor valorado" , action="peliculas", url=host + "/toprated/?d=all&period=month")) + itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels/all/top-rated/1/all")) + itemlist.append( Item(channel=item.channel, title="PornStars" , action="categorias", url=host + "/pornstars")) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = host + "/search.php?what=%s&tab=" % texto + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def catalogo(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + patron = '<div class="vidcountSp">(\d+)</div>.*?<a class="categoryTitle channelTitle" href="([^"]+)" title="([^"]+)">.*?data-original="([^"]+)"' + matches = re.compile(patron,re.DOTALL).findall(data) + for cantidad,scrapedurl,scrapedtitle,scrapedthumbnail in matches: + scrapedurl = urlparse.urljoin(item.url,scrapedurl) + title = scrapedtitle + " (" + cantidad + ")" + scrapedplot = "" + itemlist.append( Item(channel=item.channel, action="peliculas", title=title , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + next_page_url = scrapertools.find_single_match(data,'<a class="llNav" href="([^"]+)">') + if next_page_url!="": + next_page_url = 
urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + if item.title=="PornStars" : + data = scrapertools.get_match(data,'</i> Hall Of Fame Pornstars</h2>(.*?)</section>') + patron = '<a class="thumb" href="([^"]+)">.*?<img src="([^"]+)".*?<div class="vidcountSp">(.*?)</div>.*?<a class="categoryTitle".*?>([^"]+)</a>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches: + scrapedplot = "" + if item.title=="Categorias" : + scrapedthumbnail = "http:" + scrapedthumbnail + scrapedurl = urlparse.urljoin(item.url,scrapedurl) + if item.title=="PornStars" : + scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "?section=videos" + scrapedtitle = scrapedtitle + " (" + cantidad + ")" + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + next_page_url = scrapertools.find_single_match(data,'<a class="llNav" href="([^"]+)">') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="categorias" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<a class=\'thumb no_ajax\' href=\'(.*?)\'.*?data-original=\'(.*?)\' alt="([^"]+)"><div class=\'videoDuration\'>([^<]+)</div>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches: + url = 
urlparse.urljoin(item.url,scrapedurl) + title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle + contentTitle = title + thumbnail = scrapedthumbnail + plot = "" + year = "" + itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} )) + next_page_url = scrapertools.find_single_match(data,'<a class="llNav" href="([^"]+)">') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + patron = '<meta itemprop="contentUrl" content="([^"]+)" />' + matches = scrapertools.find_multiple_matches(data, patron) + for url in matches: + itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=url, + thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) + return itemlist + diff --git a/plugin.video.alfa/channels/tryboobs.json b/plugin.video.alfa/channels/tryboobs.json new file mode 100644 index 00000000..9fe3080f --- /dev/null +++ b/plugin.video.alfa/channels/tryboobs.json @@ -0,0 +1,16 @@ +{ + "id": "tryboobs", + "name": "tryboobs", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "http://tb3.fuckandcdn.com/tb/tbstatic/v30/common/tryboobs/img/logo.png", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/tryboobs.py b/plugin.video.alfa/channels/tryboobs.py new file mode 100644 index 00000000..3d94886a --- /dev/null +++ b/plugin.video.alfa/channels/tryboobs.py @@ -0,0 +1,104 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re 
+import os, sys +from platformcode import config, logger +from core import scrapertools +from core.item import Item +from core import servertools +from core import httptools +from core import tmdb +from core import jsontools + +host = 'http://www.tryboobs.com' + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host)) + itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/most-popular/week/")) + itemlist.append( Item(channel=item.channel, title="Mejor Valorado" , action="peliculas", url=host + "/top-rated/week/")) + itemlist.append( Item(channel=item.channel, title="Modelos" , action="modelos", url=host + "/models/model-viewed/1/")) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = host + "/search/?q=%s" % texto + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def modelos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<a href="([^"]+)" class="th-model">.*?src="([^"]+)".*?<span class="roliks"><span>(\d+)</span>.*?<span class="title">([^"]+)</span>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches: + scrapedplot = "" + scrapedtitle = scrapedtitle + " (" + cantidad + ")" + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + next_page_url = scrapertools.find_single_match(data,'<li><a class="pag-next" 
href="([^"]+)"><ins>Next</ins></a>') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="modelos" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<a href="([^"]+)" class="th-cat">.*?<img src="([^"]+)".*?<span>(\d+)</span>.*?<span class="title">([^"]+)</span>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches: + scrapedplot = "" + scrapedtitle = scrapedtitle + " (" + cantidad + ")" + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = 'href="([^"]+)"\s*class="th-video.*?<img src="([^"]+)".*?<span class="time">([^"]+)</span>.*?<span class="title">([^"]+)</span>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,duracion,scrapedtitle in matches: + url = scrapedurl + contentTitle = scrapedtitle + title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle + thumbnail = scrapedthumbnail + plot = "" + year = "" + itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} )) + next_page_url = scrapertools.find_single_match(data,'<li><a class="pag-next" href="([^"]+)"><ins>Next</ins></a>') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , 
text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + patron = '<video src="([^"]+)"' + matches = scrapertools.find_multiple_matches(data, patron) + for url in matches: + itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=url, + thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) + return itemlist + diff --git a/plugin.video.alfa/channels/videosXYZ.json b/plugin.video.alfa/channels/videosXYZ.json new file mode 100644 index 00000000..b9726ca3 --- /dev/null +++ b/plugin.video.alfa/channels/videosXYZ.json @@ -0,0 +1,16 @@ +{ + "id": "videosXYZ", + "name": "videosXYZ", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "https://free-porn-videos.xyz/wp-content/uploads/2018/10/cropped-Logo-org-Free-porn-videos.xyz-app-icon-192x192.png", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/videosXYZ.py b/plugin.video.alfa/channels/videosXYZ.py new file mode 100644 index 00000000..73a44dcd --- /dev/null +++ b/plugin.video.alfa/channels/videosXYZ.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys + +from core import jsontools as json +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import config, logger +from core import httptools +from core import tmdb + +host = 'http://free-porn-videos.xyz' + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host)) + itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/topics/porn-videos/")) + itemlist.append( Item(channel=item.channel, title="Parody" , 
action="peliculas", url=host + "/topics/free-porn-parodies/")) + itemlist.append( Item(channel=item.channel, title="BigTits" , action="peliculas", url=host + "/?s=big+tit")) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = host + "/?s=%s" % texto + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def peliculas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<article id="post-\d+".*?<a href="([^"]+)" title="([^"]+)">.*?data-src="([^"]+)"' + matches = re.compile(patron,re.DOTALL).findall(data) + scrapertools.printMatches(matches) + for scrapedurl,scrapedtitle,scrapedthumbnail in matches: + scrapedplot = "" + scrapedtitle = scrapedtitle.replace("Permalink to Watch ", "").replace("Porn Online", "").replace("Permalink to ", "") + itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , contentTitle=scrapedtitle, plot=scrapedplot , folder=True) ) + next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">»</a>') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + scrapedurl = scrapertools.find_single_match(data,'<iframe src="([^"]+)"') + scrapedurl = scrapedurl.replace("%28", "(").replace("%29", ")") + itemlist = servertools.find_video_items(data=data) + for videoitem in itemlist: + videoitem.title = item.title + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + 
videoitem.channel = item.channel + return itemlist + diff --git a/plugin.video.alfa/channels/vintagetube.json b/plugin.video.alfa/channels/vintagetube.json new file mode 100644 index 00000000..ddf36990 --- /dev/null +++ b/plugin.video.alfa/channels/vintagetube.json @@ -0,0 +1,16 @@ +{ + "id": "vintagetube", + "name": "vintagetube", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "http://www.vintagexxxsex.com/images/vintagexxxsex.png", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/vintagetube.py b/plugin.video.alfa/channels/vintagetube.py new file mode 100644 index 00000000..725fd907 --- /dev/null +++ b/plugin.video.alfa/channels/vintagetube.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys +from core import jsontools as json +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import config, logger +from core import httptools +from core import tmdb + +host = 'http://www.vintagetube.club' + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host + "/tube/last-1/")) + itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/tube/popular-1/")) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = host + "/search/%s" % texto + item.url = item.url + "/popular-1/" + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def categorias(item): + logger.info() + itemlist = [] + data = 
httptools.downloadpage(item.url).data + patron = '<div class="prev prev-ct">.*?<a href="(.*?)">.*?<img src="(.*?)".*?<span class="prev-tit">(.*?)</span>' + matches = re.compile(patron,re.DOTALL).findall(data) + scrapertools.printMatches(matches) + for scrapedurl,scrapedthumbnail,scrapedtitle in matches: + scrapedplot = "" + scrapedtitle = str(scrapedtitle) + scrapedurl = host + scrapedurl + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<div class="prev">.*?<a href="(.*?)">.*?<img src="(.*?)">.*?<span class="prev-tit">(.*?)</span>.*?<div class="prev-dur"><span>(.*?)</span>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches: + scrapedplot = "" + scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + str(scrapedtitle) + scrapedurl = scrapedurl.replace("/xxx.php?tube=", "") + scrapedurl = host + scrapedurl + itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + current_page = scrapertools.find_single_match(data,'<li><span class="page">(.*?)</span></li>') + next_page = int(current_page or 1) + 1 + url = item.url + url_page = current_page + "/" + url = url.replace(url_page, "") + next_page_url = url + str(next_page)+"/" + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + scrapedurl = scrapertools.find_single_match(data,'<iframe frameborder=0 scrolling="no" src=\'(.*?)\'') + if scrapedurl == "": + scrapedurl = 
scrapertools.find_single_match(data,'<iframe src="(.*?)"') + scrapedurl = scrapedurl.replace ("http:", "") + data = httptools.downloadpage("http:" + scrapedurl).data + else: + data = httptools.downloadpage(scrapedurl).data + scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"') + data = httptools.downloadpage("https:" + scrapedurl).data + media_url = scrapertools.find_single_match(data,'<source src="(.*?)"') + itemlist = [] + itemlist.append(Item(channel=item.channel, action="play", title=media_url, fulltitle=media_url, url=media_url, + thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) + return itemlist + diff --git a/plugin.video.alfa/channels/vintagexxxsex.json b/plugin.video.alfa/channels/vintagexxxsex.json new file mode 100644 index 00000000..6c51cae5 --- /dev/null +++ b/plugin.video.alfa/channels/vintagexxxsex.json @@ -0,0 +1,16 @@ +{ + "id": "vintagexxxsex", + "name": "vintagexxxsex", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "http://www.vintagexxxsex.com/images/vintagexxxsex.png", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/vintagexxxsex.py b/plugin.video.alfa/channels/vintagexxxsex.py new file mode 100644 index 00000000..17018db9 --- /dev/null +++ b/plugin.video.alfa/channels/vintagexxxsex.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys +from core import jsontools as json +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import config, logger +from core import httptools +from core import tmdb + +host = 'http://www.vintagexxxsex.com' + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Top" , action="peliculas", url=host + "/all-top/1/")) + itemlist.append( 
Item(channel=item.channel, title="Novedades" , action="peliculas", url=host + "/all-new/1/")) + itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/all-longest/1/")) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = host + "/?s=%s" % texto + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def categorias(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<li><a href="([^"]+)"><i class="fa fa-tag"></i>(.*?)</a>' + matches = re.compile(patron,re.DOTALL).findall(data) + scrapertools.printMatches(matches) + for scrapedurl,scrapedtitle in matches: + scrapedplot = "" + scrapedthumbnail = "" + scrapedurl = host + scrapedurl + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<div class="th">.*?<a href="([^"]+)".*?<img src="([^"]+)".*?<span class="th_nm">([^"]+)</span>.*?<i class="fa fa-clock-o"></i>([^"]+)</span>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,scrapedtitle,time in matches: + contentTitle = scrapedtitle + title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle + scrapedurl = scrapedurl.replace("/up.php?xxx=", "") + scrapedurl = host + scrapedurl + thumbnail = scrapedthumbnail + plot = "" + year = "" + itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, 
infoLabels={'year':year} )) + + + next_page_url = scrapertools.find_single_match(data,'<li><span class="pg_nm">\d+</span></li>.*?href="([^"]+)"') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + + + # else: + # patron = '<li><span class="pg_nm">\d+</span></li>.*?href="([^"]+)"' + # next_page = re.compile(patron,re.DOTALL).findall(data) + # next_page = item.url + next_page[0] + # itemlist.append( Item(channel=item.channel, action="peliculas", title=next_page[0] , text_color="blue", url=next_page[0] ) ) + return itemlist + + + +def play(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"') + data = httptools.downloadpage(scrapedurl).data + scrapedurl = scrapertools.find_single_match(data,'<source src="(.*?)"') + if scrapedurl == "": + scrapedurl = "http:" + scrapertools.find_single_match(data,'<iframe src="(.*?)"') + data = httptools.downloadpage(scrapedurl).data + scrapedurl = scrapertools.find_single_match(data,'file: "(.*?)"') + itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl, + thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) + return itemlist + diff --git a/plugin.video.alfa/channels/vporn.json b/plugin.video.alfa/channels/vporn.json new file mode 100644 index 00000000..a6579d50 --- /dev/null +++ b/plugin.video.alfa/channels/vporn.json @@ -0,0 +1,16 @@ +{ + "id": "vporn", + "name": "vporn", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "https://th-us2.vporn.com/images/logo%20Dark%20theme.png", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/vporn.py 
b/plugin.video.alfa/channels/vporn.py new file mode 100644 index 00000000..3b5bddb6 --- /dev/null +++ b/plugin.video.alfa/channels/vporn.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys +from core import jsontools as json +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import config, logger +from core import httptools +from core import tmdb + +host = 'https://www.vporn.com' + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Novedades" , action="peliculas", url=host + "/newest/month/")) + itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/views/month/")) + itemlist.append( Item(channel=item.channel, title="Mejor Valoradas" , action="peliculas", url=host + "/rating/month/")) + itemlist.append( Item(channel=item.channel, title="Favoritas" , action="peliculas", url=host + "/favorites/month/")) + itemlist.append( Item(channel=item.channel, title="Mas Votada" , action="peliculas", url=host + "/votes/month/")) + itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/longest/month/")) + itemlist.append( Item(channel=item.channel, title="PornStar" , action="catalogo", url=host + "/pornstars/")) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = host + "/search?q=%s" % texto + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def catalogo(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| 
|<br>", "", data) + patron = '<div class=\'star\'>.*?<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+)".*?<span> (\d+) Videos' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: + scrapedplot = "" + scrapedtitle = scrapedtitle + " (" + cantidad + ")" + scrapedurl = host + scrapedurl + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + next_page_url = scrapertools.find_single_match(data,'<a class="next" href="([^"]+)">') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="catalogo" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + data = scrapertools.get_match(data,'<div class="cats-all categories-list">(.*?)</div>') + patron = '<a href="([^"]+)".*?>([^"]+)</a>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedtitle in matches: + scrapedplot = "" + scrapedthumbnail = "" + scrapedurl = host + scrapedurl + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<div class="video">.*?<a href="([^"]+)".*?<span class="time">(.*?)</span>.*?<img src="([^"]+)" alt="([^"]+)"' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,time,scrapedthumbnail,scrapedtitle in matches: + title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle + thumbnail = 
scrapedthumbnail + plot = "" + year = "" + itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} )) + next_page_url = scrapertools.find_single_match(data,'<a class="next.*?title="Next Page" href="([^"]+)">') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + patron = '<source src="([^"]+)" type="video/mp4" label="([^"]+)"' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl,scrapedtitle in matches: + itemlist.append(item.clone(action="play", title=scrapedtitle, fulltitle = item.title, url=scrapedurl)) + return itemlist + diff --git a/plugin.video.alfa/channels/watchpornfree.json b/plugin.video.alfa/channels/watchpornfree.json new file mode 100644 index 00000000..c6ad57a7 --- /dev/null +++ b/plugin.video.alfa/channels/watchpornfree.json @@ -0,0 +1,16 @@ +{ + "id": "watchpornfree", + "name": "watchpornfree", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "https://watchpornfree.ws/wp-content/uploads/2018/03/Untitled-2.png", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/watchpornfree.py b/plugin.video.alfa/channels/watchpornfree.py new file mode 100644 index 00000000..f2745a6f --- /dev/null +++ b/plugin.video.alfa/channels/watchpornfree.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys +from core import jsontools as json +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import config, 
logger +from core import httptools +from core import tmdb + +# https://playpornfree.org/ https://mangoporn.net/ https://watchfreexxx.net/ https://losporn.org/ https://xxxstreams.me/ https://speedporn.net/ + +host = 'https://watchpornfree.ws' + +def mainlist(item): + logger.info("") + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/movies")) + itemlist.append( Item(channel=item.channel, title="Parodia" , action="peliculas", url=host + "/category/parodies-hd")) + itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/category/clips-scenes")) + itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host)) + itemlist.append( Item(channel=item.channel, title="Año" , action="categorias", url=host)) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + return itemlist + + +def search(item, texto): + logger.info("") + texto = texto.replace(" ", "+") + item.url = host + "/?s=%s" % texto + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def categorias(item): + logger.info("") + itemlist = [] + data = httptools.downloadpage(item.url).data + if item.title == "Canal": + data = scrapertools.get_match(data,'>Studios</a>(.*?)</ul>') + if item.title == "Año": + data = scrapertools.get_match(data,'>Years</a>(.*?)</ul>') + if item.title == "Categorias": + data = scrapertools.get_match(data,'>XXX Genres</div>(.*?)</ul>') + patron = '<a href="(.*?)".*?>(.*?)</a>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedtitle in matches: + scrapedplot = "" + scrapedthumbnail = "" + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , 
folder=True) ) + return itemlist + +def peliculas(item): + logger.info("") + itemlist = [] + data = scrapertools.cachePage(item.url) + patron = '<article class="TPost B">.*?<a href="([^"]+)">.*?src="([^"]+)".*?<div class="Title">([^"]+)</div>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,scrapedtitle in matches: + scrapedplot = "" + itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + next_page_url = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next »</a>') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + diff --git a/plugin.video.alfa/channels/webpeliculasporno.json b/plugin.video.alfa/channels/webpeliculasporno.json new file mode 100644 index 00000000..a048c974 --- /dev/null +++ b/plugin.video.alfa/channels/webpeliculasporno.json @@ -0,0 +1,16 @@ +{ + "id": "webpeliculasporno", + "name": "webpeliculasporno", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "http://www.webpeliculasporno.com/wp-content/uploads/logo.png", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/webpeliculasporno.py b/plugin.video.alfa/channels/webpeliculasporno.py new file mode 100644 index 00000000..b1382d98 --- /dev/null +++ b/plugin.video.alfa/channels/webpeliculasporno.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys + +from core import jsontools as json +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import config, logger +from core import 
httptools +from core import tmdb + +host = 'http://www.webpeliculasporno.com' + + +def mainlist(item): + logger.info("pelisalacarta.webpeliculasporno mainlist") + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Ultimas" , action="peliculas", url= host)) + itemlist.append( Item(channel=item.channel, title="Mas vistas" , action="peliculas", url= host + "/?display=tube&filtre=views")) + itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="peliculas", url= host + "/?display=tube&filtre=rate")) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url= host)) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + return itemlist + + +def search(item, texto): + logger.info("pelisalacarta.gmobi mainlist") + texto = texto.replace(" ", "+") + item.url = host + "/?s=%s" % texto + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def categorias(item): + itemlist = [] + data = scrapertools.cache_page(item.url) + patron = '<li class="cat-item [^>]+><a href="([^"]+)" >([^<]+)' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedtitle in matches: + scrapedplot = "" + scrapedthumbnail = "" + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + patron = '<li class="border-radius-5 box-shadow">.*?' + patron += 'src="([^"]+)".*?' 
+ patron += '<a href="([^"]+)" title="([^"]+)">' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedthumbnail,scrapedurl,scrapedtitle in matches: + url = urlparse.urljoin(item.url,scrapedurl) + title = scrapedtitle + contentTitle = title + thumbnail = scrapedthumbnail + plot = "" + itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle )) + next_page_url = scrapertools.find_single_match(data,'<li><a class="next page-numbers" href="([^"]+)">Next') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel, action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + return itemlist + diff --git a/plugin.video.alfa/channels/woodrocket.json b/plugin.video.alfa/channels/woodrocket.json new file mode 100644 index 00000000..02ea74a1 --- /dev/null +++ b/plugin.video.alfa/channels/woodrocket.json @@ -0,0 +1,16 @@ +{ + "id": "woodrocket", + "name": "woodrocket", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "http://woodrocket.com/img//logo.png", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/woodrocket.py b/plugin.video.alfa/channels/woodrocket.py new file mode 100644 index 00000000..e9706cc2 --- /dev/null +++ b/plugin.video.alfa/channels/woodrocket.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys +from core import jsontools as json +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import config, logger +from core import httptools +from core import tmdb + +host = 'http://woodrocket.com' + + +def mainlist(item): + logger.info() + itemlist = 
[] + + itemlist.append( Item(channel=item.channel, title="Novedades" , action="peliculas", url=host + "/porn")) + itemlist.append( Item(channel=item.channel, title="Parodias" , action="peliculas", url=host + "/parodies")) + itemlist.append( Item(channel=item.channel, title="Shows" , action="categorias", url=host + "/series")) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories")) + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<div class="media-panel-image">.*?<img src="(.*?)".*?<a href="(.*?)">(.*?)</a>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedthumbnail,scrapedurl,scrapedtitle in matches: + scrapedplot = "" + scrapedthumbnail = host + scrapedthumbnail + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<div class="media-panel-image">.*?<a href="([^"]+)".*?title="([^"]+)".*?<img src="([^"]+)"' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedtitle,scrapedthumbnail in matches: + plot = "" + contentTitle = scrapedtitle + thumbnail = urlparse.urljoin(item.url,scrapedthumbnail) + title = scrapedtitle + year = "" + itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} )) + next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">»</a></li>') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , 
text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + patron = '<iframe src="(.*?)"' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl in matches: + scrapedurl = scrapedurl + data = httptools.downloadpage(scrapedurl).data + scrapedurl = scrapertools.find_single_match(data,'"quality":"\d*","videoUrl":"(.*?)"') + scrapedurl = scrapedurl.replace("\/", "/") + itemlist.append(item.clone(action="play", title=scrapedurl, fulltitle = item.title, url=scrapedurl)) + return itemlist + + diff --git a/plugin.video.alfa/channels/xozilla.json b/plugin.video.alfa/channels/xozilla.json new file mode 100644 index 00000000..aae90940 --- /dev/null +++ b/plugin.video.alfa/channels/xozilla.json @@ -0,0 +1,16 @@ +{ + "id": "xozilla", + "name": "xozilla", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "https://www.xozilla.com/images/logo.png", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/xozilla.py b/plugin.video.alfa/channels/xozilla.py new file mode 100644 index 00000000..54e6baf4 --- /dev/null +++ b/plugin.video.alfa/channels/xozilla.py @@ -0,0 +1,107 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys + +from platformcode import config, logger +from core import scrapertools +from core.item import Item +from core import servertools +from core import httptools +from core import tmdb +from core import jsontools + +host = 'https://www.xozilla.com' + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/latest-updates/")) + itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/most-popular/")) + itemlist.append( 
Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/top-rated/")) + itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels/")) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = host + "/search/%s/" % texto + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def catalogo(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?<img class="thumb" src="([^"]+)"' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedtitle,scrapedthumbnail in matches: + scrapedplot = "" + thumbnail = "http:" + scrapedthumbnail + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=thumbnail , plot=scrapedplot , folder=True) ) + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?<img class="thumb" src="([^"]+)".*?</i> (\d+) videos</div>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: + scrapedplot = "" + scrapedtitle = scrapedtitle + " (" + cantidad + ")" + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = 
scrapertools.cachePage(item.url) + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<a href="([^"]+)" class="item.*?data-original="([^"]+)".*?alt="([^"]+)".*?<div class="duration">(.*?)</div>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches: + url = scrapedurl + title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle + contentTitle = title + thumbnail = scrapedthumbnail + plot = "" + year = "" + itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} )) + next_page_url = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"') + if next_page_url!="#videos": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + if next_page_url=="#videos": + next_page_url = scrapertools.find_single_match(data,'from:(\d+)">Next</a>') + next_page_url = urlparse.urljoin(item.url,next_page_url) + "/" + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + data = scrapertools.cache_page(item.url) + media_url = scrapertools.find_single_match(data, 'video_alt_url: \'([^\']+)/\'') + if media_url == "": + media_url = scrapertools.find_single_match(data, 'video_url: \'([^\']+)/\'') + itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=media_url, + thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) + return itemlist + + diff --git a/plugin.video.alfa/channels/xtapes.json b/plugin.video.alfa/channels/xtapes.json new file mode 100644 index 00000000..9a1cc2fa --- 
/dev/null +++ b/plugin.video.alfa/channels/xtapes.json @@ -0,0 +1,16 @@ +{ + "id": "xtapes", + "name": "xtapes", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "http://hd.xtapes.to/wp-content/uploads/xtapes.png", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/xtapes.py b/plugin.video.alfa/channels/xtapes.py new file mode 100644 index 00000000..ab9a3802 --- /dev/null +++ b/plugin.video.alfa/channels/xtapes.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys + +from platformcode import config, logger +from core import scrapertools +from core.item import Item +from core import servertools +from core import httptools +from core import tmdb +from core import jsontools + +host = 'http://hd.xtapes.to' + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/full-porn-movies/?display=tube&filtre=date")) + itemlist.append( Item(channel=item.channel, title="Peliculas Estudio" , action="catalogo", url=host)) + itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/?filtre=date&cat=0")) + itemlist.append( Item(channel=item.channel, title="Mas Vistos" , action="peliculas", url=host + "/?display=tube&filtre=views")) + itemlist.append( Item(channel=item.channel, title="Mejor valorado" , action="peliculas", url=host + "/?display=tube&filtre=rate")) + itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/?display=tube&filtre=duree")) + itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host)) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + return 
itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = host + "/?s=%s" % texto + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def catalogo(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + if item.title=="Canal": + data = scrapertools.get_match(data,'<div class="footer-banner">(.*?)<div id="footer-copyright">') + else: + data = scrapertools.get_match(data,'<li id="menu-item-16"(.*?)</ul>') + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<a href="([^"]+)">([^"]+)</a></li>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedtitle in matches: + scrapedplot = "" + scrapedthumbnail = "" + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + next_page_url = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + +def categorias(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = scrapertools.get_match(data,'<a>Categories</a>(.*?)</ul>') + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<a href="([^"]+)">([^"]+)</a>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedtitle in matches: + scrapedplot = "" + scrapedthumbnail = "" + scrapedtitle = scrapedtitle + scrapedurl = urlparse.urljoin(item.url,scrapedurl) + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + return itemlist + + 
+def peliculas(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<li class="border-radius-5 box-shadow">.*?src="([^"]+)".*?<a href="([^"]+)" title="([^"]+)">.*?<div class="time-infos".*?>([^"]+)<span class="time-img">' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedthumbnail,scrapedurl,scrapedtitle,duracion in matches: + url = urlparse.urljoin(item.url,scrapedurl) + title = scrapedtitle + title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle + contentTitle = title + thumbnail = scrapedthumbnail + plot = "" + year = "" + itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle = title, contentTitle = contentTitle, infoLabels={'year':year} )) + next_page_url = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next video') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + next_page_url = next_page_url.replace("#038;cat=0#038;", "").replace("#038;filtre=views#038;", "").replace("#038;filtre=rate#038;", "").replace("#038;filtre=duree#038;", "") + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + variable = scrapertools.find_single_match(data,'<script type=\'text/javascript\'> str=\'([^\']+)\'') + resuelta = re.sub("@[A-F0-9][A-F0-9]", lambda m: m.group()[1:].decode('hex'), variable) + url = scrapertools.find_single_match(resuelta,'<iframe src="([^"]+)"') + data = scrapertools.cachePage(url) + itemlist = servertools.find_video_items(data=data) + for videoitem in itemlist: + videoitem.title = item.title + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + videoitem.channel = 
item.channel + return itemlist + diff --git a/plugin.video.alfa/channels/xxxparodyhd.json b/plugin.video.alfa/channels/xxxparodyhd.json new file mode 100644 index 00000000..54b05121 --- /dev/null +++ b/plugin.video.alfa/channels/xxxparodyhd.json @@ -0,0 +1,16 @@ +{ + "id": "xxxparodyhd", + "name": "xxxparodyhd", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "https://xxxparodyhd.net/wp-content/uploads/2018/04/parodyhd-1.png", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/xxxparodyhd.py b/plugin.video.alfa/channels/xxxparodyhd.py new file mode 100644 index 00000000..1933b4b6 --- /dev/null +++ b/plugin.video.alfa/channels/xxxparodyhd.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys +from core import jsontools as json +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import config, logger +from core import httptools +from core import tmdb + +host = 'https://xxxparodyhd.net' + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/genre/new-release/")) + itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/movies/")) + itemlist.append( Item(channel=item.channel, title="Parodias" , action="peliculas", url=host + "/genre/parodies/")) + itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/genre/clips-scenes/")) + itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/categories")) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories")) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + return itemlist + + +def 
search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = host + "/?s=%s" % texto + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def categorias(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + if item.title == "Canal" : + data = scrapertools.get_match(data,'>Studios</a>(.*?)</ul>') + else: + data = scrapertools.get_match(data,'<div class=\'sub-container\' style=\'display: none;\'><ul class=\'sub-menu\'>(.*?)</ul>') + patron = '<a href="([^"]+)">([^<]+)</a>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedtitle in matches: + scrapedplot = "" + scrapedthumbnail = "" + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<div data-movie-id="\d+" class="ml-item">.*?<a href="([^"]+)".*?oldtitle="([^"]+)".*?<img src="([^"]+)".*?rel="tag">(.*?)</a>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedtitle,scrapedthumbnail,year in matches: + scrapedplot = "" + scrapedtitle = str(scrapedtitle) + " " + year + itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + next_page_url = scrapertools.find_single_match(data,'<li class=\'active\'>.*?href=\'([^\']+)\'>') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + diff --git a/plugin.video.alfa/channels/xxxstreams.json b/plugin.video.alfa/channels/xxxstreams.json new file 
mode 100644 index 00000000..522372fd --- /dev/null +++ b/plugin.video.alfa/channels/xxxstreams.json @@ -0,0 +1,16 @@ +{ + "id": "xxxstreams", + "name": "xxxstreams", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/xxxstreams.py b/plugin.video.alfa/channels/xxxstreams.py new file mode 100644 index 00000000..e0b8f8f3 --- /dev/null +++ b/plugin.video.alfa/channels/xxxstreams.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys + +from core import jsontools as json +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import config, logger +from core import httptools +from core import tmdb + +host = 'http://xxxstreams.org' + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url= host + "/category/full-porn-movie-stream/")) + itemlist.append( Item(channel=item.channel, title="Clips" , action="peliculas", url=host)) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/category/full-porn-movie-stream/")) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = host + "/?s=%s" % texto + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def categorias(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + patron = '<li id="menu-item.*?class="menu-item menu-item-type-taxonomy.*?<a href="([^<]+)">(.*?)</a>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedtitle in matches: + 
scrapedplot = "" + scrapedthumbnail = "" + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + patron = '<div class="entry-content">.*?<img src="([^"]+)".*?<a href="([^<]+)".*?<span class="screen-reader-text">(.*?)</span>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedthumbnail,scrapedurl,scrapedtitle in matches: + scrapedplot = "" + itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + next_page_url = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next →</a>') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + data = scrapertools.get_match(data,'--more--></p>(.*?)/a></p>') + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<a href="([^"]+)".*?class="external">(.*?)<' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedtitle in matches: + scrapedplot = "" + scrapedthumbnail = "" + itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, fulltitle=item.title, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True) ) + return itemlist + + +def play(item): + logger.info() + data = scrapertools.cachePage(item.url) + itemlist = servertools.find_video_items(data=data) + for videoitem in itemlist: + videoitem.title = item.title + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + 
videoitem.channel = item.channel + return itemlist + diff --git a/plugin.video.alfa/channels/youjizz.json b/plugin.video.alfa/channels/youjizz.json new file mode 100644 index 00000000..cdc171b9 --- /dev/null +++ b/plugin.video.alfa/channels/youjizz.json @@ -0,0 +1,16 @@ +{ + "id": "youjizz", + "name": "youjizz", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "https://cdne-static.yjcontentdelivery.com/app/1/images/yjlogo.jpeg", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/youjizz.py b/plugin.video.alfa/channels/youjizz.py new file mode 100644 index 00000000..4f2fedf6 --- /dev/null +++ b/plugin.video.alfa/channels/youjizz.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys +from platformcode import config, logger +from core import scrapertools +from core.item import Item +from core import servertools +from core import httptools +from core import tmdb +from core import jsontools + +host = 'https://www.youjizz.com' + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/newest-clips/1.html")) + itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/most-popular/1.html")) + itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/top-rated-week/1.html")) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = host + "/search/%s-1.html" % texto + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] 
+ + +def categorias(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = scrapertools.get_match(data,'<h4>Trending Categories</h4>(.*?)</ul>') + data = re.sub(r"\n|\r|\t| |<br>", "", data) + itemlist.append( Item(channel=item.channel, action="peliculas", title="Big Tits" , url="https://www.youjizz.com/search/big-tits-1.html?" , folder=True) ) + patron = '<li><a href="([^"]+)">([^"]+)</a>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedtitle in matches: + scrapedplot = "" + scrapedthumbnail = "" + scrapedtitle = scrapedtitle + scrapedurl = urlparse.urljoin(item.url,scrapedurl) + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<div class="video-item">.*?class="frame image" href="([^"]+)".*?data-original="([^"]+)" />.*?<div class="video-title">.*?>(.*?)</a>.*?<span class="time">(.*?)</span>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches: + url = urlparse.urljoin(item.url,scrapedurl) + title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle + contentTitle = title + thumbnail = "http:" + scrapedthumbnail + plot = "" + year = "" + itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} )) + next_page_url = scrapertools.find_single_match(data,'<li><a class="pagination-next" href="([^"]+)">Next »</a>') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , 
folder=True) ) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + data = scrapertools.cache_page(item.url) + media_url = scrapertools.find_single_match(data, '"filename"\:"(.*?)"') + media_url = "https:" + media_url.replace("\\", "") + itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=media_url, + thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) + return itemlist + diff --git a/plugin.video.alfa/channels/youporn.json b/plugin.video.alfa/channels/youporn.json new file mode 100644 index 00000000..d63d7d3c --- /dev/null +++ b/plugin.video.alfa/channels/youporn.json @@ -0,0 +1,16 @@ +{ + "id": "youporn", + "name": "youporn", + "active": true, + "adult": false, + "language": ["*"], + "thumbnail": "https://fs.ypncdn.com/cb/bundles/youpornwebfront/images/l_youporn_black.png", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/youporn.py b/plugin.video.alfa/channels/youporn.py new file mode 100644 index 00000000..aa65ff46 --- /dev/null +++ b/plugin.video.alfa/channels/youporn.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys +from platformcode import config, logger +from core import scrapertools +from core.item import Item +from core import servertools +from core import httptools +from core import tmdb +from core import jsontools + +host = 'https://www.youporn.com' + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/browse/time/")) + itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/browse/views/")) + itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/top_rated/")) + itemlist.append( 
Item(channel=item.channel, title="Pornstars" , action="catalogo", url=host + "/pornstars/most_popular/")) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/alphabetical/")) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = host + "/search/?query=%s" % texto + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def catalogo(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = scrapertools.get_match(data,'<a href="/pornstars/most_popular/" class="selected">All</a>(.*?)<i class=\'icon-menu-right\'></i></a>') + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<a href="([^"]+)".*?data-original="([^"]+)".*?<span class="porn-star-name">([^"]+)</span>.*?<span class="video-count">([^"]+)</span>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: + scrapedplot = "" + scrapedtitle = scrapedtitle + " (" + cantidad + ")" + scrapedurl = urlparse.urljoin(item.url,scrapedurl) + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)" data-page-number=.*?>') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = scrapertools.get_match(data,'<div class=\'row alphabetical\'.*?>(.*?)<h2 
class="heading4">Popular by Country</h2>') + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<a href="([^"]+)".*?data-original="([^"]+)".*?<p>([^"]+)<span>([^"]+)</span>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: + scrapedplot = "" + scrapedthumbnail = "http:" + scrapedthumbnail + scrapedtitle = scrapedtitle + " (" + cantidad +")" + scrapedurl = urlparse.urljoin(item.url,scrapedurl) + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<a href="([^"]+)" class=\'video-box-image\'.*?data-original="([^"]+)".*?<div class="video-box-title">([^"]+)</div>.*?<div class="video-duration">(.*?)</div>' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches: + url = urlparse.urljoin(item.url,scrapedurl) + title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle + contentTitle = title + thumbnail = scrapedthumbnail + plot = "" + year = "" + itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} )) + next_page_url = scrapertools.find_single_match(data,'<div class="prev-next"><a href="([^"]+)"') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url) + patron = 'page_params.video.mediaDefinition =.*?"videoUrl":"([^"]+)"' + matches = 
scrapertools.find_multiple_matches(data, patron) + for scrapedurl in matches: + scrapedurl = scrapedurl.replace("\/", "/") + itemlist.append(item.clone(action="play", title=scrapedurl, fulltitle = item.title, url=scrapedurl)) + return itemlist + + diff --git a/plugin.video.alfa/channels/yuuk.json b/plugin.video.alfa/channels/yuuk.json new file mode 100644 index 00000000..9e9f3053 --- /dev/null +++ b/plugin.video.alfa/channels/yuuk.json @@ -0,0 +1,16 @@ +{ + "id": "yuuk", + "name": "yuuk", + "active": true, + "adult": true, + "language": ["*"], + "thumbnail": "http://yuuk.net/wp-content/uploads/2018/06/yuuk_net_logo.png", + "banner": "", + "categories": [ + "adult" + ], + "settings": [ + + ] +} + diff --git a/plugin.video.alfa/channels/yuuk.py b/plugin.video.alfa/channels/yuuk.py new file mode 100644 index 00000000..ff6857d2 --- /dev/null +++ b/plugin.video.alfa/channels/yuuk.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +import urlparse,urllib2,urllib,re +import os, sys + +from core import jsontools as json +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import config, logger +from core import httptools +from core import tmdb + +host = 'http://yuuk.net' + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host)) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) + itemlist.append( Item(channel=item.channel, title="Buscar" , action="search")) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = host+ "/?s=%s" % texto + try: + return peliculas(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def categorias(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + 
itemlist.append( Item(channel=item.channel, action="peliculas", title="Big Tits" , url="http://yuuk.net/?s=big+tit" , folder=True) ) + patron = 'menu-item-object-category"><a href="([^"]+)">.*?</style>([^"]+)</a>' + matches = re.compile(patron,re.DOTALL).findall(data) + scrapertools.printMatches(matches) + for scrapedurl,scrapedtitle in matches: + scrapedplot = "" + scrapedthumbnail = "" + itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<div class="featured-wrap clearfix">.*?<a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"' + matches = re.compile(patron,re.DOTALL).findall(data) + scrapertools.printMatches(matches) + for scrapedurl,scrapedtitle,scrapedthumbnail in matches: + scrapedplot = "" + itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + next_page_url = scrapertools.find_single_match(data,'<li><a rel=\'nofollow\' href=\'([^\']+)\' class=\'inactive\'>Next') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + + return itemlist + From b68489d04dfeb4ed8805428781816edd75f88c84 Mon Sep 17 00:00:00 2001 From: Kingbox <37674310+lopezvg@users.noreply.github.com> Date: Tue, 27 Nov 2018 18:16:07 +0100 Subject: [PATCH 10/24] =?UTF-8?q?SMB=20client:=20versi=C3=B3n=201.1.25=20d?= =?UTF-8?q?e=20pysmb?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Hace posible el funcionamiento de la Videoteca en un servidor Samba (disco contectado a router, NAS, servidor windows,...) 
--- plugin.video.alfa/core/filetools.py | 21 + plugin.video.alfa/lib/sambatools/libsmb.py | 8 +- .../lib/sambatools/nmb/NetBIOS.py | 293 +- .../lib/sambatools/nmb/NetBIOSProtocol.py | 276 +- plugin.video.alfa/lib/sambatools/nmb/base.py | 360 +- .../lib/sambatools/nmb/nmb_constants.py | 76 +- .../lib/sambatools/nmb/nmb_structs.py | 138 +- plugin.video.alfa/lib/sambatools/nmb/utils.py | 100 +- .../lib/sambatools/smb/SMBConnection.py | 1212 ++-- .../lib/sambatools/smb/SMBHandler.py | 199 +- .../lib/sambatools/smb/SMBProtocol.py | 807 +-- .../lib/sambatools/smb/__init__.py | 2 +- plugin.video.alfa/lib/sambatools/smb/base.py | 5593 +++++++++-------- plugin.video.alfa/lib/sambatools/smb/ntlm.py | 497 +- .../sambatools/smb/security_descriptors.py | 367 ++ .../lib/sambatools/smb/securityblob.py | 272 +- .../lib/sambatools/smb/smb2_constants.py | 216 +- .../lib/sambatools/smb/smb2_structs.py | 1852 +++--- .../lib/sambatools/smb/smb_constants.py | 496 +- .../lib/sambatools/smb/smb_structs.py | 2844 +++++---- .../lib/sambatools/smb/utils/README.txt | 24 +- .../lib/sambatools/smb/utils/__init__.py | 6 +- .../lib/sambatools/smb/utils/md4.py | 508 +- .../lib/sambatools/smb/utils/pyDes.py | 1704 ++--- .../lib/sambatools/smb/utils/sha256.py | 222 +- .../platformcode/platformtools.py | 4 +- .../platformcode/xbmc_videolibrary.py | 4 +- 27 files changed, 9496 insertions(+), 8605 deletions(-) create mode 100644 plugin.video.alfa/lib/sambatools/smb/security_descriptors.py diff --git a/plugin.video.alfa/core/filetools.py b/plugin.video.alfa/core/filetools.py index 4bfc111f..278c9dea 100755 --- a/plugin.video.alfa/core/filetools.py +++ b/plugin.video.alfa/core/filetools.py @@ -576,3 +576,24 @@ def remove_tags(title): return title_without_tags else: return title + + +def remove_smb_credential(path): + """ + devuelve el path sin contraseña/usuario para paths de SMB + @param path: ruta + @type path: str + @return: cadena sin credenciales + @rtype: str + """ + logger.info() + + if not 
path.startswith("smb://"): + return path + + path_without_credentials = scrapertools.find_single_match(path, '^smb:\/\/(?:[^;\n]+;)?(?:[^:@\n]+[:|@])?(?:[^@\n]+@)?(.*?$)') + + if path_without_credentials: + return ('smb://' + path_without_credentials) + else: + return path diff --git a/plugin.video.alfa/lib/sambatools/libsmb.py b/plugin.video.alfa/lib/sambatools/libsmb.py index 74506e92..64130347 100755 --- a/plugin.video.alfa/lib/sambatools/libsmb.py +++ b/plugin.video.alfa/lib/sambatools/libsmb.py @@ -1,18 +1,21 @@ # -*- coding: utf-8 -*- import os +import re from nmb.NetBIOS import NetBIOS from platformcode import logger from smb.SMBConnection import SMBConnection +GitHub = 'https://github.com/miketeo/pysmb' #buscar aquí de vez en cuando la última versiónde SMB-pysmb, y actualizar en Alfa +vesion_actual_pysmb = '1.1.25' #actualizada el 25/11/2018 + remote = None def parse_url(url): # logger.info("Url: %s" % url) url = url.strip() - import re patron = "^smb://(?:([^;\n]+);)?(?:([^:@\n]+)[:|@])?(?:([^@\n]+)@)?([^/]+)/([^/\n]+)([/]?.*?)$" domain, user, password, server_name, share_name, path = re.compile(patron, re.DOTALL).match(url).groups() @@ -29,8 +32,7 @@ def parse_url(url): def get_server_name_ip(server): - import re - if re.compile("^\d+.\d+.\d+.\d+$").findall(server): + if re.compile("^\d+.\d+.\d+.\d+$").findall(server) or re.compile("^([^\.]+\.(?:[^\.]+\.)?(?:\w+)?)$").findall(server): server_ip = server server_name = None else: diff --git a/plugin.video.alfa/lib/sambatools/nmb/NetBIOS.py b/plugin.video.alfa/lib/sambatools/nmb/NetBIOS.py index 89df49e6..34058054 100755 --- a/plugin.video.alfa/lib/sambatools/nmb/NetBIOS.py +++ b/plugin.video.alfa/lib/sambatools/nmb/NetBIOS.py @@ -1,149 +1,144 @@ -import logging -import random -import select -import socket -import time - -from base import NBNS, NotConnectedError -from nmb_constants import TYPE_SERVER - - -class NetBIOS(NBNS): - - log = logging.getLogger('NMB.NetBIOS') - - def __init__(self, broadcast = True, 
listen_port = 0): - """ - Instantiate a NetBIOS instance, and creates a IPv4 UDP socket to listen/send NBNS packets. - - :param boolean broadcast: A boolean flag to indicate if we should setup the listening UDP port in broadcast mode - :param integer listen_port: Specifies the UDP port number to bind to for listening. If zero, OS will automatically select a free port number. - """ - self.broadcast = broadcast - self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - if self.broadcast: - self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) - if listen_port: - self.sock.bind(( '', listen_port )) - - def close(self): - """ - Close the underlying and free resources. - - The NetBIOS instance should not be used to perform any operations after this method returns. - - :return: None - """ - self.sock.close() - self.sock = None - - def write(self, data, ip, port): - assert self.sock, 'Socket is already closed' - self.sock.sendto(data, ( ip, port )) - - def queryName(self, name, ip = '', port = 137, timeout = 30): - """ - Send a query on the network and hopes that if machine matching the *name* will reply with its IP address. - - :param string ip: If the NBNSProtocol instance was instianted with broadcast=True, then this parameter can be an empty string. We will leave it to the OS to determine an appropriate broadcast address. - If the NBNSProtocol instance was instianted with broadcast=False, then you should provide a target IP to send the query. - :param integer port: The NetBIOS-NS port (IANA standard defines this port to be 137). You should not touch this parameter unless you know what you are doing. - :param integer/float timeout: Number of seconds to wait for a reply, after which the method will return None - :return: A list of IP addresses in dotted notation (aaa.bbb.ccc.ddd). On timeout, returns None. 
- """ - assert self.sock, 'Socket is already closed' - - trn_id = random.randint(1, 0xFFFF) - data = self.prepareNameQuery(trn_id, name) - if self.broadcast and not ip: - ip = '<broadcast>' - elif not ip: - self.log.warning('queryName: ip parameter is empty. OS might not transmit this query to the network') - - self.write(data, ip, port) - - return self._pollForNetBIOSPacket(trn_id, timeout) - - def queryIPForName(self, ip, port = 137, timeout = 30): - """ - Send a query to the machine with *ip* and hopes that the machine will reply back with its name. - - The implementation of this function is contributed by Jason Anderson. - - :param string ip: If the NBNSProtocol instance was instianted with broadcast=True, then this parameter can be an empty string. We will leave it to the OS to determine an appropriate broadcast address. - If the NBNSProtocol instance was instianted with broadcast=False, then you should provide a target IP to send the query. - :param integer port: The NetBIOS-NS port (IANA standard defines this port to be 137). You should not touch this parameter unless you know what you are doing. - :param integer/float timeout: Number of seconds to wait for a reply, after which the method will return None - :return: A list of string containing the names of the machine at *ip*. On timeout, returns None. 
- """ - assert self.sock, 'Socket is already closed' - - trn_id = random.randint(1, 0xFFFF) - data = self.prepareNetNameQuery(trn_id, False) - self.write(data, ip, port) - ret = self._pollForQueryPacket(trn_id, timeout) - if ret: - return map(lambda s: s[0], filter(lambda s: s[1] == TYPE_SERVER, ret)) - else: - return None - - # - # Protected Methods - # - - def _pollForNetBIOSPacket(self, wait_trn_id, timeout): - end_time = time.time() + timeout - while True: - try: - _timeout = end_time - time.time() - if _timeout <= 0: - return None - - ready, _, _ = select.select([ self.sock.fileno() ], [ ], [ ], _timeout) - if not ready: - return None - - data, _ = self.sock.recvfrom(0xFFFF) - if len(data) == 0: - raise NotConnectedError - - trn_id, ret = self.decodePacket(data) - - if trn_id == wait_trn_id: - return ret - except select.error, ex: - if type(ex) is types.TupleType: - if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN: - raise ex - else: - raise ex - - # - # Contributed by Jason Anderson - # - def _pollForQueryPacket(self, wait_trn_id, timeout): - end_time = time.time() + timeout - while True: - try: - _timeout = end_time - time.time() - if _timeout <= 0: - return None - - ready, _, _ = select.select([ self.sock.fileno() ], [ ], [ ], _timeout) - if not ready: - return None - - data, _ = self.sock.recvfrom(0xFFFF) - if len(data) == 0: - raise NotConnectedError - - trn_id, ret = self.decodeIPQueryPacket(data) - - if trn_id == wait_trn_id: - return ret - except select.error, ex: - if type(ex) is types.TupleType: - if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN: - raise ex - else: - raise ex + +import os, logging, random, socket, time, select +from base import NBNS, NotConnectedError +from nmb_constants import TYPE_CLIENT, TYPE_SERVER, TYPE_WORKSTATION + +class NetBIOS(NBNS): + + log = logging.getLogger('NMB.NetBIOS') + + def __init__(self, broadcast = True, listen_port = 0): + """ + Instantiate a NetBIOS instance, and creates a IPv4 UDP socket to listen/send NBNS 
packets. + + :param boolean broadcast: A boolean flag to indicate if we should setup the listening UDP port in broadcast mode + :param integer listen_port: Specifies the UDP port number to bind to for listening. If zero, OS will automatically select a free port number. + """ + self.broadcast = broadcast + self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + if self.broadcast: + self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + if listen_port: + self.sock.bind(( '', listen_port )) + + def close(self): + """ + Close the underlying and free resources. + + The NetBIOS instance should not be used to perform any operations after this method returns. + + :return: None + """ + self.sock.close() + self.sock = None + + def write(self, data, ip, port): + assert self.sock, 'Socket is already closed' + self.sock.sendto(data, ( ip, port )) + + def queryName(self, name, ip = '', port = 137, timeout = 30): + """ + Send a query on the network and hopes that if machine matching the *name* will reply with its IP address. + + :param string ip: If the NBNSProtocol instance was instianted with broadcast=True, then this parameter can be an empty string. We will leave it to the OS to determine an appropriate broadcast address. + If the NBNSProtocol instance was instianted with broadcast=False, then you should provide a target IP to send the query. + :param integer port: The NetBIOS-NS port (IANA standard defines this port to be 137). You should not touch this parameter unless you know what you are doing. + :param integer/float timeout: Number of seconds to wait for a reply, after which the method will return None + :return: A list of IP addresses in dotted notation (aaa.bbb.ccc.ddd). On timeout, returns None. + """ + assert self.sock, 'Socket is already closed' + + trn_id = random.randint(1, 0xFFFF) + data = self.prepareNameQuery(trn_id, name) + if self.broadcast and not ip: + ip = '<broadcast>' + elif not ip: + self.log.warning('queryName: ip parameter is empty. 
OS might not transmit this query to the network') + + self.write(data, ip, port) + + return self._pollForNetBIOSPacket(trn_id, timeout) + + def queryIPForName(self, ip, port = 137, timeout = 30): + """ + Send a query to the machine with *ip* and hopes that the machine will reply back with its name. + + The implementation of this function is contributed by Jason Anderson. + + :param string ip: If the NBNSProtocol instance was instianted with broadcast=True, then this parameter can be an empty string. We will leave it to the OS to determine an appropriate broadcast address. + If the NBNSProtocol instance was instianted with broadcast=False, then you should provide a target IP to send the query. + :param integer port: The NetBIOS-NS port (IANA standard defines this port to be 137). You should not touch this parameter unless you know what you are doing. + :param integer/float timeout: Number of seconds to wait for a reply, after which the method will return None + :return: A list of string containing the names of the machine at *ip*. On timeout, returns None. 
+ """ + assert self.sock, 'Socket is already closed' + + trn_id = random.randint(1, 0xFFFF) + data = self.prepareNetNameQuery(trn_id, False) + self.write(data, ip, port) + ret = self._pollForQueryPacket(trn_id, timeout) + if ret: + return map(lambda s: s[0], filter(lambda s: s[1] == TYPE_SERVER, ret)) + else: + return None + + # + # Protected Methods + # + + def _pollForNetBIOSPacket(self, wait_trn_id, timeout): + end_time = time.time() + timeout + while True: + try: + _timeout = end_time - time.time() + if _timeout <= 0: + return None + + ready, _, _ = select.select([ self.sock.fileno() ], [ ], [ ], _timeout) + if not ready: + return None + + data, _ = self.sock.recvfrom(0xFFFF) + if len(data) == 0: + raise NotConnectedError + + trn_id, ret = self.decodePacket(data) + + if trn_id == wait_trn_id: + return ret + except select.error, ex: + if type(ex) is types.TupleType: + if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN: + raise ex + else: + raise ex + + # + # Contributed by Jason Anderson + # + def _pollForQueryPacket(self, wait_trn_id, timeout): + end_time = time.time() + timeout + while True: + try: + _timeout = end_time - time.time() + if _timeout <= 0: + return None + + ready, _, _ = select.select([ self.sock.fileno() ], [ ], [ ], _timeout) + if not ready: + return None + + data, _ = self.sock.recvfrom(0xFFFF) + if len(data) == 0: + raise NotConnectedError + + trn_id, ret = self.decodeIPQueryPacket(data) + + if trn_id == wait_trn_id: + return ret + except select.error, ex: + if type(ex) is types.TupleType: + if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN: + raise ex + else: + raise ex diff --git a/plugin.video.alfa/lib/sambatools/nmb/NetBIOSProtocol.py b/plugin.video.alfa/lib/sambatools/nmb/NetBIOSProtocol.py index 2c8dce2e..3d9b6b87 100755 --- a/plugin.video.alfa/lib/sambatools/nmb/NetBIOSProtocol.py +++ b/plugin.video.alfa/lib/sambatools/nmb/NetBIOSProtocol.py @@ -1,140 +1,136 @@ -import logging -import random -import socket -import time - -from 
twisted.internet import reactor, defer -from twisted.internet.protocol import DatagramProtocol - -from base import NBNS -from nmb_constants import TYPE_SERVER - -IP_QUERY, NAME_QUERY = range(2) - -class NetBIOSTimeout(Exception): - """Raised in NBNSProtocol via Deferred.errback method when queryName method has timeout waiting for reply""" - pass - -class NBNSProtocol(DatagramProtocol, NBNS): - - log = logging.getLogger('NMB.NBNSProtocol') - - def __init__(self, broadcast = True, listen_port = 0): - """ - Instantiate a NBNSProtocol instance. - - This automatically calls reactor.listenUDP method to start listening for incoming packets, so you **must not** call the listenUDP method again. - - :param boolean broadcast: A boolean flag to indicate if we should setup the listening UDP port in broadcast mode - :param integer listen_port: Specifies the UDP port number to bind to for listening. If zero, OS will automatically select a free port number. - """ - self.broadcast = broadcast - self.pending_trns = { } # TRN ID -> ( expiry_time, name, Deferred instance ) - self.transport = reactor.listenUDP(listen_port, self) - if self.broadcast: - self.transport.getHandle().setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) - reactor.callLater(1, self.cleanupPendingTrns) - - def datagramReceived(self, data, from_info): - host, port = from_info - trn_id, ret = self.decodePacket(data) - - # pending transaction exists for trn_id - handle it and remove from queue - if trn_id in self.pending_trns: - _, ip, d = self.pending_trns.pop(trn_id) - if ip is NAME_QUERY: - # decode as query packet - trn_id, ret = self.decodeIPQueryPacket(data) - d.callback(ret) - - def write(self, data, ip, port): - # We don't use the transport.write method directly as it keeps raising DeprecationWarning for ip='<broadcast>' - self.transport.getHandle().sendto(data, ( ip, port )) - - def queryName(self, name, ip = '', port = 137, timeout = 30): - """ - Send a query on the network and hopes that if machine 
matching the *name* will reply with its IP address. - - :param string ip: If the NBNSProtocol instance was instianted with broadcast=True, then this parameter can be an empty string. We will leave it to the OS to determine an appropriate broadcast address. - If the NBNSProtocol instance was instianted with broadcast=False, then you should provide a target IP to send the query. - :param integer port: The NetBIOS-NS port (IANA standard defines this port to be 137). You should not touch this parameter unless you know what you are doing. - :param integer/float timeout: Number of seconds to wait for a reply, after which the returned Deferred instance will be called with a NetBIOSTimeout exception. - :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a list of IP addresses in dotted notation (aaa.bbb.ccc.ddd). - On timeout, the errback function will be called with a Failure instance wrapping around a NetBIOSTimeout exception - """ - trn_id = random.randint(1, 0xFFFF) - while True: - if not self.pending_trns.has_key(trn_id): - break - else: - trn_id = (trn_id + 1) & 0xFFFF - - data = self.prepareNameQuery(trn_id, name) - if self.broadcast and not ip: - ip = '<broadcast>' - elif not ip: - self.log.warning('queryName: ip parameter is empty. OS might not transmit this query to the network') - - self.write(data, ip, port) - - d = defer.Deferred() - self.pending_trns[trn_id] = ( time.time()+timeout, name, d ) - return d - - def queryIPForName(self, ip, port = 137, timeout = 30): - """ - Send a query to the machine with *ip* and hopes that the machine will reply back with its name. - - The implementation of this function is contributed by Jason Anderson. - - :param string ip: If the NBNSProtocol instance was instianted with broadcast=True, then this parameter can be an empty string. We will leave it to the OS to determine an appropriate broadcast address. 
- If the NBNSProtocol instance was instianted with broadcast=False, then you should provide a target IP to send the query. - :param integer port: The NetBIOS-NS port (IANA standard defines this port to be 137). You should not touch this parameter unless you know what you are doing. - :param integer/float timeout: Number of seconds to wait for a reply, after which the method will return None - :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a list of names of the machine at *ip*. - On timeout, the errback function will be called with a Failure instance wrapping around a NetBIOSTimeout exception - """ - trn_id = random.randint(1, 0xFFFF) - while True: - if not self.pending_trns.has_key(trn_id): - break - else: - trn_id = (trn_id + 1) & 0xFFFF - - data = self.prepareNetNameQuery(trn_id) - self.write(data, ip, port) - - d = defer.Deferred() - d2 = defer.Deferred() - d2.addErrback(d.errback) - - def stripCode(ret): - if ret is not None: # got valid response. Somehow the callback is also called when there is an error. 
- d.callback(map(lambda s: s[0], filter(lambda s: s[1] == TYPE_SERVER, ret))) - - d2.addCallback(stripCode) - self.pending_trns[trn_id] = ( time.time()+timeout, NAME_QUERY, d2 ) - return d - - def stopProtocol(self): - DatagramProtocol.stopProtocol(self) - - def cleanupPendingTrns(self): - now = time.time() - - # reply should have been received in the past - expired = filter(lambda (trn_id, (expiry_time, name, d)): expiry_time < now, self.pending_trns.iteritems()) - - # remove expired items from dict + call errback - def expire_item(item): - trn_id, (expiry_time, name, d) = item - - del self.pending_trns[trn_id] - try: - d.errback(NetBIOSTimeout(name)) - except: pass - - map(expire_item, expired) - - if self.transport: - reactor.callLater(1, self.cleanupPendingTrns) + +import os, logging, random, socket, time +from twisted.internet import reactor, defer +from twisted.internet.protocol import DatagramProtocol +from nmb_constants import TYPE_SERVER +from base import NBNS + +IP_QUERY, NAME_QUERY = range(2) + +class NetBIOSTimeout(Exception): + """Raised in NBNSProtocol via Deferred.errback method when queryName method has timeout waiting for reply""" + pass + +class NBNSProtocol(DatagramProtocol, NBNS): + + log = logging.getLogger('NMB.NBNSProtocol') + + def __init__(self, broadcast = True, listen_port = 0): + """ + Instantiate a NBNSProtocol instance. + + This automatically calls reactor.listenUDP method to start listening for incoming packets, so you **must not** call the listenUDP method again. + + :param boolean broadcast: A boolean flag to indicate if we should setup the listening UDP port in broadcast mode + :param integer listen_port: Specifies the UDP port number to bind to for listening. If zero, OS will automatically select a free port number. 
+ """ + self.broadcast = broadcast + self.pending_trns = { } # TRN ID -> ( expiry_time, name, Deferred instance ) + self.transport = reactor.listenUDP(listen_port, self) + if self.broadcast: + self.transport.getHandle().setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + reactor.callLater(1, self.cleanupPendingTrns) + + def datagramReceived(self, data, from_info): + host, port = from_info + trn_id, ret = self.decodePacket(data) + + # pending transaction exists for trn_id - handle it and remove from queue + if trn_id in self.pending_trns: + _, ip, d = self.pending_trns.pop(trn_id) + if ip is NAME_QUERY: + # decode as query packet + trn_id, ret = self.decodeIPQueryPacket(data) + d.callback(ret) + + def write(self, data, ip, port): + # We don't use the transport.write method directly as it keeps raising DeprecationWarning for ip='<broadcast>' + self.transport.getHandle().sendto(data, ( ip, port )) + + def queryName(self, name, ip = '', port = 137, timeout = 30): + """ + Send a query on the network and hopes that if machine matching the *name* will reply with its IP address. + + :param string ip: If the NBNSProtocol instance was instianted with broadcast=True, then this parameter can be an empty string. We will leave it to the OS to determine an appropriate broadcast address. + If the NBNSProtocol instance was instianted with broadcast=False, then you should provide a target IP to send the query. + :param integer port: The NetBIOS-NS port (IANA standard defines this port to be 137). You should not touch this parameter unless you know what you are doing. + :param integer/float timeout: Number of seconds to wait for a reply, after which the returned Deferred instance will be called with a NetBIOSTimeout exception. + :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a list of IP addresses in dotted notation (aaa.bbb.ccc.ddd). 
+ On timeout, the errback function will be called with a Failure instance wrapping around a NetBIOSTimeout exception + """ + trn_id = random.randint(1, 0xFFFF) + while True: + if not self.pending_trns.has_key(trn_id): + break + else: + trn_id = (trn_id + 1) & 0xFFFF + + data = self.prepareNameQuery(trn_id, name) + if self.broadcast and not ip: + ip = '<broadcast>' + elif not ip: + self.log.warning('queryName: ip parameter is empty. OS might not transmit this query to the network') + + self.write(data, ip, port) + + d = defer.Deferred() + self.pending_trns[trn_id] = ( time.time()+timeout, name, d ) + return d + + def queryIPForName(self, ip, port = 137, timeout = 30): + """ + Send a query to the machine with *ip* and hopes that the machine will reply back with its name. + + The implementation of this function is contributed by Jason Anderson. + + :param string ip: If the NBNSProtocol instance was instianted with broadcast=True, then this parameter can be an empty string. We will leave it to the OS to determine an appropriate broadcast address. + If the NBNSProtocol instance was instianted with broadcast=False, then you should provide a target IP to send the query. + :param integer port: The NetBIOS-NS port (IANA standard defines this port to be 137). You should not touch this parameter unless you know what you are doing. + :param integer/float timeout: Number of seconds to wait for a reply, after which the method will return None + :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a list of names of the machine at *ip*. 
+ On timeout, the errback function will be called with a Failure instance wrapping around a NetBIOSTimeout exception + """ + trn_id = random.randint(1, 0xFFFF) + while True: + if not self.pending_trns.has_key(trn_id): + break + else: + trn_id = (trn_id + 1) & 0xFFFF + + data = self.prepareNetNameQuery(trn_id) + self.write(data, ip, port) + + d = defer.Deferred() + d2 = defer.Deferred() + d2.addErrback(d.errback) + + def stripCode(ret): + if ret is not None: # got valid response. Somehow the callback is also called when there is an error. + d.callback(map(lambda s: s[0], filter(lambda s: s[1] == TYPE_SERVER, ret))) + + d2.addCallback(stripCode) + self.pending_trns[trn_id] = ( time.time()+timeout, NAME_QUERY, d2 ) + return d + + def stopProtocol(self): + DatagramProtocol.stopProtocol(self) + + def cleanupPendingTrns(self): + now = time.time() + + # reply should have been received in the past + expired = filter(lambda (trn_id, (expiry_time, name, d)): expiry_time < now, self.pending_trns.iteritems()) + + # remove expired items from dict + call errback + def expire_item(item): + trn_id, (expiry_time, name, d) = item + + del self.pending_trns[trn_id] + try: + d.errback(NetBIOSTimeout(name)) + except: pass + + map(expire_item, expired) + + if self.transport: + reactor.callLater(1, self.cleanupPendingTrns) diff --git a/plugin.video.alfa/lib/sambatools/nmb/base.py b/plugin.video.alfa/lib/sambatools/nmb/base.py index 9526c0a8..0bbef825 100755 --- a/plugin.video.alfa/lib/sambatools/nmb/base.py +++ b/plugin.video.alfa/lib/sambatools/nmb/base.py @@ -1,179 +1,181 @@ -import logging - -from nmb_constants import * -from nmb_structs import * -from utils import encode_name - - -class NMBSession: - - log = logging.getLogger('NMB.NMBSession') - - def __init__(self, my_name, remote_name, host_type = TYPE_SERVER, is_direct_tcp = False): - self.my_name = my_name.upper() - self.remote_name = remote_name.upper() - self.host_type = host_type - self.data_buf = '' - - if is_direct_tcp: - 
self.data_nmb = DirectTCPSessionMessage() - self.sendNMBPacket = self._sendNMBPacket_DirectTCP - else: - self.data_nmb = NMBSessionMessage() - self.sendNMBPacket = self._sendNMBPacket_NetBIOS - - # - # Overridden Methods - # - - def write(self, data): - raise NotImplementedError - - def onNMBSessionMessage(self, flags, data): - pass - - def onNMBSessionOK(self): - pass - - def onNMBSessionFailed(self): - pass - - # - # Public Methods - # - - def feedData(self, data): - self.data_buf = self.data_buf + data - - offset = 0 - while True: - length = self.data_nmb.decode(self.data_buf, offset) - if length == 0: - break - elif length > 0: - offset += length - self._processNMBSessionPacket(self.data_nmb) - else: - raise NMBError - - if offset > 0: - self.data_buf = self.data_buf[offset:] - - def sendNMBMessage(self, data): - self.sendNMBPacket(SESSION_MESSAGE, data) - - def requestNMBSession(self): - my_name_encoded = encode_name(self.my_name, TYPE_WORKSTATION) - remote_name_encoded = encode_name(self.remote_name, self.host_type) - self.sendNMBPacket(SESSION_REQUEST, remote_name_encoded + my_name_encoded) - - # - # Protected Methods - # - - def _processNMBSessionPacket(self, packet): - if packet.type == SESSION_MESSAGE: - self.onNMBSessionMessage(packet.flags, packet.data) - elif packet.type == POSITIVE_SESSION_RESPONSE: - self.onNMBSessionOK() - elif packet.type == NEGATIVE_SESSION_RESPONSE: - self.onNMBSessionFailed() - else: - self.log.warning('Unrecognized NMB session type: 0x%02x', packet.type) - - def _sendNMBPacket_NetBIOS(self, packet_type, data): - length = len(data) - assert length <= 0x01FFFF - flags = 0 - if length > 0xFFFF: - flags |= 0x01 - length &= 0xFFFF - self.write(struct.pack('>BBH', packet_type, flags, length) + data) - - def _sendNMBPacket_DirectTCP(self, packet_type, data): - length = len(data) - assert length <= 0x00FFFFFF - self.write(struct.pack('>I', length) + data) - - -class NBNS: - - log = logging.getLogger('NMB.NBNS') - - HEADER_STRUCT_FORMAT 
= '>HHHHHH' - HEADER_STRUCT_SIZE = struct.calcsize(HEADER_STRUCT_FORMAT) - - def write(self, data, ip, port): - raise NotImplementedError - - def decodePacket(self, data): - if len(data) < self.HEADER_STRUCT_SIZE: - raise Exception - - trn_id, code, question_count, answer_count, authority_count, additional_count = struct.unpack(self.HEADER_STRUCT_FORMAT, data[:self.HEADER_STRUCT_SIZE]) - - is_response = bool((code >> 15) & 0x01) - opcode = (code >> 11) & 0x0F - flags = (code >> 4) & 0x7F - rcode = code & 0x0F - - if opcode == 0x0000 and is_response: - name_len = ord(data[self.HEADER_STRUCT_SIZE]) - offset = self.HEADER_STRUCT_SIZE+2+name_len+8 # constant 2 for the padding bytes before/after the Name and constant 8 for the Type, Class and TTL fields in the Answer section after the Name - record_count = (struct.unpack('>H', data[offset:offset+2])[0]) / 6 - - offset += 4 # Constant 4 for the Data Length and Flags field - ret = [ ] - for i in range(0, record_count): - ret.append('%d.%d.%d.%d' % struct.unpack('4B', (data[offset:offset + 4]))) - offset += 6 - return trn_id, ret - else: - return trn_id, None - - - def prepareNameQuery(self, trn_id, name, is_broadcast = True): - header = struct.pack(self.HEADER_STRUCT_FORMAT, - trn_id, (is_broadcast and 0x0110) or 0x0100, 1, 0, 0, 0) - payload = encode_name(name, 0x20) + '\x00\x20\x00\x01' - - return header + payload - - # - # Contributed by Jason Anderson - # - def decodeIPQueryPacket(self, data): - if len(data) < self.HEADER_STRUCT_SIZE: - raise Exception - - trn_id, code, question_count, answer_count, authority_count, additional_count = struct.unpack(self.HEADER_STRUCT_FORMAT, data[:self.HEADER_STRUCT_SIZE]) - - is_response = bool((code >> 15) & 0x01) - opcode = (code >> 11) & 0x0F - flags = (code >> 4) & 0x7F - rcode = code & 0x0F - numnames = struct.unpack('B', data[self.HEADER_STRUCT_SIZE + 44])[0] - - if numnames > 0: - ret = [ ] - offset = self.HEADER_STRUCT_SIZE + 45 - - for i in range(0, numnames): - mynme = 
data[offset:offset + 15] - mynme = mynme.strip() - ret.append(( mynme, ord(data[offset+15]) )) - offset += 18 - - return trn_id, ret - else: - return trn_id, None - - # - # Contributed by Jason Anderson - # - def prepareNetNameQuery(self, trn_id, is_broadcast = True): - header = struct.pack(self.HEADER_STRUCT_FORMAT, - trn_id, (is_broadcast and 0x0010) or 0x0000, 1, 0, 0, 0) - payload = encode_name('*', 0) + '\x00\x21\x00\x01' - - return header + payload + +import struct, logging, random +from nmb_constants import * +from nmb_structs import * +from utils import encode_name + +class NMBSession: + + log = logging.getLogger('NMB.NMBSession') + + def __init__(self, my_name, remote_name, host_type = TYPE_SERVER, is_direct_tcp = False): + self.my_name = my_name.upper() + self.remote_name = remote_name.upper() + self.host_type = host_type + self.data_buf = '' + + if is_direct_tcp: + self.data_nmb = DirectTCPSessionMessage() + self.sendNMBPacket = self._sendNMBPacket_DirectTCP + else: + self.data_nmb = NMBSessionMessage() + self.sendNMBPacket = self._sendNMBPacket_NetBIOS + + # + # Overridden Methods + # + + def write(self, data): + raise NotImplementedError + + def onNMBSessionMessage(self, flags, data): + pass + + def onNMBSessionOK(self): + pass + + def onNMBSessionFailed(self): + pass + + # + # Public Methods + # + + def feedData(self, data): + self.data_buf = self.data_buf + data + + offset = 0 + while True: + length = self.data_nmb.decode(self.data_buf, offset) + if length == 0: + break + elif length > 0: + offset += length + self._processNMBSessionPacket(self.data_nmb) + else: + raise NMBError + + if offset > 0: + self.data_buf = self.data_buf[offset:] + + def sendNMBMessage(self, data): + self.sendNMBPacket(SESSION_MESSAGE, data) + + def requestNMBSession(self): + my_name_encoded = encode_name(self.my_name, TYPE_WORKSTATION) + remote_name_encoded = encode_name(self.remote_name, self.host_type) + self.sendNMBPacket(SESSION_REQUEST, remote_name_encoded + 
my_name_encoded) + + # + # Protected Methods + # + + def _processNMBSessionPacket(self, packet): + if packet.type == SESSION_MESSAGE: + self.onNMBSessionMessage(packet.flags, packet.data) + elif packet.type == POSITIVE_SESSION_RESPONSE: + self.onNMBSessionOK() + elif packet.type == NEGATIVE_SESSION_RESPONSE: + self.onNMBSessionFailed() + elif packet.type == SESSION_KEEPALIVE: + # Discard keepalive packets - [RFC1002]: 5.2.2.1 + pass + else: + self.log.warning('Unrecognized NMB session type: 0x%02x', packet.type) + + def _sendNMBPacket_NetBIOS(self, packet_type, data): + length = len(data) + assert length <= 0x01FFFF + flags = 0 + if length > 0xFFFF: + flags |= 0x01 + length &= 0xFFFF + self.write(struct.pack('>BBH', packet_type, flags, length) + data) + + def _sendNMBPacket_DirectTCP(self, packet_type, data): + length = len(data) + assert length <= 0x00FFFFFF + self.write(struct.pack('>I', length) + data) + + +class NBNS: + + log = logging.getLogger('NMB.NBNS') + + HEADER_STRUCT_FORMAT = '>HHHHHH' + HEADER_STRUCT_SIZE = struct.calcsize(HEADER_STRUCT_FORMAT) + + def write(self, data, ip, port): + raise NotImplementedError + + def decodePacket(self, data): + if len(data) < self.HEADER_STRUCT_SIZE: + raise Exception + + trn_id, code, question_count, answer_count, authority_count, additional_count = struct.unpack(self.HEADER_STRUCT_FORMAT, data[:self.HEADER_STRUCT_SIZE]) + + is_response = bool((code >> 15) & 0x01) + opcode = (code >> 11) & 0x0F + flags = (code >> 4) & 0x7F + rcode = code & 0x0F + + if opcode == 0x0000 and is_response: + name_len = ord(data[self.HEADER_STRUCT_SIZE]) + offset = self.HEADER_STRUCT_SIZE+2+name_len+8 # constant 2 for the padding bytes before/after the Name and constant 8 for the Type, Class and TTL fields in the Answer section after the Name + record_count = (struct.unpack('>H', data[offset:offset+2])[0]) / 6 + + offset += 4 # Constant 4 for the Data Length and Flags field + ret = [ ] + for i in range(0, record_count): + 
ret.append('%d.%d.%d.%d' % struct.unpack('4B', (data[offset:offset + 4]))) + offset += 6 + return trn_id, ret + else: + return trn_id, None + + + def prepareNameQuery(self, trn_id, name, is_broadcast = True): + header = struct.pack(self.HEADER_STRUCT_FORMAT, + trn_id, (is_broadcast and 0x0110) or 0x0100, 1, 0, 0, 0) + payload = encode_name(name, 0x20) + '\x00\x20\x00\x01' + + return header + payload + + # + # Contributed by Jason Anderson + # + def decodeIPQueryPacket(self, data): + if len(data) < self.HEADER_STRUCT_SIZE: + raise Exception + + trn_id, code, question_count, answer_count, authority_count, additional_count = struct.unpack(self.HEADER_STRUCT_FORMAT, data[:self.HEADER_STRUCT_SIZE]) + + is_response = bool((code >> 15) & 0x01) + opcode = (code >> 11) & 0x0F + flags = (code >> 4) & 0x7F + rcode = code & 0x0F + numnames = struct.unpack('B', data[self.HEADER_STRUCT_SIZE + 44])[0] + + if numnames > 0: + ret = [ ] + offset = self.HEADER_STRUCT_SIZE + 45 + + for i in range(0, numnames): + mynme = data[offset:offset + 15] + mynme = mynme.strip() + ret.append(( mynme, ord(data[offset+15]) )) + offset += 18 + + return trn_id, ret + else: + return trn_id, None + + # + # Contributed by Jason Anderson + # + def prepareNetNameQuery(self, trn_id, is_broadcast = True): + header = struct.pack(self.HEADER_STRUCT_FORMAT, + trn_id, (is_broadcast and 0x0010) or 0x0000, 1, 0, 0, 0) + payload = encode_name('*', 0) + '\x00\x21\x00\x01' + + return header + payload diff --git a/plugin.video.alfa/lib/sambatools/nmb/nmb_constants.py b/plugin.video.alfa/lib/sambatools/nmb/nmb_constants.py index 1c576543..fcf6007a 100755 --- a/plugin.video.alfa/lib/sambatools/nmb/nmb_constants.py +++ b/plugin.video.alfa/lib/sambatools/nmb/nmb_constants.py @@ -1,38 +1,38 @@ - -# Default port for NetBIOS name service -NETBIOS_NS_PORT = 137 - -# Default port for NetBIOS session service -NETBIOS_SESSION_PORT = 139 - -# Owner Node Type Constants -NODE_B = 0x00 -NODE_P = 0x01 -NODE_M = 0x10 -NODE_RESERVED 
= 0x11 - -# Name Type Constants -TYPE_UNKNOWN = 0x01 -TYPE_WORKSTATION = 0x00 -TYPE_CLIENT = 0x03 -TYPE_SERVER = 0x20 -TYPE_DOMAIN_MASTER = 0x1B -TYPE_MASTER_BROWSER = 0x1D -TYPE_BROWSER = 0x1E - -TYPE_NAMES = { TYPE_UNKNOWN: 'Unknown', - TYPE_WORKSTATION: 'Workstation', - TYPE_CLIENT: 'Client', - TYPE_SERVER: 'Server', - TYPE_MASTER_BROWSER: 'Master Browser', - TYPE_BROWSER: 'Browser Server', - TYPE_DOMAIN_MASTER: 'Domain Master' - } - -# Values for Session Packet Type field in Session Packets -SESSION_MESSAGE = 0x00 -SESSION_REQUEST = 0x81 -POSITIVE_SESSION_RESPONSE = 0x82 -NEGATIVE_SESSION_RESPONSE = 0x83 -REGTARGET_SESSION_RESPONSE = 0x84 -SESSION_KEEPALIVE = 0x85 + +# Default port for NetBIOS name service +NETBIOS_NS_PORT = 137 + +# Default port for NetBIOS session service +NETBIOS_SESSION_PORT = 139 + +# Owner Node Type Constants +NODE_B = 0x00 +NODE_P = 0x01 +NODE_M = 0x10 +NODE_RESERVED = 0x11 + +# Name Type Constants +TYPE_UNKNOWN = 0x01 +TYPE_WORKSTATION = 0x00 +TYPE_CLIENT = 0x03 +TYPE_SERVER = 0x20 +TYPE_DOMAIN_MASTER = 0x1B +TYPE_MASTER_BROWSER = 0x1D +TYPE_BROWSER = 0x1E + +TYPE_NAMES = { TYPE_UNKNOWN: 'Unknown', + TYPE_WORKSTATION: 'Workstation', + TYPE_CLIENT: 'Client', + TYPE_SERVER: 'Server', + TYPE_MASTER_BROWSER: 'Master Browser', + TYPE_BROWSER: 'Browser Server', + TYPE_DOMAIN_MASTER: 'Domain Master' + } + +# Values for Session Packet Type field in Session Packets +SESSION_MESSAGE = 0x00 +SESSION_REQUEST = 0x81 +POSITIVE_SESSION_RESPONSE = 0x82 +NEGATIVE_SESSION_RESPONSE = 0x83 +REGTARGET_SESSION_RESPONSE = 0x84 +SESSION_KEEPALIVE = 0x85 diff --git a/plugin.video.alfa/lib/sambatools/nmb/nmb_structs.py b/plugin.video.alfa/lib/sambatools/nmb/nmb_structs.py index e78b87f4..71e603c3 100755 --- a/plugin.video.alfa/lib/sambatools/nmb/nmb_structs.py +++ b/plugin.video.alfa/lib/sambatools/nmb/nmb_structs.py @@ -1,69 +1,69 @@ - -import struct - -class NMBError(Exception): pass - - -class NotConnectedError(NMBError): - """ - Raisd when the underlying NMB 
connection has been disconnected or not connected yet - """ - pass - - -class NMBSessionMessage: - - HEADER_STRUCT_FORMAT = '>BBH' - HEADER_STRUCT_SIZE = struct.calcsize(HEADER_STRUCT_FORMAT) - - def __init__(self): - self.reset() - - def reset(self): - self.type = 0 - self.flags = 0 - self.data = '' - - def decode(self, data, offset): - data_len = len(data) - - if data_len < offset + self.HEADER_STRUCT_SIZE: - # Not enough data for decoding - return 0 - - self.reset() - self.type, self.flags, length = struct.unpack(self.HEADER_STRUCT_FORMAT, data[offset:offset+self.HEADER_STRUCT_SIZE]) - - if self.flags & 0x01: - length |= 0x010000 - - if data_len < offset + self.HEADER_STRUCT_SIZE + length: - return 0 - - self.data = data[offset+self.HEADER_STRUCT_SIZE:offset+self.HEADER_STRUCT_SIZE+length] - return self.HEADER_STRUCT_SIZE + length - - -class DirectTCPSessionMessage(NMBSessionMessage): - - HEADER_STRUCT_FORMAT = '>I' - HEADER_STRUCT_SIZE = struct.calcsize(HEADER_STRUCT_FORMAT) - - def decode(self, data, offset): - data_len = len(data) - - if data_len < offset + self.HEADER_STRUCT_SIZE: - # Not enough data for decoding - return 0 - - self.reset() - length = struct.unpack(self.HEADER_STRUCT_FORMAT, data[offset:offset+self.HEADER_STRUCT_SIZE])[0] - - if length >> 24 != 0: - raise NMBError("Invalid protocol header for Direct TCP session message") - - if data_len < offset + self.HEADER_STRUCT_SIZE + length: - return 0 - - self.data = data[offset+self.HEADER_STRUCT_SIZE:offset+self.HEADER_STRUCT_SIZE+length] - return self.HEADER_STRUCT_SIZE + length + +import struct + +class NMBError(Exception): pass + + +class NotConnectedError(NMBError): + """ + Raisd when the underlying NMB connection has been disconnected or not connected yet + """ + pass + + +class NMBSessionMessage: + + HEADER_STRUCT_FORMAT = '>BBH' + HEADER_STRUCT_SIZE = struct.calcsize(HEADER_STRUCT_FORMAT) + + def __init__(self): + self.reset() + + def reset(self): + self.type = 0 + self.flags = 0 + self.data 
= '' + + def decode(self, data, offset): + data_len = len(data) + + if data_len < offset + self.HEADER_STRUCT_SIZE: + # Not enough data for decoding + return 0 + + self.reset() + self.type, self.flags, length = struct.unpack(self.HEADER_STRUCT_FORMAT, data[offset:offset+self.HEADER_STRUCT_SIZE]) + + if self.flags & 0x01: + length |= 0x010000 + + if data_len < offset + self.HEADER_STRUCT_SIZE + length: + return 0 + + self.data = data[offset+self.HEADER_STRUCT_SIZE:offset+self.HEADER_STRUCT_SIZE+length] + return self.HEADER_STRUCT_SIZE + length + + +class DirectTCPSessionMessage(NMBSessionMessage): + + HEADER_STRUCT_FORMAT = '>I' + HEADER_STRUCT_SIZE = struct.calcsize(HEADER_STRUCT_FORMAT) + + def decode(self, data, offset): + data_len = len(data) + + if data_len < offset + self.HEADER_STRUCT_SIZE: + # Not enough data for decoding + return 0 + + self.reset() + length = struct.unpack(self.HEADER_STRUCT_FORMAT, data[offset:offset+self.HEADER_STRUCT_SIZE])[0] + + if length >> 24 != 0: + raise NMBError("Invalid protocol header for Direct TCP session message") + + if data_len < offset + self.HEADER_STRUCT_SIZE + length: + return 0 + + self.data = data[offset+self.HEADER_STRUCT_SIZE:offset+self.HEADER_STRUCT_SIZE+length] + return self.HEADER_STRUCT_SIZE + length diff --git a/plugin.video.alfa/lib/sambatools/nmb/utils.py b/plugin.video.alfa/lib/sambatools/nmb/utils.py index 1de7b1fe..45d625c2 100755 --- a/plugin.video.alfa/lib/sambatools/nmb/utils.py +++ b/plugin.video.alfa/lib/sambatools/nmb/utils.py @@ -1,50 +1,50 @@ -import re -import string - - -def encode_name(name, type, scope = None): - """ - Perform first and second level encoding of name as specified in RFC 1001 (Section 4) - """ - if name == '*': - name = name + '\0' * 15 - elif len(name) > 15: - name = name[:15] + chr(type) - else: - name = string.ljust(name, 15) + chr(type) - - def _do_first_level_encoding(m): - s = ord(m.group(0)) - return string.uppercase[s >> 4] + string.uppercase[s & 0x0f] - - encoded_name = 
chr(len(name) * 2) + re.sub('.', _do_first_level_encoding, name) - if scope: - encoded_scope = '' - for s in string.split(scope, '.'): - encoded_scope = encoded_scope + chr(len(s)) + s - return encoded_name + encoded_scope + '\0' - else: - return encoded_name + '\0' - - -def decode_name(name): - name_length = ord(name[0]) - assert name_length == 32 - - def _do_first_level_decoding(m): - s = m.group(0) - return chr(((ord(s[0]) - ord('A')) << 4) | (ord(s[1]) - ord('A'))) - - decoded_name = re.sub('..', _do_first_level_decoding, name[1:33]) - if name[33] == '\0': - return 34, decoded_name, '' - else: - decoded_domain = '' - offset = 34 - while 1: - domain_length = ord(name[offset]) - if domain_length == 0: - break - decoded_domain = '.' + name[offset:offset + domain_length] - offset = offset + domain_length - return offset + 1, decoded_name, decoded_domain + +import string, re + + +def encode_name(name, type, scope = None): + """ + Perform first and second level encoding of name as specified in RFC 1001 (Section 4) + """ + if name == '*': + name = name + '\0' * 15 + elif len(name) > 15: + name = name[:15] + chr(type) + else: + name = string.ljust(name, 15) + chr(type) + + def _do_first_level_encoding(m): + s = ord(m.group(0)) + return string.uppercase[s >> 4] + string.uppercase[s & 0x0f] + + encoded_name = chr(len(name) * 2) + re.sub('.', _do_first_level_encoding, name) + if scope: + encoded_scope = '' + for s in string.split(scope, '.'): + encoded_scope = encoded_scope + chr(len(s)) + s + return encoded_name + encoded_scope + '\0' + else: + return encoded_name + '\0' + + +def decode_name(name): + name_length = ord(name[0]) + assert name_length == 32 + + def _do_first_level_decoding(m): + s = m.group(0) + return chr(((ord(s[0]) - ord('A')) << 4) | (ord(s[1]) - ord('A'))) + + decoded_name = re.sub('..', _do_first_level_decoding, name[1:33]) + if name[33] == '\0': + return 34, decoded_name, '' + else: + decoded_domain = '' + offset = 34 + while 1: + domain_length = 
ord(name[offset]) + if domain_length == 0: + break + decoded_domain = '.' + name[offset:offset + domain_length] + offset = offset + domain_length + return offset + 1, decoded_name, decoded_domain diff --git a/plugin.video.alfa/lib/sambatools/smb/SMBConnection.py b/plugin.video.alfa/lib/sambatools/smb/SMBConnection.py index 7940d7f2..603c48df 100755 --- a/plugin.video.alfa/lib/sambatools/smb/SMBConnection.py +++ b/plugin.video.alfa/lib/sambatools/smb/SMBConnection.py @@ -1,582 +1,630 @@ -import select -import socket -import types - -from base import SMB, NotConnectedError, SMBTimeout -from smb_structs import * - - -class SMBConnection(SMB): - - log = logging.getLogger('SMB.SMBConnection') - - #: SMB messages will never be signed regardless of remote server's configurations; access errors will occur if the remote server requires signing. - SIGN_NEVER = 0 - #: SMB messages will be signed when remote server supports signing but not requires signing. - SIGN_WHEN_SUPPORTED = 1 - #: SMB messages will only be signed when remote server requires signing. - SIGN_WHEN_REQUIRED = 2 - - def __init__(self, username, password, my_name, remote_name, domain = '', use_ntlm_v2 = True, sign_options = SIGN_WHEN_REQUIRED, is_direct_tcp = False): - """ - Create a new SMBConnection instance. - - *username* and *password* are the user credentials required to authenticate the underlying SMB connection with the remote server. - File operations can only be proceeded after the connection has been authenticated successfully. - - Note that you need to call *connect* method to actually establish the SMB connection to the remote server and perform authentication. - - The default TCP port for most SMB/CIFS servers using NetBIOS over TCP/IP is 139. - Some newer server installations might also support Direct hosting of SMB over TCP/IP; for these servers, the default TCP port is 445. - - :param string my_name: The local NetBIOS machine name that will identify where this connection is originating from. 
- You can freely choose a name as long as it contains a maximum of 15 alphanumeric characters and does not contain spaces and any of ``\/:*?";|+`` - :param string remote_name: The NetBIOS machine name of the remote server. - On windows, you can find out the machine name by right-clicking on the "My Computer" and selecting "Properties". - This parameter must be the same as what has been configured on the remote server, or else the connection will be rejected. - :param string domain: The network domain. On windows, it is known as the workgroup. Usually, it is safe to leave this parameter as an empty string. - :param boolean use_ntlm_v2: Indicates whether pysmb should be NTLMv1 or NTLMv2 authentication algorithm for authentication. - The choice of NTLMv1 and NTLMv2 is configured on the remote server, and there is no mechanism to auto-detect which algorithm has been configured. - Hence, we can only "guess" or try both algorithms. - On Sambda, Windows Vista and Windows 7, NTLMv2 is enabled by default. On Windows XP, we can use NTLMv1 before NTLMv2. - :param int sign_options: Determines whether SMB messages will be signed. Default is *SIGN_WHEN_REQUIRED*. - If *SIGN_WHEN_REQUIRED* (value=2), SMB messages will only be signed when remote server requires signing. - If *SIGN_WHEN_SUPPORTED* (value=1), SMB messages will be signed when remote server supports signing but not requires signing. - If *SIGN_NEVER* (value=0), SMB messages will never be signed regardless of remote server's configurations; access errors will occur if the remote server requires signing. - :param boolean is_direct_tcp: Controls whether the NetBIOS over TCP/IP (is_direct_tcp=False) or the newer Direct hosting of SMB over TCP/IP (is_direct_tcp=True) will be used for the communication. - The default parameter is False which will use NetBIOS over TCP/IP for wider compatibility (TCP port: 139). 
- """ - SMB.__init__(self, username, password, my_name, remote_name, domain, use_ntlm_v2, sign_options, is_direct_tcp) - self.sock = None - self.auth_result = None - self.is_busy = False - self.is_direct_tcp = is_direct_tcp - - # - # SMB (and its superclass) Methods - # - - def onAuthOK(self): - self.auth_result = True - - def onAuthFailed(self): - self.auth_result = False - - def write(self, data): - assert self.sock - data_len = len(data) - total_sent = 0 - while total_sent < data_len: - sent = self.sock.send(data[total_sent:]) - if sent == 0: - raise NotConnectedError('Server disconnected') - total_sent = total_sent + sent - - # - # Misc Properties - # - - @property - def isUsingSMB2(self): - """A convenient property to return True if the underlying SMB connection is using SMB2 protocol.""" - return self.is_using_smb2 - - - # - # Public Methods - # - - def connect(self, ip, port = 139, sock_family = socket.AF_INET, timeout = 60): - """ - Establish the SMB connection to the remote SMB/CIFS server. - - You must call this method before attempting any of the file operations with the remote server. - This method will block until the SMB connection has attempted at least one authentication. - - :return: A boolean value indicating the result of the authentication atttempt: True if authentication is successful; False, if otherwise. - """ - if self.sock: - self.sock.close() - - self.auth_result = None - self.sock = socket.socket(sock_family) - self.sock.settimeout(timeout) - self.sock.connect(( ip, port )) - - self.is_busy = True - try: - if not self.is_direct_tcp: - self.requestNMBSession() - else: - self.onNMBSessionOK() - while self.auth_result is None: - self._pollForNetBIOSPacket(timeout) - finally: - self.is_busy = False - - return self.auth_result - - def close(self): - """ - Terminate the SMB connection (if it has been started) and release any sources held by the underlying socket. 
- """ - if self.sock: - self.sock.close() - self.sock = None - - def listShares(self, timeout = 30): - """ - Retrieve a list of shared resources on remote server. - - :return: A list of :doc:`smb.base.SharedDevice<smb_SharedDevice>` instances describing the shared resource - """ - if not self.sock: - raise NotConnectedError('Not connected to server') - - results = [ ] - - def cb(entries): - self.is_busy = False - results.extend(entries) - - def eb(failure): - self.is_busy = False - raise failure - - self.is_busy = True - try: - self._listShares(cb, eb, timeout) - while self.is_busy: - self._pollForNetBIOSPacket(timeout) - finally: - self.is_busy = False - - return results - - def listPath(self, service_name, path, - search = SMB_FILE_ATTRIBUTE_READONLY | SMB_FILE_ATTRIBUTE_HIDDEN | SMB_FILE_ATTRIBUTE_SYSTEM | SMB_FILE_ATTRIBUTE_DIRECTORY | SMB_FILE_ATTRIBUTE_ARCHIVE, - pattern = '*', timeout = 30): - """ - Retrieve a directory listing of files/folders at *path* - - :param string/unicode service_name: the name of the shared folder for the *path* - :param string/unicode path: path relative to the *service_name* where we are interested to learn about its files/sub-folders. - :param integer search: integer value made up from a bitwise-OR of *SMB_FILE_ATTRIBUTE_xxx* bits (see smb_constants.py). - The default *search* value will query for all read-only, hidden, system, archive files and directories. - :param string/unicode pattern: the filter to apply to the results before returning to the client. - :return: A list of :doc:`smb.base.SharedFile<smb_SharedFile>` instances. 
- """ - if not self.sock: - raise NotConnectedError('Not connected to server') - - results = [ ] - - def cb(entries): - self.is_busy = False - results.extend(entries) - - def eb(failure): - self.is_busy = False - raise failure - - self.is_busy = True - try: - self._listPath(service_name, path, cb, eb, search = search, pattern = pattern, timeout = timeout) - while self.is_busy: - self._pollForNetBIOSPacket(timeout) - finally: - self.is_busy = False - - return results - - def listSnapshots(self, service_name, path, timeout = 30): - """ - Retrieve a list of available snapshots (shadow copies) for *path*. - - Note that snapshot features are only supported on Windows Vista Business, Enterprise and Ultimate, and on all Windows 7 editions. - - :param string/unicode service_name: the name of the shared folder for the *path* - :param string/unicode path: path relative to the *service_name* where we are interested in the list of available snapshots - :return: A list of python *datetime.DateTime* instances in GMT/UTC time zone - """ - if not self.sock: - raise NotConnectedError('Not connected to server') - - results = [ ] - - def cb(entries): - self.is_busy = False - results.extend(entries) - - def eb(failure): - self.is_busy = False - raise failure - - self.is_busy = True - try: - self._listSnapshots(service_name, path, cb, eb, timeout = timeout) - while self.is_busy: - self._pollForNetBIOSPacket(timeout) - finally: - self.is_busy = False - - return results - - def getAttributes(self, service_name, path, timeout = 30): - """ - Retrieve information about the file at *path* on the *service_name*. - - :param string/unicode service_name: the name of the shared folder for the *path* - :param string/unicode path: Path of the file on the remote server. If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be raised. - :return: A :doc:`smb.base.SharedFile<smb_SharedFile>` instance containing the attributes of the file. 
- """ - if not self.sock: - raise NotConnectedError('Not connected to server') - - results = [ ] - - def cb(info): - self.is_busy = False - results.append(info) - - def eb(failure): - self.is_busy = False - raise failure - - self.is_busy = True - try: - self._getAttributes(service_name, path, cb, eb, timeout) - while self.is_busy: - self._pollForNetBIOSPacket(timeout) - finally: - self.is_busy = False - - return results[0] - - def retrieveFile(self, service_name, path, file_obj, timeout = 30): - """ - Retrieve the contents of the file at *path* on the *service_name* and write these contents to the provided *file_obj*. - - Use *retrieveFileFromOffset()* method if you wish to specify the offset to read from the remote *path* and/or the number of bytes to write to the *file_obj*. - - :param string/unicode service_name: the name of the shared folder for the *path* - :param string/unicode path: Path of the file on the remote server. If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be raised. - :param file_obj: A file-like object that has a *write* method. Data will be written continuously to *file_obj* until EOF is received from the remote service. - :return: A 2-element tuple of ( file attributes of the file on server, number of bytes written to *file_obj* ). - The file attributes is an integer value made up from a bitwise-OR of *SMB_FILE_ATTRIBUTE_xxx* bits (see smb_constants.py) - """ - return self.retrieveFileFromOffset(service_name, path, file_obj, 0L, -1L, timeout) - - def retrieveFileFromOffset(self, service_name, path, file_obj, offset = 0L, max_length = -1L, timeout = 30): - """ - Retrieve the contents of the file at *path* on the *service_name* and write these contents to the provided *file_obj*. - - :param string/unicode service_name: the name of the shared folder for the *path* - :param string/unicode path: Path of the file on the remote server. 
If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be raised. - :param file_obj: A file-like object that has a *write* method. Data will be written continuously to *file_obj* up to *max_length* number of bytes. - :param integer/long offset: the offset in the remote *path* where the first byte will be read and written to *file_obj*. Must be either zero or a positive integer/long value. - :param integer/long max_length: maximum number of bytes to read from the remote *path* and write to the *file_obj*. Specify a negative value to read from *offset* to the EOF. - If zero, the method returns immediately after the file is opened successfully for reading. - :return: A 2-element tuple of ( file attributes of the file on server, number of bytes written to *file_obj* ). - The file attributes is an integer value made up from a bitwise-OR of *SMB_FILE_ATTRIBUTE_xxx* bits (see smb_constants.py) - """ - if not self.sock: - raise NotConnectedError('Not connected to server') - - results = [ ] - - def cb(r): - self.is_busy = False - results.append(r[1:]) - - def eb(failure): - self.is_busy = False - raise failure - - self.is_busy = True - try: - self._retrieveFileFromOffset(service_name, path, file_obj, cb, eb, offset, max_length, timeout = timeout) - while self.is_busy: - self._pollForNetBIOSPacket(timeout) - finally: - self.is_busy = False - - return results[0] - - def storeFile(self, service_name, path, file_obj, timeout = 30): - """ - Store the contents of the *file_obj* at *path* on the *service_name*. - If the file already exists on the remote server, it will be truncated and overwritten. - - :param string/unicode service_name: the name of the shared folder for the *path* - :param string/unicode path: Path of the file on the remote server. If the file at *path* does not exist, it will be created. Otherwise, it will be overwritten. 
- If the *path* refers to a folder or the file cannot be opened for writing, an :doc:`OperationFailure<smb_exceptions>` will be raised. - :param file_obj: A file-like object that has a *read* method. Data will read continuously from *file_obj* until EOF. - :return: Number of bytes uploaded - """ - return self.storeFileFromOffset(service_name, path, file_obj, 0L, True, timeout) - - def storeFileFromOffset(self, service_name, path, file_obj, offset = 0L, truncate = False, timeout = 30): - """ - Store the contents of the *file_obj* at *path* on the *service_name*. - - :param string/unicode service_name: the name of the shared folder for the *path* - :param string/unicode path: Path of the file on the remote server. If the file at *path* does not exist, it will be created. - If the *path* refers to a folder or the file cannot be opened for writing, an :doc:`OperationFailure<smb_exceptions>` will be raised. - :param file_obj: A file-like object that has a *read* method. Data will read continuously from *file_obj* until EOF. - :param offset: Long integer value which specifies the offset in the remote server to start writing. First byte of the file is 0. - :param truncate: Boolean value. If True and the file exists on the remote server, it will be truncated first before writing. Default is False. - :return: the file position where the next byte will be written. - """ - if not self.sock: - raise NotConnectedError('Not connected to server') - - results = [ ] - - def cb(r): - self.is_busy = False - results.append(r[1]) - - def eb(failure): - self.is_busy = False - raise failure - - self.is_busy = True - try: - self._storeFileFromOffset(service_name, path, file_obj, cb, eb, offset, truncate = truncate, timeout = timeout) - while self.is_busy: - self._pollForNetBIOSPacket(timeout) - finally: - self.is_busy = False - - return results[0] - - def deleteFiles(self, service_name, path_file_pattern, timeout = 30): - """ - Delete one or more regular files. 
It supports the use of wildcards in file names, allowing for deletion of multiple files in a single request. - - :param string/unicode service_name: Contains the name of the shared folder. - :param string/unicode path_file_pattern: The pathname of the file(s) to be deleted, relative to the service_name. - Wildcards may be used in th filename component of the path. - If your path/filename contains non-English characters, you must pass in an unicode string. - :return: None - """ - if not self.sock: - raise NotConnectedError('Not connected to server') - - def cb(r): - self.is_busy = False - - def eb(failure): - self.is_busy = False - raise failure - - self.is_busy = True - try: - self._deleteFiles(service_name, path_file_pattern, cb, eb, timeout = timeout) - while self.is_busy: - self._pollForNetBIOSPacket(timeout) - finally: - self.is_busy = False - - def resetFileAttributes(self, service_name, path_file_pattern, timeout = 30): - """ - Reset file attributes of one or more regular files or folders. - It supports the use of wildcards in file names, allowing for unlocking of multiple files/folders in a single request. - This function is very helpful when deleting files/folders that are read-only. - Note: this function is currently only implemented for SMB2! Technically, it sets the FILE_ATTRIBUTE_NORMAL flag, therefore clearing all other flags. (See https://msdn.microsoft.com/en-us/library/cc232110.aspx for further information) - :param string/unicode service_name: Contains the name of the shared folder. - :param string/unicode path_file_pattern: The pathname of the file(s) to be deleted, relative to the service_name. - Wildcards may be used in th filename component of the path. - If your path/filename contains non-English characters, you must pass in an unicode string. 
- :return: None - """ - if not self.sock: - raise NotConnectedError('Not connected to server') - - def cb(r): - self.is_busy = False - - def eb(failure): - self.is_busy = False - raise failure - - self.is_busy = True - try: - self._resetFileAttributes(service_name, path_file_pattern, cb, eb, timeout = timeout) - while self.is_busy: - self._pollForNetBIOSPacket(timeout) - finally: - self.is_busy = False - - def createDirectory(self, service_name, path, timeout = 30): - """ - Creates a new directory *path* on the *service_name*. - - :param string/unicode service_name: Contains the name of the shared folder. - :param string/unicode path: The path of the new folder (relative to) the shared folder. - If the path contains non-English characters, an unicode string must be used to pass in the path. - :return: None - """ - if not self.sock: - raise NotConnectedError('Not connected to server') - - def cb(r): - self.is_busy = False - - def eb(failure): - self.is_busy = False - raise failure - - self.is_busy = True - try: - self._createDirectory(service_name, path, cb, eb, timeout = timeout) - while self.is_busy: - self._pollForNetBIOSPacket(timeout) - finally: - self.is_busy = False - - def deleteDirectory(self, service_name, path, timeout = 30): - """ - Delete the empty folder at *path* on *service_name* - - :param string/unicode service_name: Contains the name of the shared folder. - :param string/unicode path: The path of the to-be-deleted folder (relative to) the shared folder. - If the path contains non-English characters, an unicode string must be used to pass in the path. 
- :return: None - """ - if not self.sock: - raise NotConnectedError('Not connected to server') - - def cb(r): - self.is_busy = False - - def eb(failure): - self.is_busy = False - raise failure - - self.is_busy = True - try: - self._deleteDirectory(service_name, path, cb, eb, timeout = timeout) - while self.is_busy: - self._pollForNetBIOSPacket(timeout) - finally: - self.is_busy = False - - def rename(self, service_name, old_path, new_path, timeout = 30): - """ - Rename a file or folder at *old_path* to *new_path* shared at *service_name*. Note that this method cannot be used to rename file/folder across different shared folders - - *old_path* and *new_path* are string/unicode referring to the old and new path of the renamed resources (relative to) the shared folder. - If the path contains non-English characters, an unicode string must be used to pass in the path. - - :param string/unicode service_name: Contains the name of the shared folder. - :return: None - """ - if not self.sock: - raise NotConnectedError('Not connected to server') - - def cb(r): - self.is_busy = False - - def eb(failure): - self.is_busy = False - raise failure - - self.is_busy = True - try: - self._rename(service_name, old_path, new_path, cb, eb) - while self.is_busy: - self._pollForNetBIOSPacket(timeout) - finally: - self.is_busy = False - - def echo(self, data, timeout = 10): - """ - Send an echo command containing *data* to the remote SMB/CIFS server. The remote SMB/CIFS will reply with the same *data*. - - :param string data: Data to send to the remote server. 
- :return: The *data* parameter - """ - if not self.sock: - raise NotConnectedError('Not connected to server') - - results = [ ] - - def cb(r): - self.is_busy = False - results.append(r) - - def eb(failure): - self.is_busy = False - raise failure - - self.is_busy = True - try: - self._echo(data, cb, eb) - while self.is_busy: - self._pollForNetBIOSPacket(timeout) - finally: - self.is_busy = False - - return results[0] - - # - # Protected Methods - # - - def _pollForNetBIOSPacket(self, timeout): - expiry_time = time.time() + timeout - read_len = 4 - data = '' - - while read_len > 0: - try: - if expiry_time < time.time(): - raise SMBTimeout - - ready, _, _ = select.select([ self.sock.fileno() ], [ ], [ ], timeout) - if not ready: - raise SMBTimeout - - d = self.sock.recv(read_len) - if len(d) == 0: - raise NotConnectedError - - data = data + d - read_len -= len(d) - except select.error, ex: - if type(ex) is types.TupleType: - if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN: - raise ex - else: - raise ex - - type, flags, length = struct.unpack('>BBH', data) - if flags & 0x01: - length = length | 0x10000 - - read_len = length - while read_len > 0: - try: - if expiry_time < time.time(): - raise SMBTimeout - - ready, _, _ = select.select([ self.sock.fileno() ], [ ], [ ], timeout) - if not ready: - raise SMBTimeout - - d = self.sock.recv(read_len) - if len(d) == 0: - raise NotConnectedError - - data = data + d - read_len -= len(d) - except select.error, ex: - if type(ex) is types.TupleType: - if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN: - raise ex - else: - raise ex - - self.feedData(data) + +import os, logging, select, socket, struct, errno +from smb_constants import * +from smb_structs import * +from base import SMB, NotConnectedError, NotReadyError, SMBTimeout + + +class SMBConnection(SMB): + + log = logging.getLogger('SMB.SMBConnection') + + #: SMB messages will never be signed regardless of remote server's configurations; access errors will occur if the remote 
server requires signing. + SIGN_NEVER = 0 + #: SMB messages will be signed when remote server supports signing but not requires signing. + SIGN_WHEN_SUPPORTED = 1 + #: SMB messages will only be signed when remote server requires signing. + SIGN_WHEN_REQUIRED = 2 + + def __init__(self, username, password, my_name, remote_name, domain = '', use_ntlm_v2 = True, sign_options = SIGN_WHEN_REQUIRED, is_direct_tcp = False): + """ + Create a new SMBConnection instance. + + *username* and *password* are the user credentials required to authenticate the underlying SMB connection with the remote server. + File operations can only be proceeded after the connection has been authenticated successfully. + + Note that you need to call *connect* method to actually establish the SMB connection to the remote server and perform authentication. + + The default TCP port for most SMB/CIFS servers using NetBIOS over TCP/IP is 139. + Some newer server installations might also support Direct hosting of SMB over TCP/IP; for these servers, the default TCP port is 445. + + :param string my_name: The local NetBIOS machine name that will identify where this connection is originating from. + You can freely choose a name as long as it contains a maximum of 15 alphanumeric characters and does not contain spaces and any of ``\/:*?";|+`` + :param string remote_name: The NetBIOS machine name of the remote server. + On windows, you can find out the machine name by right-clicking on the "My Computer" and selecting "Properties". + This parameter must be the same as what has been configured on the remote server, or else the connection will be rejected. + :param string domain: The network domain. On windows, it is known as the workgroup. Usually, it is safe to leave this parameter as an empty string. + :param boolean use_ntlm_v2: Indicates whether pysmb should be NTLMv1 or NTLMv2 authentication algorithm for authentication. 
+ The choice of NTLMv1 and NTLMv2 is configured on the remote server, and there is no mechanism to auto-detect which algorithm has been configured. + Hence, we can only "guess" or try both algorithms. + On Sambda, Windows Vista and Windows 7, NTLMv2 is enabled by default. On Windows XP, we can use NTLMv1 before NTLMv2. + :param int sign_options: Determines whether SMB messages will be signed. Default is *SIGN_WHEN_REQUIRED*. + If *SIGN_WHEN_REQUIRED* (value=2), SMB messages will only be signed when remote server requires signing. + If *SIGN_WHEN_SUPPORTED* (value=1), SMB messages will be signed when remote server supports signing but not requires signing. + If *SIGN_NEVER* (value=0), SMB messages will never be signed regardless of remote server's configurations; access errors will occur if the remote server requires signing. + :param boolean is_direct_tcp: Controls whether the NetBIOS over TCP/IP (is_direct_tcp=False) or the newer Direct hosting of SMB over TCP/IP (is_direct_tcp=True) will be used for the communication. + The default parameter is False which will use NetBIOS over TCP/IP for wider compatibility (TCP port: 139). 
+ """ + SMB.__init__(self, username, password, my_name, remote_name, domain, use_ntlm_v2, sign_options, is_direct_tcp) + self.sock = None + self.auth_result = None + self.is_busy = False + self.is_direct_tcp = is_direct_tcp + + # + # SMB (and its superclass) Methods + # + + def onAuthOK(self): + self.auth_result = True + + def onAuthFailed(self): + self.auth_result = False + + def write(self, data): + assert self.sock + data_len = len(data) + total_sent = 0 + while total_sent < data_len: + sent = self.sock.send(data[total_sent:]) + if sent == 0: + raise NotConnectedError('Server disconnected') + total_sent = total_sent + sent + + # + # Misc Properties + # + + @property + def isUsingSMB2(self): + """A convenient property to return True if the underlying SMB connection is using SMB2 protocol.""" + return self.is_using_smb2 + + + # + # Public Methods + # + + def connect(self, ip, port = 139, sock_family = socket.AF_INET, timeout = 60): + """ + Establish the SMB connection to the remote SMB/CIFS server. + + You must call this method before attempting any of the file operations with the remote server. + This method will block until the SMB connection has attempted at least one authentication. + + :return: A boolean value indicating the result of the authentication atttempt: True if authentication is successful; False, if otherwise. + """ + if self.sock: + self.sock.close() + + self.auth_result = None + self.sock = socket.socket(sock_family) + self.sock.settimeout(timeout) + self.sock.connect(( ip, port )) + + self.is_busy = True + try: + if not self.is_direct_tcp: + self.requestNMBSession() + else: + self.onNMBSessionOK() + while self.auth_result is None: + self._pollForNetBIOSPacket(timeout) + finally: + self.is_busy = False + + return self.auth_result + + def close(self): + """ + Terminate the SMB connection (if it has been started) and release any sources held by the underlying socket. 
+ """ + if self.sock: + self.sock.close() + self.sock = None + + def listShares(self, timeout = 30): + """ + Retrieve a list of shared resources on remote server. + + :return: A list of :doc:`smb.base.SharedDevice<smb_SharedDevice>` instances describing the shared resource + """ + if not self.sock: + raise NotConnectedError('Not connected to server') + + results = [ ] + + def cb(entries): + self.is_busy = False + results.extend(entries) + + def eb(failure): + self.is_busy = False + raise failure + + self.is_busy = True + try: + self._listShares(cb, eb, timeout) + while self.is_busy: + self._pollForNetBIOSPacket(timeout) + finally: + self.is_busy = False + + return results + + def listPath(self, service_name, path, + search = SMB_FILE_ATTRIBUTE_READONLY | SMB_FILE_ATTRIBUTE_HIDDEN | SMB_FILE_ATTRIBUTE_SYSTEM | SMB_FILE_ATTRIBUTE_DIRECTORY | SMB_FILE_ATTRIBUTE_ARCHIVE | SMB_FILE_ATTRIBUTE_INCL_NORMAL, + pattern = '*', timeout = 30): + """ + Retrieve a directory listing of files/folders at *path* + + For simplicity, pysmb defines a "normal" file as a file entry that is not read-only, not hidden, not system, not archive and not a directory. + It ignores other attributes like compression, indexed, sparse, temporary and encryption. + + Note that the default search parameter will query for all read-only (SMB_FILE_ATTRIBUTE_READONLY), hidden (SMB_FILE_ATTRIBUTE_HIDDEN), + system (SMB_FILE_ATTRIBUTE_SYSTEM), archive (SMB_FILE_ATTRIBUTE_ARCHIVE), normal (SMB_FILE_ATTRIBUTE_INCL_NORMAL) files + and directories (SMB_FILE_ATTRIBUTE_DIRECTORY). + If you do not need to include "normal" files in the result, define your own search parameter without the SMB_FILE_ATTRIBUTE_INCL_NORMAL constant. + SMB_FILE_ATTRIBUTE_NORMAL should be used by itself and not be used with other bit constants. 
+ + :param string/unicode service_name: the name of the shared folder for the *path* + :param string/unicode path: path relative to the *service_name* where we are interested to learn about its files/sub-folders. + :param integer search: integer value made up from a bitwise-OR of *SMB_FILE_ATTRIBUTE_xxx* bits (see smb_constants.py). + :param string/unicode pattern: the filter to apply to the results before returning to the client. + :return: A list of :doc:`smb.base.SharedFile<smb_SharedFile>` instances. + """ + if not self.sock: + raise NotConnectedError('Not connected to server') + + results = [ ] + + def cb(entries): + self.is_busy = False + results.extend(entries) + + def eb(failure): + self.is_busy = False + raise failure + + self.is_busy = True + try: + self._listPath(service_name, path, cb, eb, search = search, pattern = pattern, timeout = timeout) + while self.is_busy: + self._pollForNetBIOSPacket(timeout) + finally: + self.is_busy = False + + return results + + def listSnapshots(self, service_name, path, timeout = 30): + """ + Retrieve a list of available snapshots (shadow copies) for *path*. + + Note that snapshot features are only supported on Windows Vista Business, Enterprise and Ultimate, and on all Windows 7 editions. 
+ + :param string/unicode service_name: the name of the shared folder for the *path* + :param string/unicode path: path relative to the *service_name* where we are interested in the list of available snapshots + :return: A list of python *datetime.DateTime* instances in GMT/UTC time zone + """ + if not self.sock: + raise NotConnectedError('Not connected to server') + + results = [ ] + + def cb(entries): + self.is_busy = False + results.extend(entries) + + def eb(failure): + self.is_busy = False + raise failure + + self.is_busy = True + try: + self._listSnapshots(service_name, path, cb, eb, timeout = timeout) + while self.is_busy: + self._pollForNetBIOSPacket(timeout) + finally: + self.is_busy = False + + return results + + def getAttributes(self, service_name, path, timeout = 30): + """ + Retrieve information about the file at *path* on the *service_name*. + + :param string/unicode service_name: the name of the shared folder for the *path* + :param string/unicode path: Path of the file on the remote server. If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be raised. + :return: A :doc:`smb.base.SharedFile<smb_SharedFile>` instance containing the attributes of the file. + """ + if not self.sock: + raise NotConnectedError('Not connected to server') + + results = [ ] + + def cb(info): + self.is_busy = False + results.append(info) + + def eb(failure): + self.is_busy = False + raise failure + + self.is_busy = True + try: + self._getAttributes(service_name, path, cb, eb, timeout) + while self.is_busy: + self._pollForNetBIOSPacket(timeout) + finally: + self.is_busy = False + + return results[0] + + def getSecurity(self, service_name, path, timeout = 30): + """ + Retrieve the security descriptor of the file at *path* on the *service_name*. + + :param string/unicode service_name: the name of the shared folder for the *path* + :param string/unicode path: Path of the file on the remote server. 
If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be raised. + :return: A :class:`smb.security_descriptors.SecurityDescriptor` instance containing the security information of the file. + """ + if not self.sock: + raise NotConnectedError('Not connected to server') + + results = [ ] + + def cb(info): + self.is_busy = False + results.append(info) + + def eb(failure): + self.is_busy = False + raise failure + + self.is_busy = True + try: + self._getSecurity(service_name, path, cb, eb, timeout) + while self.is_busy: + self._pollForNetBIOSPacket(timeout) + finally: + self.is_busy = False + + return results[0] + + def retrieveFile(self, service_name, path, file_obj, timeout = 30): + """ + Retrieve the contents of the file at *path* on the *service_name* and write these contents to the provided *file_obj*. + + Use *retrieveFileFromOffset()* method if you wish to specify the offset to read from the remote *path* and/or the number of bytes to write to the *file_obj*. + + :param string/unicode service_name: the name of the shared folder for the *path* + :param string/unicode path: Path of the file on the remote server. If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be raised. + :param file_obj: A file-like object that has a *write* method. Data will be written continuously to *file_obj* until EOF is received from the remote service. + :return: A 2-element tuple of ( file attributes of the file on server, number of bytes written to *file_obj* ). + The file attributes is an integer value made up from a bitwise-OR of *SMB_FILE_ATTRIBUTE_xxx* bits (see smb_constants.py) + """ + return self.retrieveFileFromOffset(service_name, path, file_obj, 0L, -1L, timeout) + + def retrieveFileFromOffset(self, service_name, path, file_obj, offset = 0L, max_length = -1L, timeout = 30): + """ + Retrieve the contents of the file at *path* on the *service_name* and write these contents to the provided *file_obj*. 
+ + :param string/unicode service_name: the name of the shared folder for the *path* + :param string/unicode path: Path of the file on the remote server. If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be raised. + :param file_obj: A file-like object that has a *write* method. Data will be written continuously to *file_obj* up to *max_length* number of bytes. + :param integer/long offset: the offset in the remote *path* where the first byte will be read and written to *file_obj*. Must be either zero or a positive integer/long value. + :param integer/long max_length: maximum number of bytes to read from the remote *path* and write to the *file_obj*. Specify a negative value to read from *offset* to the EOF. + If zero, the method returns immediately after the file is opened successfully for reading. + :return: A 2-element tuple of ( file attributes of the file on server, number of bytes written to *file_obj* ). + The file attributes is an integer value made up from a bitwise-OR of *SMB_FILE_ATTRIBUTE_xxx* bits (see smb_constants.py) + """ + if not self.sock: + raise NotConnectedError('Not connected to server') + + results = [ ] + + def cb(r): + self.is_busy = False + results.append(r[1:]) + + def eb(failure): + self.is_busy = False + raise failure + + self.is_busy = True + try: + self._retrieveFileFromOffset(service_name, path, file_obj, cb, eb, offset, max_length, timeout = timeout) + while self.is_busy: + self._pollForNetBIOSPacket(timeout) + finally: + self.is_busy = False + + return results[0] + + def storeFile(self, service_name, path, file_obj, timeout = 30): + """ + Store the contents of the *file_obj* at *path* on the *service_name*. + If the file already exists on the remote server, it will be truncated and overwritten. + + :param string/unicode service_name: the name of the shared folder for the *path* + :param string/unicode path: Path of the file on the remote server. 
If the file at *path* does not exist, it will be created. Otherwise, it will be overwritten. + If the *path* refers to a folder or the file cannot be opened for writing, an :doc:`OperationFailure<smb_exceptions>` will be raised. + :param file_obj: A file-like object that has a *read* method. Data will read continuously from *file_obj* until EOF. + :return: Number of bytes uploaded + """ + return self.storeFileFromOffset(service_name, path, file_obj, 0L, True, timeout) + + def storeFileFromOffset(self, service_name, path, file_obj, offset = 0L, truncate = False, timeout = 30): + """ + Store the contents of the *file_obj* at *path* on the *service_name*. + + :param string/unicode service_name: the name of the shared folder for the *path* + :param string/unicode path: Path of the file on the remote server. If the file at *path* does not exist, it will be created. + If the *path* refers to a folder or the file cannot be opened for writing, an :doc:`OperationFailure<smb_exceptions>` will be raised. + :param file_obj: A file-like object that has a *read* method. Data will read continuously from *file_obj* until EOF. + :param offset: Long integer value which specifies the offset in the remote server to start writing. First byte of the file is 0. + :param truncate: Boolean value. If True and the file exists on the remote server, it will be truncated first before writing. Default is False. + :return: the file position where the next byte will be written. 
+ """ + if not self.sock: + raise NotConnectedError('Not connected to server') + + results = [ ] + + def cb(r): + self.is_busy = False + results.append(r[1]) + + def eb(failure): + self.is_busy = False + raise failure + + self.is_busy = True + try: + self._storeFileFromOffset(service_name, path, file_obj, cb, eb, offset, truncate = truncate, timeout = timeout) + while self.is_busy: + self._pollForNetBIOSPacket(timeout) + finally: + self.is_busy = False + + return results[0] + + def deleteFiles(self, service_name, path_file_pattern, timeout = 30): + """ + Delete one or more regular files. It supports the use of wildcards in file names, allowing for deletion of multiple files in a single request. + + :param string/unicode service_name: Contains the name of the shared folder. + :param string/unicode path_file_pattern: The pathname of the file(s) to be deleted, relative to the service_name. + Wildcards may be used in th filename component of the path. + If your path/filename contains non-English characters, you must pass in an unicode string. + :return: None + """ + if not self.sock: + raise NotConnectedError('Not connected to server') + + def cb(r): + self.is_busy = False + + def eb(failure): + self.is_busy = False + raise failure + + self.is_busy = True + try: + self._deleteFiles(service_name, path_file_pattern, cb, eb, timeout = timeout) + while self.is_busy: + self._pollForNetBIOSPacket(timeout) + finally: + self.is_busy = False + + def resetFileAttributes(self, service_name, path_file_pattern, timeout = 30): + """ + Reset file attributes of one or more regular files or folders. + It supports the use of wildcards in file names, allowing for unlocking of multiple files/folders in a single request. + This function is very helpful when deleting files/folders that are read-only. + Note: this function is currently only implemented for SMB2! Technically, it sets the FILE_ATTRIBUTE_NORMAL flag, therefore clearing all other flags. 
(See https://msdn.microsoft.com/en-us/library/cc232110.aspx for further information) + + :param string/unicode service_name: Contains the name of the shared folder. + :param string/unicode path_file_pattern: The pathname of the file(s) to be deleted, relative to the service_name. + Wildcards may be used in the filename component of the path. + If your path/filename contains non-English characters, you must pass in an unicode string. + :return: None + """ + if not self.sock: + raise NotConnectedError('Not connected to server') + + def cb(r): + self.is_busy = False + + def eb(failure): + self.is_busy = False + raise failure + + self.is_busy = True + try: + self._resetFileAttributes(service_name, path_file_pattern, cb, eb, timeout = timeout) + while self.is_busy: + self._pollForNetBIOSPacket(timeout) + finally: + self.is_busy = False + + def createDirectory(self, service_name, path, timeout = 30): + """ + Creates a new directory *path* on the *service_name*. + + :param string/unicode service_name: Contains the name of the shared folder. + :param string/unicode path: The path of the new folder (relative to) the shared folder. + If the path contains non-English characters, an unicode string must be used to pass in the path. + :return: None + """ + if not self.sock: + raise NotConnectedError('Not connected to server') + + def cb(r): + self.is_busy = False + + def eb(failure): + self.is_busy = False + raise failure + + self.is_busy = True + try: + self._createDirectory(service_name, path, cb, eb, timeout = timeout) + while self.is_busy: + self._pollForNetBIOSPacket(timeout) + finally: + self.is_busy = False + + def deleteDirectory(self, service_name, path, timeout = 30): + """ + Delete the empty folder at *path* on *service_name* + + :param string/unicode service_name: Contains the name of the shared folder. + :param string/unicode path: The path of the to-be-deleted folder (relative to) the shared folder. 
+ If the path contains non-English characters, an unicode string must be used to pass in the path. + :return: None + """ + if not self.sock: + raise NotConnectedError('Not connected to server') + + def cb(r): + self.is_busy = False + + def eb(failure): + self.is_busy = False + raise failure + + self.is_busy = True + try: + self._deleteDirectory(service_name, path, cb, eb, timeout = timeout) + while self.is_busy: + self._pollForNetBIOSPacket(timeout) + finally: + self.is_busy = False + + def rename(self, service_name, old_path, new_path, timeout = 30): + """ + Rename a file or folder at *old_path* to *new_path* shared at *service_name*. Note that this method cannot be used to rename file/folder across different shared folders + + *old_path* and *new_path* are string/unicode referring to the old and new path of the renamed resources (relative to) the shared folder. + If the path contains non-English characters, an unicode string must be used to pass in the path. + + :param string/unicode service_name: Contains the name of the shared folder. + :return: None + """ + if not self.sock: + raise NotConnectedError('Not connected to server') + + def cb(r): + self.is_busy = False + + def eb(failure): + self.is_busy = False + raise failure + + self.is_busy = True + try: + self._rename(service_name, old_path, new_path, cb, eb) + while self.is_busy: + self._pollForNetBIOSPacket(timeout) + finally: + self.is_busy = False + + def echo(self, data, timeout = 10): + """ + Send an echo command containing *data* to the remote SMB/CIFS server. The remote SMB/CIFS will reply with the same *data*. + + :param bytes data: Data to send to the remote server. Must be a bytes object. 
+ :return: The *data* parameter + """ + if not self.sock: + raise NotConnectedError('Not connected to server') + + results = [ ] + + def cb(r): + self.is_busy = False + results.append(r) + + def eb(failure): + self.is_busy = False + raise failure + + self.is_busy = True + try: + self._echo(data, cb, eb) + while self.is_busy: + self._pollForNetBIOSPacket(timeout) + finally: + self.is_busy = False + + return results[0] + + # + # Protected Methods + # + + def _pollForNetBIOSPacket(self, timeout): + expiry_time = time.time() + timeout + read_len = 4 + data = '' + + while read_len > 0: + try: + if expiry_time < time.time(): + raise SMBTimeout + + ready, _, _ = select.select([ self.sock.fileno() ], [ ], [ ], timeout) + if not ready: + raise SMBTimeout + + d = self.sock.recv(read_len) + if len(d) == 0: + raise NotConnectedError + + data = data + d + read_len -= len(d) + except select.error, ex: + if isinstance(ex, types.TupleType): + if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN: + raise ex + else: + raise ex + + type_, flags, length = struct.unpack('>BBH', data) + if type_ == 0x0: + # This is a Direct TCP packet + # The length is specified in the header from byte 8. (0-indexed) + # we read a structure assuming NBT, so to get the real length + # combine the length and flag fields together + length = length + (flags << 16) + else: + # This is a NetBIOS over TCP (NBT) packet + # The length is specified in the header from byte 16. 
(0-indexed) + if flags & 0x01: + length = length | 0x10000 + + read_len = length + while read_len > 0: + try: + if expiry_time < time.time(): + raise SMBTimeout + + ready, _, _ = select.select([ self.sock.fileno() ], [ ], [ ], timeout) + if not ready: + raise SMBTimeout + + d = self.sock.recv(read_len) + if len(d) == 0: + raise NotConnectedError + + data = data + d + read_len -= len(d) + except select.error, ex: + if isinstance(ex, types.TupleType): + if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN: + raise ex + else: + raise ex + + self.feedData(data) diff --git a/plugin.video.alfa/lib/sambatools/smb/SMBHandler.py b/plugin.video.alfa/lib/sambatools/smb/SMBHandler.py index f8685f73..137e943a 100755 --- a/plugin.video.alfa/lib/sambatools/smb/SMBHandler.py +++ b/plugin.video.alfa/lib/sambatools/smb/SMBHandler.py @@ -1,102 +1,97 @@ -import mimetools -import mimetypes -import os -import socket -import sys -import tempfile -import urllib2 -from urllib import (unquote, addinfourl, splitport, splitattr, splituser, splitpasswd) - -from nmb.NetBIOS import NetBIOS - -from smb.SMBConnection import SMBConnection - -try: - from cStringIO import StringIO -except ImportError: - from StringIO import StringIO - -USE_NTLM = True -MACHINE_NAME = None - -class SMBHandler(urllib2.BaseHandler): - - def smb_open(self, req): - global USE_NTLM, MACHINE_NAME - - host = req.get_host() - if not host: - raise urllib2.URLError('SMB error: no host given') - host, port = splitport(host) - if port is None: - port = 139 - else: - port = int(port) - - # username/password handling - user, host = splituser(host) - if user: - user, passwd = splitpasswd(user) - else: - passwd = None - host = unquote(host) - user = user or '' - - domain = '' - if ';' in user: - domain, user = user.split(';', 1) - - passwd = passwd or '' - myname = MACHINE_NAME or self.generateClientMachineName() - - n = NetBIOS() - names = n.queryIPForName(host) - if names: - server_name = names[0] - else: - raise urllib2.URLError('SMB 
error: Hostname does not reply back with its machine name') - - path, attrs = splitattr(req.get_selector()) - if path.startswith('/'): - path = path[1:] - dirs = path.split('/') - dirs = map(unquote, dirs) - service, path = dirs[0], '/'.join(dirs[1:]) - - try: - conn = SMBConnection(user, passwd, myname, server_name, domain=domain, use_ntlm_v2 = USE_NTLM) - conn.connect(host, port) - - if req.has_data(): - data_fp = req.get_data() - filelen = conn.storeFile(service, path, data_fp) - - headers = "Content-length: 0\n" - fp = StringIO("") - else: - fp = self.createTempFile() - file_attrs, retrlen = conn.retrieveFile(service, path, fp) - fp.seek(0) - - headers = "" - mtype = mimetypes.guess_type(req.get_full_url())[0] - if mtype: - headers += "Content-type: %s\n" % mtype - if retrlen is not None and retrlen >= 0: - headers += "Content-length: %d\n" % retrlen - - sf = StringIO(headers) - headers = mimetools.Message(sf) - - return addinfourl(fp, headers, req.get_full_url()) - except Exception, ex: - raise urllib2.URLError, ('smb error: %s' % ex), sys.exc_info()[2] - - def createTempFile(self): - return tempfile.TemporaryFile() - - def generateClientMachineName(self): - hostname = socket.gethostname() - if hostname: - return hostname.split('.')[0] - return 'SMB%d' % os.getpid() + +import os, sys, socket, urllib2, mimetypes, mimetools, tempfile +from urllib import (unwrap, unquote, splittype, splithost, quote, + addinfourl, splitport, splittag, + splitattr, ftpwrapper, splituser, splitpasswd, splitvalue) +from nmb.NetBIOS import NetBIOS +from smb.SMBConnection import SMBConnection + +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + +USE_NTLM = True +MACHINE_NAME = None + +class SMBHandler(urllib2.BaseHandler): + + def smb_open(self, req): + global USE_NTLM, MACHINE_NAME + + host = req.get_host() + if not host: + raise urllib2.URLError('SMB error: no host given') + host, port = splitport(host) + if port is None: + port = 139 + 
else: + port = int(port) + + # username/password handling + user, host = splituser(host) + if user: + user, passwd = splitpasswd(user) + else: + passwd = None + host = unquote(host) + user = user or '' + + domain = '' + if ';' in user: + domain, user = user.split(';', 1) + + passwd = passwd or '' + myname = MACHINE_NAME or self.generateClientMachineName() + + n = NetBIOS() + names = n.queryIPForName(host) + if names: + server_name = names[0] + else: + raise urllib2.URLError('SMB error: Hostname does not reply back with its machine name') + + path, attrs = splitattr(req.get_selector()) + if path.startswith('/'): + path = path[1:] + dirs = path.split('/') + dirs = map(unquote, dirs) + service, path = dirs[0], '/'.join(dirs[1:]) + + try: + conn = SMBConnection(user, passwd, myname, server_name, domain=domain, use_ntlm_v2 = USE_NTLM) + conn.connect(host, port) + + if req.has_data(): + data_fp = req.get_data() + filelen = conn.storeFile(service, path, data_fp) + + headers = "Content-length: 0\n" + fp = StringIO("") + else: + fp = self.createTempFile() + file_attrs, retrlen = conn.retrieveFile(service, path, fp) + fp.seek(0) + + headers = "" + mtype = mimetypes.guess_type(req.get_full_url())[0] + if mtype: + headers += "Content-type: %s\n" % mtype + if retrlen is not None and retrlen >= 0: + headers += "Content-length: %d\n" % retrlen + + sf = StringIO(headers) + headers = mimetools.Message(sf) + + return addinfourl(fp, headers, req.get_full_url()) + except Exception, ex: + raise urllib2.URLError, ('smb error: %s' % ex), sys.exc_info()[2] + + def createTempFile(self): + return tempfile.TemporaryFile() + + def generateClientMachineName(self): + hostname = socket.gethostname() + if hostname: + return hostname.split('.')[0] + return 'SMB%d' % os.getpid() diff --git a/plugin.video.alfa/lib/sambatools/smb/SMBProtocol.py b/plugin.video.alfa/lib/sambatools/smb/SMBProtocol.py index 3795fae2..1238d637 100755 --- a/plugin.video.alfa/lib/sambatools/smb/SMBProtocol.py +++ 
b/plugin.video.alfa/lib/sambatools/smb/SMBProtocol.py @@ -1,398 +1,409 @@ -from twisted.internet import reactor, defer -from twisted.internet.protocol import ClientFactory, Protocol - -from base import SMB, NotConnectedError, NotReadyError, SMBTimeout -from smb_structs import * - -__all__ = [ 'SMBProtocolFactory', 'NotConnectedError', 'NotReadyError' ] - - -class SMBProtocol(Protocol, SMB): - - log = logging.getLogger('SMB.SMBProtocol') - - # - # Protocol Methods - # - - def connectionMade(self): - self.factory.instance = self - if not self.is_direct_tcp: - self.requestNMBSession() - else: - self.onNMBSessionOK() - - def connectionLost(self, reason): - if self.factory.instance == self: - self.instance = None - - def dataReceived(self, data): - self.feedData(data) - - # - # SMB (and its superclass) Methods - # - - def write(self, data): - self.transport.write(data) - - def onAuthOK(self): - if self.factory.instance == self: - self.factory.onAuthOK() - reactor.callLater(1, self._cleanupPendingRequests) - - def onAuthFailed(self): - if self.factory.instance == self: - self.factory.onAuthFailed() - - def onNMBSessionFailed(self): - self.log.error('Cannot establish NetBIOS session. You might have provided a wrong remote_name') - - # - # Protected Methods - # - - def _cleanupPendingRequests(self): - if self.factory.instance == self: - now = time.time() - to_remove = [] - for mid, r in self.pending_requests.iteritems(): - if r.expiry_time < now: - try: - r.errback(SMBTimeout()) - except Exception: pass - to_remove.append(mid) - - for mid in to_remove: - del self.pending_requests[mid] - - reactor.callLater(1, self._cleanupPendingRequests) - - -class SMBProtocolFactory(ClientFactory): - - protocol = SMBProtocol - log = logging.getLogger('SMB.SMBFactory') - - #: SMB messages will never be signed regardless of remote server's configurations; access errors will occur if the remote server requires signing. 
- SIGN_NEVER = 0 - #: SMB messages will be signed when remote server supports signing but not requires signing. - SIGN_WHEN_SUPPORTED = 1 - #: SMB messages will only be signed when remote server requires signing. - SIGN_WHEN_REQUIRED = 2 - - def __init__(self, username, password, my_name, remote_name, domain = '', use_ntlm_v2 = True, sign_options = SIGN_WHEN_REQUIRED, is_direct_tcp = False): - """ - Create a new SMBProtocolFactory instance. You will pass this instance to *reactor.connectTCP()* which will then instantiate the TCP connection to the remote SMB/CIFS server. - Note that the default TCP port for most SMB/CIFS servers using NetBIOS over TCP/IP is 139. - Some newer server installations might also support Direct hosting of SMB over TCP/IP; for these servers, the default TCP port is 445. - - *username* and *password* are the user credentials required to authenticate the underlying SMB connection with the remote server. - File operations can only be proceeded after the connection has been authenticated successfully. - - :param string my_name: The local NetBIOS machine name that will identify where this connection is originating from. - You can freely choose a name as long as it contains a maximum of 15 alphanumeric characters and does not contain spaces and any of ``\/:*?";|+``. - :param string remote_name: The NetBIOS machine name of the remote server. - On windows, you can find out the machine name by right-clicking on the "My Computer" and selecting "Properties". - This parameter must be the same as what has been configured on the remote server, or else the connection will be rejected. - :param string domain: The network domain. On windows, it is known as the workgroup. Usually, it is safe to leave this parameter as an empty string. - :param boolean use_ntlm_v2: Indicates whether pysmb should be NTLMv1 or NTLMv2 authentication algorithm for authentication. 
- The choice of NTLMv1 and NTLMv2 is configured on the remote server, and there is no mechanism to auto-detect which algorithm has been configured. - Hence, we can only "guess" or try both algorithms. - On Sambda, Windows Vista and Windows 7, NTLMv2 is enabled by default. On Windows XP, we can use NTLMv1 before NTLMv2. - :param int sign_options: Determines whether SMB messages will be signed. Default is *SIGN_WHEN_REQUIRED*. - If *SIGN_WHEN_REQUIRED* (value=2), SMB messages will only be signed when remote server requires signing. - If *SIGN_WHEN_SUPPORTED* (value=1), SMB messages will be signed when remote server supports signing but not requires signing. - If *SIGN_NEVER* (value=0), SMB messages will never be signed regardless of remote server's configurations; access errors will occur if the remote server requires signing. - :param boolean is_direct_tcp: Controls whether the NetBIOS over TCP/IP (is_direct_tcp=False) or the newer Direct hosting of SMB over TCP/IP (is_direct_tcp=True) will be used for the communication. - The default parameter is False which will use NetBIOS over TCP/IP for wider compatibility (TCP port: 139). - """ - self.username = username - self.password = password - self.my_name = my_name - self.remote_name = remote_name - self.domain = domain - self.use_ntlm_v2 = use_ntlm_v2 - self.sign_options = sign_options - self.is_direct_tcp = is_direct_tcp - self.instance = None #: The single SMBProtocol instance for each SMBProtocolFactory instance. Usually, you should not need to touch this attribute directly. 
- - # - # Public Property - # - - @property - def isReady(self): - """A convenient property to return True if the underlying SMB connection has connected to remote server, has successfully authenticated itself and is ready for file operations.""" - return bool(self.instance and self.instance.has_authenticated) - - @property - def isUsingSMB2(self): - """A convenient property to return True if the underlying SMB connection is using SMB2 protocol.""" - return self.instance and self.instance.is_using_smb2 - - # - # Public Methods for Callbacks - # - - def onAuthOK(self): - """ - Override this method in your *SMBProtocolFactory* subclass to add in post-authentication handling. - This method will be called when the server has replied that the SMB connection has been successfully authenticated. - File operations can proceed when this method has been called. - """ - pass - - def onAuthFailed(self): - """ - Override this method in your *SMBProtocolFactory* subclass to add in post-authentication handling. - This method will be called when the server has replied that the SMB connection has been successfully authenticated. - - If you want to retry authenticating from this method, - 1. Disconnect the underlying SMB connection (call ``self.instance.transport.loseConnection()``) - 2. Create a new SMBProtocolFactory subclass instance with different user credientials or different NTLM algorithm flag. - 3. Call ``reactor.connectTCP`` with the new instance to re-establish the SMB connection - """ - pass - - # - # Public Methods - # - - def listShares(self, timeout = 30): - """ - Retrieve a list of shared resources on remote server. - - :param integer/float timeout: Number of seconds that pysmb will wait before raising *SMBTimeout* via the returned *Deferred* instance's *errback* method. - :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a list of :doc:`smb.base.SharedDevice<smb_SharedDevice>` instances. 
- """ - if not self.instance: - raise NotConnectedError('Not connected to server') - - d = defer.Deferred() - self.instance._listShares(d.callback, d.errback, timeout) - return d - - def listPath(self, service_name, path, - search = SMB_FILE_ATTRIBUTE_READONLY | SMB_FILE_ATTRIBUTE_HIDDEN | SMB_FILE_ATTRIBUTE_SYSTEM | SMB_FILE_ATTRIBUTE_DIRECTORY | SMB_FILE_ATTRIBUTE_ARCHIVE, - pattern = '*', timeout = 30): - """ - Retrieve a directory listing of files/folders at *path* - - :param string/unicode service_name: the name of the shared folder for the *path* - :param string/unicode path: path relative to the *service_name* where we are interested to learn about its files/sub-folders. - :param integer search: integer value made up from a bitwise-OR of *SMB_FILE_ATTRIBUTE_xxx* bits (see smb_constants.py). - The default *search* value will query for all read-only, hidden, system, archive files and directories. - :param string/unicode pattern: the filter to apply to the results before returning to the client. - :param integer/float timeout: Number of seconds that pysmb will wait before raising *SMBTimeout* via the returned *Deferred* instance's *errback* method. - :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a list of :doc:`smb.base.SharedFile<smb_SharedFile>` instances. - """ - if not self.instance: - raise NotConnectedError('Not connected to server') - - d = defer.Deferred() - self.instance._listPath(service_name, path, d.callback, d.errback, search = search, pattern = pattern, timeout = timeout) - return d - - def listSnapshots(self, service_name, path, timeout = 30): - """ - Retrieve a list of available snapshots (a.k.a. shadow copies) for *path*. - - Note that snapshot features are only supported on Windows Vista Business, Enterprise and Ultimate, and on all Windows 7 editions. 
- - :param string/unicode service_name: the name of the shared folder for the *path* - :param string/unicode path: path relative to the *service_name* where we are interested in the list of available snapshots - :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a list of python *datetime.DateTime* - instances in GMT/UTC time zone - """ - if not self.instance: - raise NotConnectedError('Not connected to server') - - d = defer.Deferred() - self.instance._listSnapshots(service_name, path, d.callback, d.errback, timeout = timeout) - return d - - def getAttributes(self, service_name, path, timeout = 30): - """ - Retrieve information about the file at *path* on the *service_name*. - - :param string/unicode service_name: the name of the shared folder for the *path* - :param string/unicode path: Path of the file on the remote server. If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be raised. - :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a :doc:`smb.base.SharedFile<smb_SharedFile>` instance containing the attributes of the file. - """ - if not self.instance: - raise NotConnectedError('Not connected to server') - - d = defer.Deferred() - self.instance._getAttributes(service_name, path, d.callback, d.errback, timeout = timeout) - return d - - def retrieveFile(self, service_name, path, file_obj, timeout = 30): - """ - Retrieve the contents of the file at *path* on the *service_name* and write these contents to the provided *file_obj*. - - Use *retrieveFileFromOffset()* method if you need to specify the offset to read from the remote *path* and/or the maximum number of bytes to write to the *file_obj*. - - The meaning of the *timeout* parameter will be different from other file operation methods. 
As the downloaded file usually exceeeds the maximum size - of each SMB/CIFS data message, it will be packetized into a series of request messages (each message will request about about 60kBytes). - The *timeout* parameter is an integer/float value that specifies the timeout interval for these individual SMB/CIFS message to be transmitted and downloaded from the remote SMB/CIFS server. - - :param string/unicode service_name: the name of the shared folder for the *path* - :param string/unicode path: Path of the file on the remote server. If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be called in the returned *Deferred* errback. - :param file_obj: A file-like object that has a *write* method. Data will be written continuously to *file_obj* until EOF is received from the remote service. - :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a 3-element tuple of ( *file_obj*, file attributes of the file on server, number of bytes written to *file_obj* ). - The file attributes is an integer value made up from a bitwise-OR of *SMB_FILE_ATTRIBUTE_xxx* bits (see smb_constants.py) - """ - return self.retrieveFileFromOffset(service_name, path, file_obj, 0L, -1L, timeout) - - def retrieveFileFromOffset(self, service_name, path, file_obj, offset = 0L, max_length = -1L, timeout = 30): - """ - Retrieve the contents of the file at *path* on the *service_name* and write these contents to the provided *file_obj*. - - The meaning of the *timeout* parameter will be different from other file operation methods. As the downloaded file usually exceeeds the maximum size - of each SMB/CIFS data message, it will be packetized into a series of request messages (each message will request about about 60kBytes). - The *timeout* parameter is an integer/float value that specifies the timeout interval for these individual SMB/CIFS message to be transmitted and downloaded from the remote SMB/CIFS server. 
- - :param string/unicode service_name: the name of the shared folder for the *path* - :param string/unicode path: Path of the file on the remote server. If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be called in the returned *Deferred* errback. - :param file_obj: A file-like object that has a *write* method. Data will be written continuously to *file_obj* until EOF is received from the remote service. - :param integer/long offset: the offset in the remote *path* where the first byte will be read and written to *file_obj*. Must be either zero or a positive integer/long value. - :param integer/long max_length: maximum number of bytes to read from the remote *path* and write to the *file_obj*. Specify a negative value to read from *offset* to the EOF. - If zero, the *Deferred* callback is invoked immediately after the file is opened successfully for reading. - :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a 3-element tuple of ( *file_obj*, file attributes of the file on server, number of bytes written to *file_obj* ). - The file attributes is an integer value made up from a bitwise-OR of *SMB_FILE_ATTRIBUTE_xxx* bits (see smb_constants.py) - """ - if not self.instance: - raise NotConnectedError('Not connected to server') - - d = defer.Deferred() - self.instance._retrieveFileFromOffset(service_name, path, file_obj, d.callback, d.errback, offset, max_length, timeout = timeout) - return d - - def storeFile(self, service_name, path, file_obj, timeout = 30): - """ - Store the contents of the *file_obj* at *path* on the *service_name*. - - The meaning of the *timeout* parameter will be different from other file operation methods. As the uploaded file usually exceeeds the maximum size - of each SMB/CIFS data message, it will be packetized into a series of messages (usually about 60kBytes). 
- The *timeout* parameter is an integer/float value that specifies the timeout interval for these individual SMB/CIFS message to be transmitted and acknowledged - by the remote SMB/CIFS server. - - :param string/unicode service_name: the name of the shared folder for the *path* - :param string/unicode path: Path of the file on the remote server. If the file at *path* does not exist, it will be created. Otherwise, it will be overwritten. - If the *path* refers to a folder or the file cannot be opened for writing, an :doc:`OperationFailure<smb_exceptions>` will be called in the returned *Deferred* errback. - :param file_obj: A file-like object that has a *read* method. Data will read continuously from *file_obj* until EOF. - :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a 2-element tuple of ( *file_obj*, number of bytes uploaded ). - """ - if not self.instance: - raise NotConnectedError('Not connected to server') - - d = defer.Deferred() - self.instance._storeFile(service_name, path, file_obj, d.callback, d.errback, timeout = timeout) - return d - - def deleteFiles(self, service_name, path_file_pattern, timeout = 30): - """ - Delete one or more regular files. It supports the use of wildcards in file names, allowing for deletion of multiple files in a single request. - - :param string/unicode service_name: Contains the name of the shared folder. - :param string/unicode path_file_pattern: The pathname of the file(s) to be deleted, relative to the service_name. - Wildcards may be used in th filename component of the path. - If your path/filename contains non-English characters, you must pass in an unicode string. - :param integer/float timeout: Number of seconds that pysmb will wait before raising *SMBTimeout* via the returned *Deferred* instance's *errback* method. - :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with the *path_file_pattern* parameter. 
- """ - if not self.instance: - raise NotConnectedError('Not connected to server') - - d = defer.Deferred() - self.instance._deleteFiles(service_name, path_file_pattern, d.callback, d.errback, timeout = timeout) - return d - - def createDirectory(self, service_name, path): - """ - Creates a new directory *path* on the *service_name*. - - :param string/unicode service_name: Contains the name of the shared folder. - :param string/unicode path: The path of the new folder (relative to) the shared folder. - If the path contains non-English characters, an unicode string must be used to pass in the path. - :param integer/float timeout: Number of seconds that pysmb will wait before raising *SMBTimeout* via the returned *Deferred* instance's *errback* method. - :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with the *path* parameter. - """ - if not self.instance: - raise NotConnectedError('Not connected to server') - - d = defer.Deferred() - self.instance._createDirectory(service_name, path, d.callback, d.errback) - return d - - def deleteDirectory(self, service_name, path): - """ - Delete the empty folder at *path* on *service_name* - - :param string/unicode service_name: Contains the name of the shared folder. - :param string/unicode path: The path of the to-be-deleted folder (relative to) the shared folder. - If the path contains non-English characters, an unicode string must be used to pass in the path. - :param integer/float timeout: Number of seconds that pysmb will wait before raising *SMBTimeout* via the returned *Deferred* instance's *errback* method. - :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with the *path* parameter. 
- """ - if not self.instance: - raise NotConnectedError('Not connected to server') - - d = defer.Deferred() - self.instance._deleteDirectory(service_name, path, d.callback, d.errback) - return d - - def rename(self, service_name, old_path, new_path): - """ - Rename a file or folder at *old_path* to *new_path* shared at *service_name*. Note that this method cannot be used to rename file/folder across different shared folders - - *old_path* and *new_path* are string/unicode referring to the old and new path of the renamed resources (relative to) the shared folder. - If the path contains non-English characters, an unicode string must be used to pass in the path. - - :param string/unicode service_name: Contains the name of the shared folder. - :param integer/float timeout: Number of seconds that pysmb will wait before raising *SMBTimeout* via the returned *Deferred* instance's *errback* method. - :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a 2-element tuple of ( *old_path*, *new_path* ). - """ - if not self.instance: - raise NotConnectedError('Not connected to server') - - d = defer.Deferred() - self.instance._rename(service_name, old_path, new_path, d.callback, d.errback) - return d - - def echo(self, data, timeout = 10): - """ - Send an echo command containing *data* to the remote SMB/CIFS server. The remote SMB/CIFS will reply with the same *data*. - - :param string data: Data to send to the remote server. - :param integer/float timeout: Number of seconds that pysmb will wait before raising *SMBTimeout* via the returned *Deferred* instance's *errback* method. - :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with the *data* parameter. 
- """ - if not self.instance: - raise NotConnectedError('Not connected to server') - - d = defer.Deferred() - self.instance._echo(data, d.callback, d.errback, timeout) - return d - - def closeConnection(self): - """ - Disconnect from the remote SMB/CIFS server. The TCP connection will be closed at the earliest opportunity after this method returns. - - :return: None - """ - if not self.instance: - raise NotConnectedError('Not connected to server') - - self.instance.transport.loseConnection() - - # - # ClientFactory methods - # (Do not touch these unless you know what you are doing) - # - - def buildProtocol(self, addr): - p = self.protocol(self.username, self.password, self.my_name, self.remote_name, self.domain, self.use_ntlm_v2, self.sign_options, self.is_direct_tcp) - p.factory = self - return p + +import os, logging, time +from twisted.internet import reactor, defer +from twisted.internet.protocol import ClientFactory, Protocol +from smb_constants import * +from smb_structs import * +from base import SMB, NotConnectedError, NotReadyError, SMBTimeout + + +__all__ = [ 'SMBProtocolFactory', 'NotConnectedError', 'NotReadyError' ] + + +class SMBProtocol(Protocol, SMB): + + log = logging.getLogger('SMB.SMBProtocol') + + # + # Protocol Methods + # + + def connectionMade(self): + self.factory.instance = self + if not self.is_direct_tcp: + self.requestNMBSession() + else: + self.onNMBSessionOK() + + def connectionLost(self, reason): + if self.factory.instance == self: + self.instance = None + + def dataReceived(self, data): + self.feedData(data) + + # + # SMB (and its superclass) Methods + # + + def write(self, data): + self.transport.write(data) + + def onAuthOK(self): + if self.factory.instance == self: + self.factory.onAuthOK() + reactor.callLater(1, self._cleanupPendingRequests) + + def onAuthFailed(self): + if self.factory.instance == self: + self.factory.onAuthFailed() + + def onNMBSessionFailed(self): + self.log.error('Cannot establish NetBIOS session. 
You might have provided a wrong remote_name') + + # + # Protected Methods + # + + def _cleanupPendingRequests(self): + if self.factory.instance == self: + now = time.time() + to_remove = [] + for mid, r in self.pending_requests.iteritems(): + if r.expiry_time < now: + try: + r.errback(SMBTimeout()) + except Exception: pass + to_remove.append(mid) + + for mid in to_remove: + del self.pending_requests[mid] + + reactor.callLater(1, self._cleanupPendingRequests) + + +class SMBProtocolFactory(ClientFactory): + + protocol = SMBProtocol + log = logging.getLogger('SMB.SMBFactory') + + #: SMB messages will never be signed regardless of remote server's configurations; access errors will occur if the remote server requires signing. + SIGN_NEVER = 0 + #: SMB messages will be signed when remote server supports signing but not requires signing. + SIGN_WHEN_SUPPORTED = 1 + #: SMB messages will only be signed when remote server requires signing. + SIGN_WHEN_REQUIRED = 2 + + def __init__(self, username, password, my_name, remote_name, domain = '', use_ntlm_v2 = True, sign_options = SIGN_WHEN_REQUIRED, is_direct_tcp = False): + """ + Create a new SMBProtocolFactory instance. You will pass this instance to *reactor.connectTCP()* which will then instantiate the TCP connection to the remote SMB/CIFS server. + Note that the default TCP port for most SMB/CIFS servers using NetBIOS over TCP/IP is 139. + Some newer server installations might also support Direct hosting of SMB over TCP/IP; for these servers, the default TCP port is 445. + + *username* and *password* are the user credentials required to authenticate the underlying SMB connection with the remote server. + File operations can only be proceeded after the connection has been authenticated successfully. + + :param string my_name: The local NetBIOS machine name that will identify where this connection is originating from. 
+ You can freely choose a name as long as it contains a maximum of 15 alphanumeric characters and does not contain spaces and any of ``\/:*?";|+``. + :param string remote_name: The NetBIOS machine name of the remote server. + On windows, you can find out the machine name by right-clicking on the "My Computer" and selecting "Properties". + This parameter must be the same as what has been configured on the remote server, or else the connection will be rejected. + :param string domain: The network domain. On windows, it is known as the workgroup. Usually, it is safe to leave this parameter as an empty string. + :param boolean use_ntlm_v2: Indicates whether pysmb should be NTLMv1 or NTLMv2 authentication algorithm for authentication. + The choice of NTLMv1 and NTLMv2 is configured on the remote server, and there is no mechanism to auto-detect which algorithm has been configured. + Hence, we can only "guess" or try both algorithms. + On Sambda, Windows Vista and Windows 7, NTLMv2 is enabled by default. On Windows XP, we can use NTLMv1 before NTLMv2. + :param int sign_options: Determines whether SMB messages will be signed. Default is *SIGN_WHEN_REQUIRED*. + If *SIGN_WHEN_REQUIRED* (value=2), SMB messages will only be signed when remote server requires signing. + If *SIGN_WHEN_SUPPORTED* (value=1), SMB messages will be signed when remote server supports signing but not requires signing. + If *SIGN_NEVER* (value=0), SMB messages will never be signed regardless of remote server's configurations; access errors will occur if the remote server requires signing. + :param boolean is_direct_tcp: Controls whether the NetBIOS over TCP/IP (is_direct_tcp=False) or the newer Direct hosting of SMB over TCP/IP (is_direct_tcp=True) will be used for the communication. + The default parameter is False which will use NetBIOS over TCP/IP for wider compatibility (TCP port: 139). 
+ """ + self.username = username + self.password = password + self.my_name = my_name + self.remote_name = remote_name + self.domain = domain + self.use_ntlm_v2 = use_ntlm_v2 + self.sign_options = sign_options + self.is_direct_tcp = is_direct_tcp + self.instance = None #: The single SMBProtocol instance for each SMBProtocolFactory instance. Usually, you should not need to touch this attribute directly. + + # + # Public Property + # + + @property + def isReady(self): + """A convenient property to return True if the underlying SMB connection has connected to remote server, has successfully authenticated itself and is ready for file operations.""" + return bool(self.instance and self.instance.has_authenticated) + + @property + def isUsingSMB2(self): + """A convenient property to return True if the underlying SMB connection is using SMB2 protocol.""" + return self.instance and self.instance.is_using_smb2 + + # + # Public Methods for Callbacks + # + + def onAuthOK(self): + """ + Override this method in your *SMBProtocolFactory* subclass to add in post-authentication handling. + This method will be called when the server has replied that the SMB connection has been successfully authenticated. + File operations can proceed when this method has been called. + """ + pass + + def onAuthFailed(self): + """ + Override this method in your *SMBProtocolFactory* subclass to add in post-authentication handling. + This method will be called when the server has replied that the SMB connection has been successfully authenticated. + + If you want to retry authenticating from this method, + 1. Disconnect the underlying SMB connection (call ``self.instance.transport.loseConnection()``) + 2. Create a new SMBProtocolFactory subclass instance with different user credientials or different NTLM algorithm flag. + 3. 
Call ``reactor.connectTCP`` with the new instance to re-establish the SMB connection + """ + pass + + # + # Public Methods + # + + def listShares(self, timeout = 30): + """ + Retrieve a list of shared resources on remote server. + + :param integer/float timeout: Number of seconds that pysmb will wait before raising *SMBTimeout* via the returned *Deferred* instance's *errback* method. + :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a list of :doc:`smb.base.SharedDevice<smb_SharedDevice>` instances. + """ + if not self.instance: + raise NotConnectedError('Not connected to server') + + d = defer.Deferred() + self.instance._listShares(d.callback, d.errback, timeout) + return d + + def listPath(self, service_name, path, + search = SMB_FILE_ATTRIBUTE_READONLY | SMB_FILE_ATTRIBUTE_HIDDEN | SMB_FILE_ATTRIBUTE_SYSTEM | SMB_FILE_ATTRIBUTE_DIRECTORY | SMB_FILE_ATTRIBUTE_ARCHIVE | SMB_FILE_ATTRIBUTE_INCL_NORMAL, + pattern = '*', timeout = 30): + """ + Retrieve a directory listing of files/folders at *path* + + For simplicity, pysmb defines a "normal" file as a file entry that is not read-only, not hidden, not system, not archive and not a directory. + It ignores other attributes like compression, indexed, sparse, temporary and encryption. + + Note that the default search parameter will query for all read-only (SMB_FILE_ATTRIBUTE_READONLY), hidden (SMB_FILE_ATTRIBUTE_HIDDEN), + system (SMB_FILE_ATTRIBUTE_SYSTEM), archive (SMB_FILE_ATTRIBUTE_ARCHIVE), normal (SMB_FILE_ATTRIBUTE_INCL_NORMAL) files + and directories (SMB_FILE_ATTRIBUTE_DIRECTORY). + If you do not need to include "normal" files in the result, define your own search parameter without the SMB_FILE_ATTRIBUTE_INCL_NORMAL constant. + SMB_FILE_ATTRIBUTE_NORMAL should be used by itself and not be used with other bit constants. 
+ + :param string/unicode service_name: the name of the shared folder for the *path* + :param string/unicode path: path relative to the *service_name* where we are interested to learn about its files/sub-folders. + :param integer search: integer value made up from a bitwise-OR of *SMB_FILE_ATTRIBUTE_xxx* bits (see smb_constants.py). + :param string/unicode pattern: the filter to apply to the results before returning to the client. + :param integer/float timeout: Number of seconds that pysmb will wait before raising *SMBTimeout* via the returned *Deferred* instance's *errback* method. + :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a list of :doc:`smb.base.SharedFile<smb_SharedFile>` instances. + """ + if not self.instance: + raise NotConnectedError('Not connected to server') + + d = defer.Deferred() + self.instance._listPath(service_name, path, d.callback, d.errback, search = search, pattern = pattern, timeout = timeout) + return d + + def listSnapshots(self, service_name, path, timeout = 30): + """ + Retrieve a list of available snapshots (a.k.a. shadow copies) for *path*. + + Note that snapshot features are only supported on Windows Vista Business, Enterprise and Ultimate, and on all Windows 7 editions. + + :param string/unicode service_name: the name of the shared folder for the *path* + :param string/unicode path: path relative to the *service_name* where we are interested in the list of available snapshots + :return: A *twisted.internet.defer.Deferred* instance. 
The callback function will be called with a list of python *datetime.DateTime* + instances in GMT/UTC time zone + """ + if not self.instance: + raise NotConnectedError('Not connected to server') + + d = defer.Deferred() + self.instance._listSnapshots(service_name, path, d.callback, d.errback, timeout = timeout) + return d + + def getAttributes(self, service_name, path, timeout = 30): + """ + Retrieve information about the file at *path* on the *service_name*. + + :param string/unicode service_name: the name of the shared folder for the *path* + :param string/unicode path: Path of the file on the remote server. If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be raised. + :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a :doc:`smb.base.SharedFile<smb_SharedFile>` instance containing the attributes of the file. + """ + if not self.instance: + raise NotConnectedError('Not connected to server') + + d = defer.Deferred() + self.instance._getAttributes(service_name, path, d.callback, d.errback, timeout = timeout) + return d + + def retrieveFile(self, service_name, path, file_obj, timeout = 30): + """ + Retrieve the contents of the file at *path* on the *service_name* and write these contents to the provided *file_obj*. + + Use *retrieveFileFromOffset()* method if you need to specify the offset to read from the remote *path* and/or the maximum number of bytes to write to the *file_obj*. + + The meaning of the *timeout* parameter will be different from other file operation methods. As the downloaded file usually exceeeds the maximum size + of each SMB/CIFS data message, it will be packetized into a series of request messages (each message will request about about 60kBytes). + The *timeout* parameter is an integer/float value that specifies the timeout interval for these individual SMB/CIFS message to be transmitted and downloaded from the remote SMB/CIFS server. 
+ + :param string/unicode service_name: the name of the shared folder for the *path* + :param string/unicode path: Path of the file on the remote server. If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be called in the returned *Deferred* errback. + :param file_obj: A file-like object that has a *write* method. Data will be written continuously to *file_obj* until EOF is received from the remote service. + :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a 3-element tuple of ( *file_obj*, file attributes of the file on server, number of bytes written to *file_obj* ). + The file attributes is an integer value made up from a bitwise-OR of *SMB_FILE_ATTRIBUTE_xxx* bits (see smb_constants.py) + """ + return self.retrieveFileFromOffset(service_name, path, file_obj, 0L, -1L, timeout) + + def retrieveFileFromOffset(self, service_name, path, file_obj, offset = 0L, max_length = -1L, timeout = 30): + """ + Retrieve the contents of the file at *path* on the *service_name* and write these contents to the provided *file_obj*. + + The meaning of the *timeout* parameter will be different from other file operation methods. As the downloaded file usually exceeeds the maximum size + of each SMB/CIFS data message, it will be packetized into a series of request messages (each message will request about about 60kBytes). + The *timeout* parameter is an integer/float value that specifies the timeout interval for these individual SMB/CIFS message to be transmitted and downloaded from the remote SMB/CIFS server. + + :param string/unicode service_name: the name of the shared folder for the *path* + :param string/unicode path: Path of the file on the remote server. If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be called in the returned *Deferred* errback. + :param file_obj: A file-like object that has a *write* method. 
Data will be written continuously to *file_obj* until EOF is received from the remote service. + :param integer/long offset: the offset in the remote *path* where the first byte will be read and written to *file_obj*. Must be either zero or a positive integer/long value. + :param integer/long max_length: maximum number of bytes to read from the remote *path* and write to the *file_obj*. Specify a negative value to read from *offset* to the EOF. + If zero, the *Deferred* callback is invoked immediately after the file is opened successfully for reading. + :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a 3-element tuple of ( *file_obj*, file attributes of the file on server, number of bytes written to *file_obj* ). + The file attributes is an integer value made up from a bitwise-OR of *SMB_FILE_ATTRIBUTE_xxx* bits (see smb_constants.py) + """ + if not self.instance: + raise NotConnectedError('Not connected to server') + + d = defer.Deferred() + self.instance._retrieveFileFromOffset(service_name, path, file_obj, d.callback, d.errback, offset, max_length, timeout = timeout) + return d + + def storeFile(self, service_name, path, file_obj, timeout = 30): + """ + Store the contents of the *file_obj* at *path* on the *service_name*. + + The meaning of the *timeout* parameter will be different from other file operation methods. As the uploaded file usually exceeeds the maximum size + of each SMB/CIFS data message, it will be packetized into a series of messages (usually about 60kBytes). + The *timeout* parameter is an integer/float value that specifies the timeout interval for these individual SMB/CIFS message to be transmitted and acknowledged + by the remote SMB/CIFS server. + + :param string/unicode service_name: the name of the shared folder for the *path* + :param string/unicode path: Path of the file on the remote server. If the file at *path* does not exist, it will be created. Otherwise, it will be overwritten. 
+ If the *path* refers to a folder or the file cannot be opened for writing, an :doc:`OperationFailure<smb_exceptions>` will be called in the returned *Deferred* errback. + :param file_obj: A file-like object that has a *read* method. Data will read continuously from *file_obj* until EOF. + :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a 2-element tuple of ( *file_obj*, number of bytes uploaded ). + """ + if not self.instance: + raise NotConnectedError('Not connected to server') + + d = defer.Deferred() + self.instance._storeFile(service_name, path, file_obj, d.callback, d.errback, timeout = timeout) + return d + + def deleteFiles(self, service_name, path_file_pattern, timeout = 30): + """ + Delete one or more regular files. It supports the use of wildcards in file names, allowing for deletion of multiple files in a single request. + + :param string/unicode service_name: Contains the name of the shared folder. + :param string/unicode path_file_pattern: The pathname of the file(s) to be deleted, relative to the service_name. + Wildcards may be used in th filename component of the path. + If your path/filename contains non-English characters, you must pass in an unicode string. + :param integer/float timeout: Number of seconds that pysmb will wait before raising *SMBTimeout* via the returned *Deferred* instance's *errback* method. + :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with the *path_file_pattern* parameter. + """ + if not self.instance: + raise NotConnectedError('Not connected to server') + + d = defer.Deferred() + self.instance._deleteFiles(service_name, path_file_pattern, d.callback, d.errback, timeout = timeout) + return d + + def createDirectory(self, service_name, path): + """ + Creates a new directory *path* on the *service_name*. + + :param string/unicode service_name: Contains the name of the shared folder. 
+ :param string/unicode path: The path of the new folder (relative to) the shared folder. + If the path contains non-English characters, an unicode string must be used to pass in the path. + :param integer/float timeout: Number of seconds that pysmb will wait before raising *SMBTimeout* via the returned *Deferred* instance's *errback* method. + :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with the *path* parameter. + """ + if not self.instance: + raise NotConnectedError('Not connected to server') + + d = defer.Deferred() + self.instance._createDirectory(service_name, path, d.callback, d.errback) + return d + + def deleteDirectory(self, service_name, path): + """ + Delete the empty folder at *path* on *service_name* + + :param string/unicode service_name: Contains the name of the shared folder. + :param string/unicode path: The path of the to-be-deleted folder (relative to) the shared folder. + If the path contains non-English characters, an unicode string must be used to pass in the path. + :param integer/float timeout: Number of seconds that pysmb will wait before raising *SMBTimeout* via the returned *Deferred* instance's *errback* method. + :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with the *path* parameter. + """ + if not self.instance: + raise NotConnectedError('Not connected to server') + + d = defer.Deferred() + self.instance._deleteDirectory(service_name, path, d.callback, d.errback) + return d + + def rename(self, service_name, old_path, new_path): + """ + Rename a file or folder at *old_path* to *new_path* shared at *service_name*. Note that this method cannot be used to rename file/folder across different shared folders + + *old_path* and *new_path* are string/unicode referring to the old and new path of the renamed resources (relative to) the shared folder. + If the path contains non-English characters, an unicode string must be used to pass in the path. 
+ + :param string/unicode service_name: Contains the name of the shared folder. + :param integer/float timeout: Number of seconds that pysmb will wait before raising *SMBTimeout* via the returned *Deferred* instance's *errback* method. + :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with a 2-element tuple of ( *old_path*, *new_path* ). + """ + if not self.instance: + raise NotConnectedError('Not connected to server') + + d = defer.Deferred() + self.instance._rename(service_name, old_path, new_path, d.callback, d.errback) + return d + + def echo(self, data, timeout = 10): + """ + Send an echo command containing *data* to the remote SMB/CIFS server. The remote SMB/CIFS will reply with the same *data*. + + :param bytes data: Data to send to the remote server. Must be a bytes object. + :param integer/float timeout: Number of seconds that pysmb will wait before raising *SMBTimeout* via the returned *Deferred* instance's *errback* method. + :return: A *twisted.internet.defer.Deferred* instance. The callback function will be called with the *data* parameter. + """ + if not self.instance: + raise NotConnectedError('Not connected to server') + + d = defer.Deferred() + self.instance._echo(data, d.callback, d.errback, timeout) + return d + + def closeConnection(self): + """ + Disconnect from the remote SMB/CIFS server. The TCP connection will be closed at the earliest opportunity after this method returns. 
+ + :return: None + """ + if not self.instance: + raise NotConnectedError('Not connected to server') + + self.instance.transport.loseConnection() + + # + # ClientFactory methods + # (Do not touch these unless you know what you are doing) + # + + def buildProtocol(self, addr): + p = self.protocol(self.username, self.password, self.my_name, self.remote_name, self.domain, self.use_ntlm_v2, self.sign_options, self.is_direct_tcp) + p.factory = self + return p diff --git a/plugin.video.alfa/lib/sambatools/smb/__init__.py b/plugin.video.alfa/lib/sambatools/smb/__init__.py index d3f5a12f..8b137891 100755 --- a/plugin.video.alfa/lib/sambatools/smb/__init__.py +++ b/plugin.video.alfa/lib/sambatools/smb/__init__.py @@ -1 +1 @@ - + diff --git a/plugin.video.alfa/lib/sambatools/smb/base.py b/plugin.video.alfa/lib/sambatools/smb/base.py index 7de84e12..5dd9c826 100755 --- a/plugin.video.alfa/lib/sambatools/smb/base.py +++ b/plugin.video.alfa/lib/sambatools/smb/base.py @@ -1,2660 +1,2933 @@ -import hmac -from datetime import datetime - -from nmb.base import NMBSession - -import ntlm -import securityblob -from smb2_structs import * -from smb_structs import * -from utils import convertFILETIMEtoEpoch - -try: - import hashlib - sha256 = hashlib.sha256 -except ImportError: - from utils import sha256 - - -class NotReadyError(Exception): - """Raised when SMB connection is not ready (i.e. not authenticated or authentication failed)""" - pass - -class NotConnectedError(Exception): - """Raised when underlying SMB connection has been disconnected or not connected yet""" - pass - -class SMBTimeout(Exception): - """Raised when a timeout has occurred while waiting for a response or for a SMB/CIFS operation to complete.""" - pass - - -def _convert_to_unicode(string): - if not isinstance(string, unicode): - string = unicode(string, "utf-8") - return string - - -class SMB(NMBSession): - """ - This class represents a "connection" to the remote SMB/CIFS server. 
- It is not meant to be used directly in an application as it does not have any network transport implementations. - - For application use, please refer to - - L{SMBProtocol.SMBProtocolFactory<smb.SMBProtocol>} if you are using Twisted framework - - In [MS-CIFS], this class will contain attributes of Client, Client.Connection and Client.Session abstract data models. - - References: - =========== - - [MS-CIFS]: 3.2.1 - """ - - log = logging.getLogger('SMB.SMB') - - SIGN_NEVER = 0 - SIGN_WHEN_SUPPORTED = 1 - SIGN_WHEN_REQUIRED = 2 - - def __init__(self, username, password, my_name, remote_name, domain = '', use_ntlm_v2 = True, sign_options = SIGN_WHEN_REQUIRED, is_direct_tcp = False): - NMBSession.__init__(self, my_name, remote_name, is_direct_tcp = is_direct_tcp) - self.username = _convert_to_unicode(username) - self.password = _convert_to_unicode(password) - self.domain = _convert_to_unicode(domain) - self.sign_options = sign_options - self.is_direct_tcp = is_direct_tcp - self.use_ntlm_v2 = use_ntlm_v2 #: Similar to LMAuthenticationPolicy and NTAuthenticationPolicy as described in [MS-CIFS] 3.2.1.1 - self.smb_message = SMBMessage() - self.is_using_smb2 = False #: Are we communicating using SMB2 protocol? self.smb_message will be a SMB2Message instance if this flag is True - self.pending_requests = { } #: MID mapped to _PendingRequest instance - self.connected_trees = { } #: Share name mapped to TID - self.next_rpc_call_id = 1 #: Next RPC callID value. Not used directly in SMB message. Usually encapsulated in sub-commands under SMB_COM_TRANSACTION or SMB_COM_TRANSACTION2 messages - - self.has_negotiated = False - self.has_authenticated = False - self.is_signing_active = False #: True if the remote server accepts message signing. All outgoing messages will be signed. Simiar to IsSigningActive as described in [MS-CIFS] 3.2.1.2 - self.signing_session_key = None #: Session key for signing packets, if signing is active. 
Similar to SigningSessionKey as described in [MS-CIFS] 3.2.1.2 - self.signing_challenge_response = None #: Contains the challenge response for signing, if signing is active. Similar to SigningChallengeResponse as described in [MS-CIFS] 3.2.1.2 - self.mid = 0 - self.uid = 0 - self.next_signing_id = 2 #: Similar to ClientNextSendSequenceNumber as described in [MS-CIFS] 3.2.1.2 - - # SMB1 and SMB2 attributes - # Note that the interpretations of the values may differ between SMB1 and SMB2 protocols - self.capabilities = 0 - self.security_mode = 0 #: Initialized from the SecurityMode field of the SMB_COM_NEGOTIATE message - - # SMB1 attributes - # Most of the following attributes will be initialized upon receipt of SMB_COM_NEGOTIATE message from server (via self._updateServerInfo_SMB1 method) - self.use_plaintext_authentication = False #: Similar to PlaintextAuthenticationPolicy in in [MS-CIFS] 3.2.1.1 - self.max_raw_size = 0 - self.max_buffer_size = 0 #: Similar to MaxBufferSize as described in [MS-CIFS] 3.2.1.1 - self.max_mpx_count = 0 #: Similar to MaxMpxCount as described in [MS-CIFS] 3.2.1.1 - - # SMB2 attributes - self.max_read_size = 0 #: Similar to MaxReadSize as described in [MS-SMB2] 2.2.4 - self.max_write_size = 0 #: Similar to MaxWriteSize as described in [MS-SMB2] 2.2.4 - self.max_transact_size = 0 #: Similar to MaxTransactSize as described in [MS-SMB2] 2.2.4 - self.session_id = 0 #: Similar to SessionID as described in [MS-SMB2] 2.2.4. 
This will be set in _updateState_SMB2 method - - self._setupSMB1Methods() - - self.log.info('Authentication with remote machine "%s" for user "%s" will be using NTLM %s authentication (%s extended security)', - self.remote_name, self.username, - (self.use_ntlm_v2 and 'v2') or 'v1', - (SUPPORT_EXTENDED_SECURITY and 'with') or 'without') - - - # - # NMBSession Methods - # - - def onNMBSessionOK(self): - self._sendSMBMessage(SMBMessage(ComNegotiateRequest())) - - def onNMBSessionFailed(self): - pass - - def onNMBSessionMessage(self, flags, data): - while True: - try: - i = self.smb_message.decode(data) - except SMB2ProtocolHeaderError: - self.log.info('Now switching over to SMB2 protocol communication') - self.is_using_smb2 = True - self.mid = 0 # Must reset messageID counter, or else remote SMB2 server will disconnect - self._setupSMB2Methods() - self.smb_message = self._klassSMBMessage() - i = self.smb_message.decode(data) - - next_message_offset = 0 - if self.is_using_smb2: - next_message_offset = self.smb_message.next_command_offset - - if i > 0: - if not self.is_using_smb2: - self.log.debug('Received SMB message "%s" (command:0x%2X flags:0x%02X flags2:0x%04X TID:%d UID:%d)', - SMB_COMMAND_NAMES.get(self.smb_message.command, '<unknown>'), - self.smb_message.command, self.smb_message.flags, self.smb_message.flags2, self.smb_message.tid, self.smb_message.uid) - else: - self.log.debug('Received SMB2 message "%s" (command:0x%04X flags:0x%04x)', - SMB2_COMMAND_NAMES.get(self.smb_message.command, '<unknown>'), - self.smb_message.command, self.smb_message.flags) - if self._updateState(self.smb_message): - # We need to create a new instance instead of calling reset() because the instance could be captured in the message history. 
- self.smb_message = self._klassSMBMessage() - - if next_message_offset > 0: - data = data[next_message_offset:] - else: - break - - # - # Public Methods for Overriding in Subclasses - # - - def onAuthOK(self): - pass - - def onAuthFailed(self): - pass - - # - # Protected Methods - # - - def _setupSMB1Methods(self): - self._klassSMBMessage = SMBMessage - self._updateState = self._updateState_SMB1 - self._updateServerInfo = self._updateServerInfo_SMB1 - self._handleNegotiateResponse = self._handleNegotiateResponse_SMB1 - self._sendSMBMessage = self._sendSMBMessage_SMB1 - self._handleSessionChallenge = self._handleSessionChallenge_SMB1 - self._listShares = self._listShares_SMB1 - self._listPath = self._listPath_SMB1 - self._listSnapshots = self._listSnapshots_SMB1 - self._getAttributes = self._getAttributes_SMB1 - self._retrieveFile = self._retrieveFile_SMB1 - self._retrieveFileFromOffset = self._retrieveFileFromOffset_SMB1 - self._storeFile = self._storeFile_SMB1 - self._storeFileFromOffset = self._storeFileFromOffset_SMB1 - self._deleteFiles = self._deleteFiles_SMB1 - self._resetFileAttributes = self._resetFileAttributes_SMB1 - self._createDirectory = self._createDirectory_SMB1 - self._deleteDirectory = self._deleteDirectory_SMB1 - self._rename = self._rename_SMB1 - self._echo = self._echo_SMB1 - - def _setupSMB2Methods(self): - self._klassSMBMessage = SMB2Message - self._updateState = self._updateState_SMB2 - self._updateServerInfo = self._updateServerInfo_SMB2 - self._handleNegotiateResponse = self._handleNegotiateResponse_SMB2 - self._sendSMBMessage = self._sendSMBMessage_SMB2 - self._handleSessionChallenge = self._handleSessionChallenge_SMB2 - self._listShares = self._listShares_SMB2 - self._listPath = self._listPath_SMB2 - self._listSnapshots = self._listSnapshots_SMB2 - self._getAttributes = self._getAttributes_SMB2 - self._retrieveFile = self._retrieveFile_SMB2 - self._retrieveFileFromOffset = self._retrieveFileFromOffset_SMB2 - self._storeFile = 
self._storeFile_SMB2 - self._storeFileFromOffset = self._storeFileFromOffset_SMB2 - self._deleteFiles = self._deleteFiles_SMB2 - self._resetFileAttributes = self._resetFileAttributes_SMB2 - self._createDirectory = self._createDirectory_SMB2 - self._deleteDirectory = self._deleteDirectory_SMB2 - self._rename = self._rename_SMB2 - self._echo = self._echo_SMB2 - - def _getNextRPCCallID(self): - self.next_rpc_call_id += 1 - return self.next_rpc_call_id - - # - # SMB2 Methods Family - # - - def _sendSMBMessage_SMB2(self, smb_message): - if smb_message.mid == 0: - smb_message.mid = self._getNextMID_SMB2() - - if smb_message.command != SMB2_COM_NEGOTIATE and smb_message.command != SMB2_COM_ECHO: - smb_message.session_id = self.session_id - - if self.is_signing_active: - smb_message.flags |= SMB2_FLAGS_SIGNED - raw_data = smb_message.encode() - smb_message.signature = hmac.new(self.signing_session_key, raw_data, sha256).digest()[:16] - - smb_message.raw_data = smb_message.encode() - self.log.debug('MID is %d. Signature is %s. 
Total raw message is %d bytes', smb_message.mid, binascii.hexlify(smb_message.signature), len(smb_message.raw_data)) - else: - smb_message.raw_data = smb_message.encode() - self.sendNMBMessage(smb_message.raw_data) - - def _getNextMID_SMB2(self): - self.mid += 1 - return self.mid - - def _updateState_SMB2(self, message): - if message.isReply: - if message.command == SMB2_COM_NEGOTIATE: - if message.status == 0: - self.has_negotiated = True - self.log.info('SMB2 dialect negotiation successful') - self._updateServerInfo(message.payload) - self._handleNegotiateResponse(message) - else: - raise ProtocolError('Unknown status value (0x%08X) in SMB2_COM_NEGOTIATE' % message.status, - message.raw_data, message) - elif message.command == SMB2_COM_SESSION_SETUP: - if message.status == 0: - self.session_id = message.session_id - try: - result = securityblob.decodeAuthResponseSecurityBlob(message.payload.security_blob) - if result == securityblob.RESULT_ACCEPT_COMPLETED: - self.has_authenticated = True - self.log.info('Authentication (on SMB2) successful!') - self.onAuthOK() - else: - raise ProtocolError('SMB2_COM_SESSION_SETUP status is 0 but security blob negResult value is %d' % result, message.raw_data, message) - except securityblob.BadSecurityBlobError, ex: - raise ProtocolError(str(ex), message.raw_data, message) - elif message.status == 0xc0000016: # STATUS_MORE_PROCESSING_REQUIRED - self.session_id = message.session_id - try: - result, ntlm_token = securityblob.decodeChallengeSecurityBlob(message.payload.security_blob) - if result == securityblob.RESULT_ACCEPT_INCOMPLETE: - self._handleSessionChallenge(message, ntlm_token) - except ( securityblob.BadSecurityBlobError, securityblob.UnsupportedSecurityProvider ), ex: - raise ProtocolError(str(ex), message.raw_data, message) - elif message.status == 0xc000006d: # STATUS_LOGON_FAILURE - self.has_authenticated = False - self.log.info('Authentication (on SMB2) failed. 
Please check username and password.') - self.onAuthFailed() - else: - raise ProtocolError('Unknown status value (0x%08X) in SMB_COM_SESSION_SETUP_ANDX (with extended security)' % message.status, - message.raw_data, message) - - req = self.pending_requests.pop(message.mid, None) - if req: - req.callback(message, **req.kwargs) - return True - - - def _updateServerInfo_SMB2(self, payload): - self.capabilities = payload.capabilities - self.security_mode = payload.security_mode - self.max_transact_size = payload.max_transact_size - self.max_read_size = payload.max_read_size - self.max_write_size = payload.max_write_size - self.use_plaintext_authentication = False # SMB2 never allows plaintext authentication - - - def _handleNegotiateResponse_SMB2(self, message): - ntlm_data = ntlm.generateNegotiateMessage() - blob = securityblob.generateNegotiateSecurityBlob(ntlm_data) - self._sendSMBMessage(SMB2Message(SMB2SessionSetupRequest(blob))) - - - def _handleSessionChallenge_SMB2(self, message, ntlm_token): - server_challenge, server_flags, server_info = ntlm.decodeChallengeMessage(ntlm_token) - - self.log.info('Performing NTLMv2 authentication (on SMB2) with server challenge "%s"', binascii.hexlify(server_challenge)) - - if self.use_ntlm_v2: - self.log.info('Performing NTLMv2 authentication (on SMB2) with server challenge "%s"', binascii.hexlify(server_challenge)) - nt_challenge_response, lm_challenge_response, session_key = ntlm.generateChallengeResponseV2(self.password, - self.username, - server_challenge, - server_info, - self.domain) - - else: - self.log.info('Performing NTLMv1 authentication (on SMB2) with server challenge "%s"', binascii.hexlify(server_challenge)) - nt_challenge_response, lm_challenge_response, session_key = ntlm.generateChallengeResponseV1(self.password, server_challenge, True) - - ntlm_data = ntlm.generateAuthenticateMessage(server_flags, - nt_challenge_response, - lm_challenge_response, - session_key, - self.username, - self.domain) - - if 
self.log.isEnabledFor(logging.DEBUG): - self.log.debug('NT challenge response is "%s" (%d bytes)', binascii.hexlify(nt_challenge_response), len(nt_challenge_response)) - self.log.debug('LM challenge response is "%s" (%d bytes)', binascii.hexlify(lm_challenge_response), len(lm_challenge_response)) - - blob = securityblob.generateAuthSecurityBlob(ntlm_data) - self._sendSMBMessage(SMB2Message(SMB2SessionSetupRequest(blob))) - - if self.security_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED: - self.log.info('Server requires all SMB messages to be signed') - self.is_signing_active = (self.sign_options != SMB.SIGN_NEVER) - elif self.security_mode & SMB2_NEGOTIATE_SIGNING_ENABLED: - self.log.info('Server supports SMB signing') - self.is_signing_active = (self.sign_options == SMB.SIGN_WHEN_SUPPORTED) - else: - self.is_signing_active = False - - if self.is_signing_active: - self.log.info("SMB signing activated. All SMB messages will be signed.") - self.signing_session_key = (session_key + '\0'*16)[:16] - if self.capabilities & CAP_EXTENDED_SECURITY: - self.signing_challenge_response = None - else: - self.signing_challenge_response = blob - else: - self.log.info("SMB signing deactivated. 
SMB messages will NOT be signed.") - - - def _listShares_SMB2(self, callback, errback, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - expiry_time = time.time() + timeout - path = 'IPC$' - messages_history = [ ] - - def connectSrvSvc(tid): - m = SMB2Message(SMB2CreateRequest('srvsvc', - file_attributes = 0, - access_mask = FILE_READ_DATA | FILE_WRITE_DATA | FILE_APPEND_DATA | FILE_READ_EA | FILE_WRITE_EA | READ_CONTROL | FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES | SYNCHRONIZE, - share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, - oplock = SMB2_OPLOCK_LEVEL_NONE, - impersonation = SEC_IMPERSONATE, - create_options = FILE_NON_DIRECTORY_FILE | FILE_OPEN_NO_RECALL, - create_disp = FILE_OPEN)) - - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectSrvSvcCB, errback) - messages_history.append(m) - - def connectSrvSvcCB(create_message, **kwargs): - messages_history.append(create_message) - if create_message.status == 0: - call_id = self._getNextRPCCallID() - # The data_bytes are binding call to Server Service RPC using DCE v1.1 RPC over SMB. 
See [MS-SRVS] and [C706] - # If you wish to understand the meanings of the byte stream, I would suggest you use a recent version of WireShark to packet capture the stream - data_bytes = \ - binascii.unhexlify("""05 00 0b 03 10 00 00 00 74 00 00 00""".replace(' ', '')) + \ - struct.pack('<I', call_id) + \ - binascii.unhexlify(""" -b8 10 b8 10 00 00 00 00 02 00 00 00 00 00 01 00 -c8 4f 32 4b 70 16 d3 01 12 78 5a 47 bf 6e e1 88 -03 00 00 00 04 5d 88 8a eb 1c c9 11 9f e8 08 00 -2b 10 48 60 02 00 00 00 01 00 01 00 c8 4f 32 4b -70 16 d3 01 12 78 5a 47 bf 6e e1 88 03 00 00 00 -2c 1c b7 6c 12 98 40 45 03 00 00 00 00 00 00 00 -01 00 00 00 -""".replace(' ', '').replace('\n', '')) - m = SMB2Message(SMB2WriteRequest(create_message.payload.fid, data_bytes, 0)) - m.tid = create_message.tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, rpcBindCB, errback, fid = create_message.payload.fid) - messages_history.append(m) - else: - errback(OperationFailure('Failed to list shares: Unable to locate Server Service RPC endpoint', messages_history)) - - def rpcBindCB(trans_message, **kwargs): - messages_history.append(trans_message) - if trans_message.status == 0: - m = SMB2Message(SMB2ReadRequest(kwargs['fid'], read_len = 1024, read_offset = 0)) - m.tid = trans_message.tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, rpcReadCB, errback, fid = kwargs['fid']) - messages_history.append(m) - else: - closeFid(trans_message.tid, kwargs['fid'], error = 'Failed to list shares: Unable to read from Server Service RPC endpoint') - - def rpcReadCB(read_message, **kwargs): - messages_history.append(read_message) - if read_message.status == 0: - call_id = self._getNextRPCCallID() - - padding = '' - remote_name = '\\\\' + self.remote_name - server_len = len(remote_name) + 1 - server_bytes_len = server_len * 2 - if server_len % 2 != 0: - padding = '\0\0' - server_bytes_len += 2 - - # The data bytes are the 
RPC call to NetrShareEnum (Opnum 15) at Server Service RPC. - # If you wish to understand the meanings of the byte stream, I would suggest you use a recent version of WireShark to packet capture the stream - data_bytes = \ - binascii.unhexlify("""05 00 00 03 10 00 00 00""".replace(' ', '')) + \ - struct.pack('<HHI', 72+server_bytes_len, 0, call_id) + \ - binascii.unhexlify("""4c 00 00 00 00 00 0f 00 00 00 02 00""".replace(' ', '')) + \ - struct.pack('<III', server_len, 0, server_len) + \ - (remote_name + '\0').encode('UTF-16LE') + padding + \ - binascii.unhexlify(""" -01 00 00 00 01 00 00 00 04 00 02 00 00 00 00 00 -00 00 00 00 ff ff ff ff 08 00 02 00 00 00 00 00 -""".replace(' ', '').replace('\n', '')) - m = SMB2Message(SMB2IoctlRequest(kwargs['fid'], 0x0011C017, flags = 0x01, max_out_size = 8196, in_data = data_bytes)) - m.tid = read_message.tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, listShareResultsCB, errback, fid = kwargs['fid']) - messages_history.append(m) - else: - closeFid(read_message.tid, kwargs['fid'], error = 'Failed to list shares: Unable to bind to Server Service RPC endpoint') - - def listShareResultsCB(result_message, **kwargs): - messages_history.append(result_message) - if result_message.status == 0: - # The payload.data_bytes will contain the results of the RPC call to NetrShareEnum (Opnum 15) at Server Service RPC. 
- data_bytes = result_message.payload.out_data - - if ord(data_bytes[3]) & 0x02 == 0: - sendReadRequest(result_message.tid, kwargs['fid'], data_bytes) - else: - decodeResults(result_message.tid, kwargs['fid'], data_bytes) - elif result_message.status == 0x0103: # STATUS_PENDING - self.pending_requests[result_message.mid] = _PendingRequest(result_message.mid, expiry_time, listShareResultsCB, errback, fid = kwargs['fid']) - else: - closeFid(result_message.tid, kwargs['fid']) - errback(OperationFailure('Failed to list shares: Unable to retrieve shared device list', messages_history)) - - def decodeResults(tid, fid, data_bytes): - shares_count = struct.unpack('<I', data_bytes[36:40])[0] - results = [ ] # A list of SharedDevice instances - offset = 36 + 12 # You need to study the byte stream to understand the meaning of these constants - for i in range(0, shares_count): - results.append(SharedDevice(struct.unpack('<I', data_bytes[offset+4:offset+8])[0], None, None)) - offset += 12 - - for i in range(0, shares_count): - max_length, _, length = struct.unpack('<III', data_bytes[offset:offset+12]) - offset += 12 - results[i].name = unicode(data_bytes[offset:offset+length*2-2], 'UTF-16LE') - - if length % 2 != 0: - offset += (length * 2 + 2) - else: - offset += (length * 2) - - max_length, _, length = struct.unpack('<III', data_bytes[offset:offset+12]) - offset += 12 - results[i].comments = unicode(data_bytes[offset:offset+length*2-2], 'UTF-16LE') - - if length % 2 != 0: - offset += (length * 2 + 2) - else: - offset += (length * 2) - - closeFid(tid, fid) - callback(results) - - def sendReadRequest(tid, fid, data_bytes): - read_count = min(4280, self.max_read_size) - m = SMB2Message(SMB2ReadRequest(fid, 0, read_count)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, readCB, errback, - fid = fid, data_bytes = data_bytes) - - def readCB(read_message, **kwargs): - messages_history.append(read_message) - 
if read_message.status == 0: - data_len = read_message.payload.data_length - data_bytes = read_message.payload.data - - if ord(data_bytes[3]) & 0x02 == 0: - sendReadRequest(read_message.tid, kwargs['fid'], kwargs['data_bytes'] + data_bytes[24:data_len-24]) - else: - decodeResults(read_message.tid, kwargs['fid'], kwargs['data_bytes'] + data_bytes[24:data_len-24]) - else: - closeFid(read_message.tid, kwargs['fid']) - errback(OperationFailure('Failed to list shares: Unable to retrieve shared device list', messages_history)) - - def closeFid(tid, fid, results = None, error = None): - m = SMB2Message(SMB2CloseRequest(fid)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, closeCB, errback, results = results, error = error) - messages_history.append(m) - - def closeCB(close_message, **kwargs): - if kwargs['results'] is not None: - callback(kwargs['results']) - elif kwargs['error'] is not None: - errback(OperationFailure(kwargs['error'], messages_history)) - - if not self.connected_trees.has_key(path): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if connect_message.status == 0: - self.connected_trees[path] = connect_message.tid - connectSrvSvc(connect_message.tid) - else: - errback(OperationFailure('Failed to list shares: Unable to connect to IPC$', messages_history)) - - m = SMB2Message(SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), path ))) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = path) - messages_history.append(m) - else: - connectSrvSvc(self.connected_trees[path]) - - def _listPath_SMB2(self, service_name, path, callback, errback, search, pattern, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - expiry_time = time.time() + timeout - path = path.replace('/', '\\') - if path.startswith('\\'): - path = path[1:] - 
if path.endswith('\\'): - path = path[:-1] - messages_history = [ ] - results = [ ] - - def sendCreate(tid): - create_context_data = binascii.unhexlify(""" -28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 -44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 -00 00 00 00 00 00 00 00 18 00 00 00 10 00 04 00 -00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 -00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 -51 46 69 64 00 00 00 00 -""".replace(' ', '').replace('\n', '')) - m = SMB2Message(SMB2CreateRequest(path, - file_attributes = 0, - access_mask = FILE_READ_DATA | FILE_READ_EA | FILE_READ_ATTRIBUTES | SYNCHRONIZE, - share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, - oplock = SMB2_OPLOCK_LEVEL_NONE, - impersonation = SEC_IMPERSONATE, - create_options = FILE_DIRECTORY_FILE, - create_disp = FILE_OPEN, - create_context_data = create_context_data)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, createCB, errback) - messages_history.append(m) - - def createCB(create_message, **kwargs): - messages_history.append(create_message) - if create_message.status == 0: - sendQuery(create_message.tid, create_message.payload.fid, '') - else: - errback(OperationFailure('Failed to list %s on %s: Unable to open directory' % ( path, service_name ), messages_history)) - - def sendQuery(tid, fid, data_buf): - m = SMB2Message(SMB2QueryDirectoryRequest(fid, pattern, - info_class = 0x03, # FileBothDirectoryInformation - flags = 0, - output_buf_len = self.max_transact_size)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, queryCB, errback, fid = fid, data_buf = data_buf) - messages_history.append(m) - - def queryCB(query_message, **kwargs): - messages_history.append(query_message) - if query_message.status == 0: - data_buf = decodeQueryStruct(kwargs['data_buf'] + query_message.payload.data) - sendQuery(query_message.tid, kwargs['fid'], data_buf) - elif 
query_message.status == 0x80000006L: # STATUS_NO_MORE_FILES - closeFid(query_message.tid, kwargs['fid'], results = results) - else: - closeFid(query_message.tid, kwargs['fid'], error = query_message.status) - - def decodeQueryStruct(data_bytes): - # SMB_FIND_FILE_BOTH_DIRECTORY_INFO structure. See [MS-CIFS]: 2.2.8.1.7 and [MS-SMB]: 2.2.8.1.1 - info_format = '<IIQQQQQQIIIBB24s' - info_size = struct.calcsize(info_format) - - data_length = len(data_bytes) - offset = 0 - while offset < data_length: - if offset + info_size > data_length: - return data_bytes[offset:] - - next_offset, _, \ - create_time, last_access_time, last_write_time, last_attr_change_time, \ - file_size, alloc_size, file_attributes, filename_length, ea_size, \ - short_name_length, _, short_name = struct.unpack(info_format, data_bytes[offset:offset+info_size]) - - offset2 = offset + info_size - if offset2 + filename_length > data_length: - return data_bytes[offset:] - - filename = data_bytes[offset2:offset2+filename_length].decode('UTF-16LE') - short_name = short_name.decode('UTF-16LE') - results.append(SharedFile(convertFILETIMEtoEpoch(create_time), convertFILETIMEtoEpoch(last_access_time), - convertFILETIMEtoEpoch(last_write_time), convertFILETIMEtoEpoch(last_attr_change_time), - file_size, alloc_size, file_attributes, short_name, filename)) - - if next_offset: - offset += next_offset - else: - break - return '' - - def closeFid(tid, fid, results = None, error = None): - m = SMB2Message(SMB2CloseRequest(fid)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, closeCB, errback, results = results, error = error) - messages_history.append(m) - - def closeCB(close_message, **kwargs): - if kwargs['results'] is not None: - callback(kwargs['results']) - elif kwargs['error'] is not None: - errback(OperationFailure('Failed to list %s on %s: Query failed with errorcode 0x%08x' % ( path, service_name, kwargs['error'] ), messages_history)) - - if not 
self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if connect_message.status == 0: - self.connected_trees[service_name] = connect_message.tid - sendCreate(connect_message.tid) - else: - errback(OperationFailure('Failed to list %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMB2Message(SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendCreate(self.connected_trees[service_name]) - - def _getAttributes_SMB2(self, service_name, path, callback, errback, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - expiry_time = time.time() + timeout - path = path.replace('/', '\\') - if path.startswith('\\'): - path = path[1:] - if path.endswith('\\'): - path = path[:-1] - messages_history = [ ] - - def sendCreate(tid): - create_context_data = binascii.unhexlify(""" -28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 -44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 -00 00 00 00 00 00 00 00 18 00 00 00 10 00 04 00 -00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 -00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 -51 46 69 64 00 00 00 00 -""".replace(' ', '').replace('\n', '')) - m = SMB2Message(SMB2CreateRequest(path, - file_attributes = 0, - access_mask = FILE_READ_DATA | FILE_READ_EA | FILE_READ_ATTRIBUTES | SYNCHRONIZE, - share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, - oplock = SMB2_OPLOCK_LEVEL_NONE, - impersonation = SEC_IMPERSONATE, - create_options = 0, - create_disp = FILE_OPEN, - create_context_data = create_context_data)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, createCB, errback) - 
messages_history.append(m) - - def createCB(create_message, **kwargs): - messages_history.append(create_message) - if create_message.status == 0: - p = create_message.payload - info = SharedFile(p.create_time, p.lastaccess_time, p.lastwrite_time, p.change_time, - p.file_size, p.allocation_size, p.file_attributes, - unicode(path), unicode(path)) - closeFid(create_message.tid, p.fid, info = info) - else: - errback(OperationFailure('Failed to get attributes for %s on %s: Unable to open remote file object' % ( path, service_name ), messages_history)) - - def closeFid(tid, fid, info = None, error = None): - m = SMB2Message(SMB2CloseRequest(fid)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, closeCB, errback, info = info, error = error) - messages_history.append(m) - - def closeCB(close_message, **kwargs): - if kwargs['info'] is not None: - callback(kwargs['info']) - elif kwargs['error'] is not None: - errback(OperationFailure('Failed to get attributes for %s on %s: Query failed with errorcode 0x%08x' % ( path, service_name, kwargs['error'] ), messages_history)) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if connect_message.status == 0: - self.connected_trees[service_name] = connect_message.tid - sendCreate(connect_message.tid) - else: - errback(OperationFailure('Failed to get attributes for %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMB2Message(SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendCreate(self.connected_trees[service_name]) - - def _retrieveFile_SMB2(self, service_name, path, file_obj, callback, errback, timeout = 30): - return 
self._retrieveFileFromOffset(service_name, path, file_obj, callback, errback, 0L, -1L, timeout) - - def _retrieveFileFromOffset_SMB2(self, service_name, path, file_obj, callback, errback, starting_offset, max_length, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - expiry_time = time.time() + timeout - path = path.replace('/', '\\') - if path.startswith('\\'): - path = path[1:] - if path.endswith('\\'): - path = path[:-1] - messages_history = [ ] - results = [ ] - - def sendCreate(tid): - create_context_data = binascii.unhexlify(""" -28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 -44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 -00 00 00 00 00 00 00 00 18 00 00 00 10 00 04 00 -00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 -00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 -51 46 69 64 00 00 00 00 -""".replace(' ', '').replace('\n', '')) - m = SMB2Message(SMB2CreateRequest(path, - file_attributes = 0, - access_mask = FILE_READ_DATA | FILE_READ_EA | FILE_READ_ATTRIBUTES | READ_CONTROL | SYNCHRONIZE, - share_access = FILE_SHARE_READ, - oplock = SMB2_OPLOCK_LEVEL_NONE, - impersonation = SEC_IMPERSONATE, - create_options = FILE_SEQUENTIAL_ONLY | FILE_NON_DIRECTORY_FILE, - create_disp = FILE_OPEN, - create_context_data = create_context_data)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, createCB, errback, tid = tid) - messages_history.append(m) - - def createCB(create_message, **kwargs): - messages_history.append(create_message) - if create_message.status == 0: - m = SMB2Message(SMB2QueryInfoRequest(create_message.payload.fid, - flags = 0, - additional_info = 0, - info_type = SMB2_INFO_FILE, - file_info_class = 0x16, # FileStreamInformation [MS-FSCC] 2.4 - input_buf = '', - output_buf_len = 4096)) - m.tid = create_message.tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, infoCB, errback, - fid = 
create_message.payload.fid, file_attributes = create_message.payload.file_attributes) - messages_history.append(m) - else: - errback(OperationFailure('Failed to list %s on %s: Unable to open file' % ( path, service_name ), messages_history)) - - def infoCB(info_message, **kwargs): - messages_history.append(info_message) - if info_message.status == 0: - file_len = struct.unpack('<Q', info_message.payload.data[8:16])[0] - if max_length == 0 or starting_offset > file_len: - closeFid(info_message.tid, kwargs['fid']) - callback(( file_obj, kwargs['file_attributes'], 0 )) # Note that this is a tuple of 3-elements - else: - remaining_len = max_length - if remaining_len < 0: - remaining_len = file_len - if starting_offset + remaining_len > file_len: - remaining_len = file_len - starting_offset - sendRead(info_message.tid, kwargs['fid'], starting_offset, remaining_len, 0, kwargs['file_attributes']) - else: - errback(OperationFailure('Failed to list %s on %s: Unable to retrieve information on file' % ( path, service_name ), messages_history)) - - def sendRead(tid, fid, offset, remaining_len, read_len, file_attributes): - read_count = min(self.max_read_size, remaining_len) - m = SMB2Message(SMB2ReadRequest(fid, offset, read_count)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, readCB, errback, - fid = fid, offset = offset, - remaining_len = remaining_len, - read_len = read_len, - file_attributes = file_attributes) - - def readCB(read_message, **kwargs): - # To avoid crazy memory usage when retrieving large files, we do not save every read_message in messages_history. 
- if read_message.status == 0: - data_len = read_message.payload.data_length - file_obj.write(read_message.payload.data) - - remaining_len = kwargs['remaining_len'] - data_len - - if remaining_len > 0: - sendRead(read_message.tid, kwargs['fid'], kwargs['offset'] + data_len, remaining_len, kwargs['read_len'] + data_len, kwargs['file_attributes']) - else: - closeFid(read_message.tid, kwargs['fid'], ret = ( file_obj, kwargs['file_attributes'], kwargs['read_len'] + data_len )) - else: - messages_history.append(read_message) - closeFid(read_message.tid, kwargs['fid'], error = read_message.status) - - def closeFid(tid, fid, ret = None, error = None): - m = SMB2Message(SMB2CloseRequest(fid)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, closeCB, errback, ret = ret, error = error) - messages_history.append(m) - - def closeCB(close_message, **kwargs): - if kwargs['ret'] is not None: - callback(kwargs['ret']) - elif kwargs['error'] is not None: - errback(OperationFailure('Failed to retrieve %s on %s: Read failed' % ( path, service_name ), messages_history)) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if connect_message.status == 0: - self.connected_trees[service_name] = connect_message.tid - sendCreate(connect_message.tid) - else: - errback(OperationFailure('Failed to retrieve %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMB2Message(SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendCreate(self.connected_trees[service_name]) - - def _storeFile_SMB2(self, service_name, path, file_obj, callback, errback, timeout = 30): - 
self._storeFileFromOffset_SMB2(service_name, path, file_obj, callback, errback, 0L, True, timeout) - - def _storeFileFromOffset_SMB2(self, service_name, path, file_obj, callback, errback, starting_offset, truncate = False, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - path = path.replace('/', '\\') - if path.startswith('\\'): - path = path[1:] - if path.endswith('\\'): - path = path[:-1] - messages_history = [ ] - - def sendCreate(tid): - create_context_data = binascii.unhexlify(""" -28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 -44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 -00 00 00 00 00 00 00 00 20 00 00 00 10 00 04 00 -00 00 18 00 08 00 00 00 41 6c 53 69 00 00 00 00 -85 62 00 00 00 00 00 00 18 00 00 00 10 00 04 00 -00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 -00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 -51 46 69 64 00 00 00 00 -""".replace(' ', '').replace('\n', '')) - m = SMB2Message(SMB2CreateRequest(path, - file_attributes = ATTR_ARCHIVE, - access_mask = FILE_READ_DATA | FILE_WRITE_DATA | FILE_APPEND_DATA | FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES | FILE_READ_EA | FILE_WRITE_EA | READ_CONTROL | SYNCHRONIZE, - share_access = 0, - oplock = SMB2_OPLOCK_LEVEL_NONE, - impersonation = SEC_IMPERSONATE, - create_options = FILE_SEQUENTIAL_ONLY | FILE_NON_DIRECTORY_FILE, - create_disp = FILE_OVERWRITE_IF if truncate else FILE_OPEN_IF, - create_context_data = create_context_data)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback, tid = tid) - messages_history.append(m) - - def createCB(create_message, **kwargs): - messages_history.append(create_message) - if create_message.status == 0: - sendWrite(create_message.tid, create_message.payload.fid, starting_offset) - else: - errback(OperationFailure('Failed to store %s on %s: Unable to open file' % ( path, service_name ), messages_history)) - - def 
sendWrite(tid, fid, offset): - write_count = self.max_write_size - data = file_obj.read(write_count) - data_len = len(data) - if data_len > 0: - m = SMB2Message(SMB2WriteRequest(fid, data, offset)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, writeCB, errback, fid = fid, offset = offset+data_len) - else: - closeFid(tid, fid, offset = offset) - - def writeCB(write_message, **kwargs): - # To avoid crazy memory usage when saving large files, we do not save every write_message in messages_history. - if write_message.status == 0: - sendWrite(write_message.tid, kwargs['fid'], kwargs['offset']) - else: - messages_history.append(write_message) - closeFid(write_message.tid, kwargs['fid']) - errback(OperationFailure('Failed to store %s on %s: Write failed' % ( path, service_name ), messages_history)) - - def closeFid(tid, fid, error = None, offset = None): - m = SMB2Message(SMB2CloseRequest(fid)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, closeCB, errback, fid = fid, offset = offset, error = error) - messages_history.append(m) - - def closeCB(close_message, **kwargs): - if kwargs['offset'] is not None: - callback(( file_obj, kwargs['offset'] )) # Note that this is a tuple of 2-elements - elif kwargs['error'] is not None: - errback(OperationFailure('Failed to store %s on %s: Write failed' % ( path, service_name ), messages_history)) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if connect_message.status == 0: - self.connected_trees[service_name] = connect_message.tid - sendCreate(connect_message.tid) - else: - errback(OperationFailure('Failed to store %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMB2Message(SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), 
service_name ))) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendCreate(self.connected_trees[service_name]) - - - def _deleteFiles_SMB2(self, service_name, path_file_pattern, callback, errback, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - expiry_time = time.time() + timeout - path = path_file_pattern.replace('/', '\\') - if path.startswith('\\'): - path = path[1:] - if path.endswith('\\'): - path = path[:-1] - messages_history = [ ] - - def sendCreate(tid): - create_context_data = binascii.unhexlify(""" -28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 -44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 -00 00 00 00 00 00 00 00 18 00 00 00 10 00 04 00 -00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 -00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 -51 46 69 64 00 00 00 00 -""".replace(' ', '').replace('\n', '')) - m = SMB2Message(SMB2CreateRequest(path, - file_attributes = 0, - access_mask = DELETE | FILE_READ_ATTRIBUTES, - share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, - oplock = SMB2_OPLOCK_LEVEL_NONE, - impersonation = SEC_IMPERSONATE, - create_options = FILE_NON_DIRECTORY_FILE, - create_disp = FILE_OPEN, - create_context_data = create_context_data)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback, tid = tid) - messages_history.append(m) - - def createCB(open_message, **kwargs): - messages_history.append(open_message) - if open_message.status == 0: - sendDelete(open_message.tid, open_message.payload.fid) - else: - errback(OperationFailure('Failed to delete %s on %s: Unable to open file' % ( path, service_name ), messages_history)) - - def sendDelete(tid, fid): - m = SMB2Message(SMB2SetInfoRequest(fid, - additional_info = 0, - info_type = 
SMB2_INFO_FILE, - file_info_class = 0x0d, # SMB2_FILE_DISPOSITION_INFO - data = '\x01')) - ''' - Resources: - https://msdn.microsoft.com/en-us/library/cc246560.aspx - https://msdn.microsoft.com/en-us/library/cc232098.aspx - ''' - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, deleteCB, errback, fid = fid) - messages_history.append(m) - - def deleteCB(delete_message, **kwargs): - messages_history.append(delete_message) - if delete_message.status == 0: - closeFid(delete_message.tid, kwargs['fid'], status = 0) - else: - closeFid(delete_message.tid, kwargs['fid'], status = delete_message.status) - - def closeFid(tid, fid, status = None): - m = SMB2Message(SMB2CloseRequest(fid)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, closeCB, errback, status = status) - messages_history.append(m) - - def closeCB(close_message, **kwargs): - if kwargs['status'] == 0: - callback(path_file_pattern) - else: - errback(OperationFailure('Failed to delete %s on %s: Delete failed' % ( path, service_name ), messages_history)) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if connect_message.status == 0: - self.connected_trees[service_name] = connect_message.tid - sendCreate(connect_message.tid) - else: - errback(OperationFailure('Failed to delete %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMB2Message(SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendCreate(self.connected_trees[service_name]) - - def _resetFileAttributes_SMB2(self, service_name, path_file_pattern, callback, errback, 
timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - expiry_time = time.time() + timeout - path = path_file_pattern.replace('/', '\\') - if path.startswith('\\'): - path = path[1:] - if path.endswith('\\'): - path = path[:-1] - messages_history = [ ] - - def sendCreate(tid): - create_context_data = binascii.unhexlify(""" -28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 -44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 -00 00 00 00 00 00 00 00 18 00 00 00 10 00 04 00 -00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 -00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 -51 46 69 64 00 00 00 00 -""".replace(' ', '').replace('\n', '')) - - m = SMB2Message(SMB2CreateRequest(path, - file_attributes = 0, - access_mask = FILE_WRITE_ATTRIBUTES, - share_access = FILE_SHARE_READ | FILE_SHARE_WRITE, - oplock = SMB2_OPLOCK_LEVEL_NONE, - impersonation = SEC_IMPERSONATE, - create_options = 0, - create_disp = FILE_OPEN, - create_context_data = create_context_data)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback, tid = tid) - messages_history.append(m) - - def createCB(open_message, **kwargs): - messages_history.append(open_message) - if open_message.status == 0: - sendReset(open_message.tid, open_message.payload.fid) - else: - errback(OperationFailure('Failed to reset attributes of %s on %s: Unable to open file' % ( path, service_name ), messages_history)) - - def sendReset(tid, fid): - m = SMB2Message(SMB2SetInfoRequest(fid, - additional_info = 0, - info_type = SMB2_INFO_FILE, - file_info_class = 4, # FileBasicInformation - data = struct.pack('qqqqii',0,0,0,0,0x80,0))) # FILE_ATTRIBUTE_NORMAL - ''' - Resources: - https://msdn.microsoft.com/en-us/library/cc246560.aspx - https://msdn.microsoft.com/en-us/library/cc232064.aspx - https://msdn.microsoft.com/en-us/library/cc232094.aspx - https://msdn.microsoft.com/en-us/library/cc232110.aspx - 
''' - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, resetCB, errback, fid = fid) - messages_history.append(m) - - def resetCB(reset_message, **kwargs): - messages_history.append(reset_message) - if reset_message.status == 0: - closeFid(reset_message.tid, kwargs['fid'], status = 0) - else: - closeFid(reset_message.tid, kwargs['fid'], status = reset_message.status) - - def closeFid(tid, fid, status = None): - m = SMB2Message(SMB2CloseRequest(fid)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, closeCB, errback, status = status) - messages_history.append(m) - - def closeCB(close_message, **kwargs): - if kwargs['status'] == 0: - callback(path_file_pattern) - else: - errback(OperationFailure('Failed to reset attributes of %s on %s: Reset failed' % ( path, service_name ), messages_history)) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if connect_message.status == 0: - self.connected_trees[service_name] = connect_message.tid - sendCreate(connect_message.tid) - else: - errback(OperationFailure('Failed to reset attributes of %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMB2Message(SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendCreate(self.connected_trees[service_name]) - - def _createDirectory_SMB2(self, service_name, path, callback, errback, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - expiry_time = time.time() + timeout - path = path.replace('/', '\\') - if path.startswith('\\'): - path = path[1:] - 
if path.endswith('\\'): - path = path[:-1] - messages_history = [ ] - - def sendCreate(tid): - create_context_data = binascii.unhexlify(""" -28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 -44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 -00 00 00 00 00 00 00 00 18 00 00 00 10 00 04 00 -00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 -00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 -51 46 69 64 00 00 00 00 -""".replace(' ', '').replace('\n', '')) - m = SMB2Message(SMB2CreateRequest(path, - file_attributes = 0, - access_mask = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_EA | FILE_WRITE_EA | FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES | READ_CONTROL | DELETE | SYNCHRONIZE, - share_access = 0, - oplock = SMB2_OPLOCK_LEVEL_NONE, - impersonation = SEC_IMPERSONATE, - create_options = FILE_DIRECTORY_FILE | FILE_SYNCHRONOUS_IO_NONALERT, - create_disp = FILE_CREATE, - create_context_data = create_context_data)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback) - messages_history.append(m) - - def createCB(create_message, **kwargs): - messages_history.append(create_message) - if create_message.status == 0: - closeFid(create_message.tid, create_message.payload.fid) - else: - errback(OperationFailure('Failed to create directory %s on %s: Create failed' % ( path, service_name ), messages_history)) - - def closeFid(tid, fid): - m = SMB2Message(SMB2CloseRequest(fid)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, closeCB, errback) - messages_history.append(m) - - def closeCB(close_message, **kwargs): - callback(path) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if connect_message.status == 0: - self.connected_trees[service_name] = connect_message.tid - sendCreate(connect_message.tid) - else: - 
errback(OperationFailure('Failed to create directory %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMB2Message(SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendCreate(self.connected_trees[service_name]) - - def _deleteDirectory_SMB2(self, service_name, path, callback, errback, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - expiry_time = time.time() + timeout - path = path.replace('/', '\\') - if path.startswith('\\'): - path = path[1:] - if path.endswith('\\'): - path = path[:-1] - messages_history = [ ] - - def sendCreate(tid): - create_context_data = binascii.unhexlify(""" -28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 -44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 -00 00 00 00 00 00 00 00 18 00 00 00 10 00 04 00 -00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 -00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 -51 46 69 64 00 00 00 00 -""".replace(' ', '').replace('\n', '')) - m = SMB2Message(SMB2CreateRequest(path, - file_attributes = 0, - access_mask = DELETE | FILE_READ_ATTRIBUTES, - share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, - oplock = SMB2_OPLOCK_LEVEL_NONE, - impersonation = SEC_IMPERSONATE, - create_options = FILE_DIRECTORY_FILE, - create_disp = FILE_OPEN, - create_context_data = create_context_data)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback, tid = tid) - messages_history.append(m) - - def createCB(open_message, **kwargs): - messages_history.append(open_message) - if open_message.status == 0: - sendDelete(open_message.tid, open_message.payload.fid) - else: - errback(OperationFailure('Failed to delete %s on 
%s: Unable to open directory' % ( path, service_name ), messages_history)) - - def sendDelete(tid, fid): - m = SMB2Message(SMB2SetInfoRequest(fid, - additional_info = 0, - info_type = SMB2_INFO_FILE, - file_info_class = 0x0d, # SMB2_FILE_DISPOSITION_INFO - data = '\x01')) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, deleteCB, errback, fid = fid) - messages_history.append(m) - - def deleteCB(delete_message, **kwargs): - messages_history.append(delete_message) - if delete_message.status == 0: - closeFid(delete_message.tid, kwargs['fid'], status = 0) - else: - closeFid(delete_message.tid, kwargs['fid'], status = delete_message.status) - - def closeFid(tid, fid, status = None): - m = SMB2Message(SMB2CloseRequest(fid)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, closeCB, errback, status = status) - messages_history.append(m) - - def closeCB(close_message, **kwargs): - if kwargs['status'] == 0: - callback(path) - else: - errback(OperationFailure('Failed to delete %s on %s: Delete failed' % ( path, service_name ), messages_history)) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if connect_message.status == 0: - self.connected_trees[service_name] = connect_message.tid - sendCreate(connect_message.tid) - else: - errback(OperationFailure('Failed to delete %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMB2Message(SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendCreate(self.connected_trees[service_name]) - - def _rename_SMB2(self, service_name, old_path, new_path, callback, 
errback, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - expiry_time = time.time() + timeout - messages_history = [ ] - - new_path = new_path.replace('/', '\\') - if new_path.startswith('\\'): - new_path = new_path[1:] - if new_path.endswith('\\'): - new_path = new_path[:-1] - - old_path = old_path.replace('/', '\\') - if old_path.startswith('\\'): - old_path = old_path[1:] - if old_path.endswith('\\'): - old_path = old_path[:-1] - - def sendCreate(tid): - create_context_data = binascii.unhexlify(""" -28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 -44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 -00 00 00 00 00 00 00 00 18 00 00 00 10 00 04 00 -00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 -00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 -51 46 69 64 00 00 00 00 -""".replace(' ', '').replace('\n', '')) - m = SMB2Message(SMB2CreateRequest(old_path, - file_attributes = 0, - access_mask = DELETE | FILE_READ_ATTRIBUTES | SYNCHRONIZE, - share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, - oplock = SMB2_OPLOCK_LEVEL_NONE, - impersonation = SEC_IMPERSONATE, - create_options = FILE_SYNCHRONOUS_IO_NONALERT, - create_disp = FILE_OPEN, - create_context_data = create_context_data)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback, tid = tid) - messages_history.append(m) - - def createCB(create_message, **kwargs): - messages_history.append(create_message) - if create_message.status == 0: - sendRename(create_message.tid, create_message.payload.fid) - else: - errback(OperationFailure('Failed to rename %s on %s: Unable to open file/directory' % ( old_path, service_name ), messages_history)) - - def sendRename(tid, fid): - data = '\x00'*16 + struct.pack('<I', len(new_path)*2) + new_path.encode('UTF-16LE') - m = SMB2Message(SMB2SetInfoRequest(fid, - additional_info = 0, - info_type = SMB2_INFO_FILE, - 
file_info_class = 0x0a, # SMB2_FILE_RENAME_INFO - data = data)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, renameCB, errback, fid = fid) - messages_history.append(m) - - def renameCB(rename_message, **kwargs): - messages_history.append(rename_message) - if rename_message.status == 0: - closeFid(rename_message.tid, kwargs['fid'], status = 0) - else: - closeFid(rename_message.tid, kwargs['fid'], status = rename_message.status) - - def closeFid(tid, fid, status = None): - m = SMB2Message(SMB2CloseRequest(fid)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, closeCB, errback, status = status) - messages_history.append(m) - - def closeCB(close_message, **kwargs): - if kwargs['status'] == 0: - callback(( old_path, new_path )) - else: - errback(OperationFailure('Failed to rename %s on %s: Rename failed' % ( old_path, service_name ), messages_history)) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if connect_message.status == 0: - self.connected_trees[service_name] = connect_message.tid - sendCreate(connect_message.tid) - else: - errback(OperationFailure('Failed to rename %s on %s: Unable to connect to shared device' % ( old_path, service_name ), messages_history)) - - m = SMB2Message(SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendCreate(self.connected_trees[service_name]) - - def _listSnapshots_SMB2(self, service_name, path, callback, errback, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - expiry_time = time.time() + timeout - path = path.replace('/', '\\') - if 
path.startswith('\\'): - path = path[1:] - if path.endswith('\\'): - path = path[:-1] - messages_history = [ ] - - def sendCreate(tid): - create_context_data = binascii.unhexlify(""" -28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 -44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 -00 00 00 00 00 00 00 00 00 00 00 00 10 00 04 00 -00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 -""".replace(' ', '').replace('\n', '')) - m = SMB2Message(SMB2CreateRequest(path, - file_attributes = 0, - access_mask = FILE_READ_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE, - share_access = FILE_SHARE_READ | FILE_SHARE_WRITE, - oplock = SMB2_OPLOCK_LEVEL_NONE, - impersonation = SEC_IMPERSONATE, - create_options = FILE_SYNCHRONOUS_IO_NONALERT, - create_disp = FILE_OPEN, - create_context_data = create_context_data)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback, tid = tid) - messages_history.append(m) - - def createCB(create_message, **kwargs): - messages_history.append(create_message) - if create_message.status == 0: - sendEnumSnapshots(create_message.tid, create_message.payload.fid) - else: - errback(OperationFailure('Failed to list snapshots %s on %s: Unable to open file/directory' % ( old_path, service_name ), messages_history)) - - def sendEnumSnapshots(tid, fid): - m = SMB2Message(SMB2IoctlRequest(fid, - ctlcode = 0x00144064, # FSCTL_SRV_ENUMERATE_SNAPSHOTS - flags = 0x0001, - in_data = '')) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, enumSnapshotsCB, errback, tid = tid, fid = fid) - messages_history.append(m) - - def enumSnapshotsCB(enum_message, **kwargs): - messages_history.append(enum_message) - if enum_message.status == 0: - results = [ ] - snapshots_count = struct.unpack('<I', enum_message.payload.out_data[4:8])[0] - for i in range(0, snapshots_count): - s = 
enum_message.payload.out_data[12+i*50:12+48+i*50].decode('UTF-16LE') - results.append(datetime(*map(int, ( s[5:9], s[10:12], s[13:15], s[16:18], s[19:21], s[22:24] )))) - closeFid(kwargs['tid'], kwargs['fid'], results = results) - else: - closeFid(kwargs['tid'], kwargs['fid'], status = enum_message.status) - - def closeFid(tid, fid, status = None, results = None): - m = SMB2Message(SMB2CloseRequest(fid)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, closeCB, errback, status = status, results = results) - messages_history.append(m) - - def closeCB(close_message, **kwargs): - if kwargs['results'] is not None: - callback(kwargs['results']) - else: - errback(OperationFailure('Failed to list snapshots %s on %s: List failed' % ( path, service_name ), messages_history)) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if connect_message.status == 0: - self.connected_trees[service_name] = connect_message.tid - sendCreate(connect_message.tid) - else: - errback(OperationFailure('Failed to list snapshots %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMB2Message(SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendCreate(self.connected_trees[service_name]) - - def _echo_SMB2(self, data, callback, errback, timeout = 30): - messages_history = [ ] - - def echoCB(echo_message, **kwargs): - messages_history.append(echo_message) - if echo_message.status == 0: - callback(data) - else: - errback(OperationFailure('Echo failed', messages_history)) - - m = SMB2Message(SMB2EchoRequest()) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, 
int(time.time()) + timeout, echoCB, errback) - messages_history.append(m) - - - # - # SMB1 Methods Family - # - - def _sendSMBMessage_SMB1(self, smb_message): - if smb_message.mid == 0: - smb_message.mid = self._getNextMID_SMB1() - if not smb_message.uid: - smb_message.uid = self.uid - if self.is_signing_active: - smb_message.flags2 |= SMB_FLAGS2_SMB_SECURITY_SIGNATURE - - # Increment the next_signing_id as described in [MS-CIFS] 3.2.4.1.3 - smb_message.security = self.next_signing_id - self.next_signing_id += 2 # All our defined messages currently have responses, so always increment by 2 - raw_data = smb_message.encode() - - md = ntlm.MD5(self.signing_session_key) - if self.signing_challenge_response: - md.update(self.signing_challenge_response) - md.update(raw_data) - signature = md.digest()[:8] - - self.log.debug('MID is %d. Signing ID is %d. Signature is %s. Total raw message is %d bytes', smb_message.mid, smb_message.security, binascii.hexlify(signature), len(raw_data)) - smb_message.raw_data = raw_data[:14] + signature + raw_data[22:] - else: - smb_message.raw_data = smb_message.encode() - self.sendNMBMessage(smb_message.raw_data) - - def _getNextMID_SMB1(self): - self.mid += 1 - if self.mid >= 0xFFFF: # MID cannot be 0xFFFF. 
[MS-CIFS]: 2.2.1.6.2 - # We don't use MID of 0 as MID can be reused for SMB_COM_TRANSACTION2_SECONDARY messages - # where if mid=0, _sendSMBMessage will re-assign new MID values again - self.mid = 1 - return self.mid - - def _updateState_SMB1(self, message): - if message.isReply: - if message.command == SMB_COM_NEGOTIATE: - if not message.status.hasError: - self.has_negotiated = True - self.log.info('SMB dialect negotiation successful (ExtendedSecurity:%s)', message.hasExtendedSecurity) - self._updateServerInfo(message.payload) - self._handleNegotiateResponse(message) - else: - raise ProtocolError('Unknown status value (0x%08X) in SMB_COM_NEGOTIATE' % message.status.internal_value, - message.raw_data, message) - elif message.command == SMB_COM_SESSION_SETUP_ANDX: - if message.hasExtendedSecurity: - if not message.status.hasError: - try: - result = securityblob.decodeAuthResponseSecurityBlob(message.payload.security_blob) - if result == securityblob.RESULT_ACCEPT_COMPLETED: - self.log.debug('SMB uid is now %d', message.uid) - self.uid = message.uid - self.has_authenticated = True - self.log.info('Authentication (with extended security) successful!') - self.onAuthOK() - else: - raise ProtocolError('SMB_COM_SESSION_SETUP_ANDX status is 0 but security blob negResult value is %d' % result, message.raw_data, message) - except securityblob.BadSecurityBlobError, ex: - raise ProtocolError(str(ex), message.raw_data, message) - elif message.status.internal_value == 0xc0000016: # STATUS_MORE_PROCESSING_REQUIRED - try: - result, ntlm_token = securityblob.decodeChallengeSecurityBlob(message.payload.security_blob) - if result == securityblob.RESULT_ACCEPT_INCOMPLETE: - self._handleSessionChallenge(message, ntlm_token) - except ( securityblob.BadSecurityBlobError, securityblob.UnsupportedSecurityProvider ), ex: - raise ProtocolError(str(ex), message.raw_data, message) - elif message.status.internal_value == 0xc000006d: # STATUS_LOGON_FAILURE - self.has_authenticated = False - 
self.log.info('Authentication (with extended security) failed. Please check username and password. You may need to enable/disable NTLMv2 authentication.') - self.onAuthFailed() - else: - raise ProtocolError('Unknown status value (0x%08X) in SMB_COM_SESSION_SETUP_ANDX (with extended security)' % message.status.internal_value, - message.raw_data, message) - else: - if message.status.internal_value == 0: - self.log.debug('SMB uid is now %d', message.uid) - self.uid = message.uid - self.has_authenticated = True - self.log.info('Authentication (without extended security) successful!') - self.onAuthOK() - else: - self.has_authenticated = False - self.log.info('Authentication (without extended security) failed. Please check username and password') - self.onAuthFailed() - elif message.command == SMB_COM_TREE_CONNECT_ANDX: - try: - req = self.pending_requests[message.mid] - except KeyError: - pass - else: - if not message.status.hasError: - self.connected_trees[req.kwargs['path']] = message.tid - - req = self.pending_requests.pop(message.mid, None) - if req: - req.callback(message, **req.kwargs) - return True - - - def _updateServerInfo_SMB1(self, payload): - self.capabilities = payload.capabilities - self.security_mode = payload.security_mode - self.max_raw_size = payload.max_raw_size - self.max_buffer_size = payload.max_buffer_size - self.max_mpx_count = payload.max_mpx_count - self.use_plaintext_authentication = not bool(payload.security_mode & NEGOTIATE_ENCRYPT_PASSWORDS) - - if self.use_plaintext_authentication: - self.log.warning('Remote server only supports plaintext authentication. 
Your password can be stolen easily over the network.') - - - def _handleSessionChallenge_SMB1(self, message, ntlm_token): - assert message.hasExtendedSecurity - - if message.uid and not self.uid: - self.uid = message.uid - - server_challenge, server_flags, server_info = ntlm.decodeChallengeMessage(ntlm_token) - if self.use_ntlm_v2: - self.log.info('Performing NTLMv2 authentication (with extended security) with server challenge "%s"', binascii.hexlify(server_challenge)) - nt_challenge_response, lm_challenge_response, session_key = ntlm.generateChallengeResponseV2(self.password, - self.username, - server_challenge, - server_info, - self.domain) - - else: - self.log.info('Performing NTLMv1 authentication (with extended security) with server challenge "%s"', binascii.hexlify(server_challenge)) - nt_challenge_response, lm_challenge_response, session_key = ntlm.generateChallengeResponseV1(self.password, server_challenge, True) - - ntlm_data = ntlm.generateAuthenticateMessage(server_flags, - nt_challenge_response, - lm_challenge_response, - session_key, - self.username, - self.domain) - - if self.log.isEnabledFor(logging.DEBUG): - self.log.debug('NT challenge response is "%s" (%d bytes)', binascii.hexlify(nt_challenge_response), len(nt_challenge_response)) - self.log.debug('LM challenge response is "%s" (%d bytes)', binascii.hexlify(lm_challenge_response), len(lm_challenge_response)) - - blob = securityblob.generateAuthSecurityBlob(ntlm_data) - self._sendSMBMessage(SMBMessage(ComSessionSetupAndxRequest__WithSecurityExtension(0, blob))) - - if self.security_mode & NEGOTIATE_SECURITY_SIGNATURES_REQUIRE: - self.log.info('Server requires all SMB messages to be signed') - self.is_signing_active = (self.sign_options != SMB.SIGN_NEVER) - elif self.security_mode & NEGOTIATE_SECURITY_SIGNATURES_ENABLE: - self.log.info('Server supports SMB signing') - self.is_signing_active = (self.sign_options == SMB.SIGN_WHEN_SUPPORTED) - else: - self.is_signing_active = False - - if 
self.is_signing_active: - self.log.info("SMB signing activated. All SMB messages will be signed.") - self.signing_session_key = session_key - if self.capabilities & CAP_EXTENDED_SECURITY: - self.signing_challenge_response = None - else: - self.signing_challenge_response = blob - else: - self.log.info("SMB signing deactivated. SMB messages will NOT be signed.") - - - def _handleNegotiateResponse_SMB1(self, message): - if message.uid and not self.uid: - self.uid = message.uid - - if message.hasExtendedSecurity or message.payload.supportsExtendedSecurity: - ntlm_data = ntlm.generateNegotiateMessage() - blob = securityblob.generateNegotiateSecurityBlob(ntlm_data) - self._sendSMBMessage(SMBMessage(ComSessionSetupAndxRequest__WithSecurityExtension(message.payload.session_key, blob))) - else: - nt_password, _, _ = ntlm.generateChallengeResponseV1(self.password, message.payload.challenge, False) - self.log.info('Performing NTLMv1 authentication (without extended security) with challenge "%s" and hashed password of "%s"', - binascii.hexlify(message.payload.challenge), - binascii.hexlify(nt_password)) - self._sendSMBMessage(SMBMessage(ComSessionSetupAndxRequest__NoSecurityExtension(message.payload.session_key, - self.username, - nt_password, - True, - self.domain))) - - def _listShares_SMB1(self, callback, errback, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - expiry_time = time.time() + timeout - path = 'IPC$' - messages_history = [ ] - - def connectSrvSvc(tid): - m = SMBMessage(ComNTCreateAndxRequest('\\srvsvc', - flags = NT_CREATE_REQUEST_EXTENDED_RESPONSE, - access_mask = READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES | FILE_WRITE_EA | FILE_READ_EA | FILE_APPEND_DATA | FILE_WRITE_DATA | FILE_READ_DATA, - share_access = FILE_SHARE_READ | FILE_SHARE_WRITE, - create_disp = FILE_OPEN, - create_options = FILE_OPEN_NO_RECALL | FILE_NON_DIRECTORY_FILE, - impersonation = SEC_IMPERSONATE, - 
security_flags = 0)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectSrvSvcCB, errback) - messages_history.append(m) - - def connectSrvSvcCB(create_message, **kwargs): - messages_history.append(create_message) - if not create_message.status.hasError: - call_id = self._getNextRPCCallID() - # See [MS-CIFS]: 2.2.5.6.1 for more information on TRANS_TRANSACT_NMPIPE (0x0026) parameters - setup_bytes = struct.pack('<HH', 0x0026, create_message.payload.fid) - # The data_bytes are binding call to Server Service RPC using DCE v1.1 RPC over SMB. See [MS-SRVS] and [C706] - # If you wish to understand the meanings of the byte stream, I would suggest you use a recent version of WireShark to packet capture the stream - data_bytes = \ - binascii.unhexlify("""05 00 0b 03 10 00 00 00 48 00 00 00""".replace(' ', '')) + \ - struct.pack('<I', call_id) + \ - binascii.unhexlify(""" -b8 10 b8 10 00 00 00 00 01 00 00 00 00 00 01 00 -c8 4f 32 4b 70 16 d3 01 12 78 5a 47 bf 6e e1 88 -03 00 00 00 04 5d 88 8a eb 1c c9 11 9f e8 08 00 -2b 10 48 60 02 00 00 00""".replace(' ', '').replace('\n', '')) - m = SMBMessage(ComTransactionRequest(max_params_count = 0, - max_data_count = 4280, - max_setup_count = 0, - data_bytes = data_bytes, - setup_bytes = setup_bytes)) - m.tid = create_message.tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, rpcBindCB, errback, fid = create_message.payload.fid) - messages_history.append(m) - else: - errback(OperationFailure('Failed to list shares: Unable to locate Server Service RPC endpoint', messages_history)) - - def rpcBindCB(trans_message, **kwargs): - messages_history.append(trans_message) - if not trans_message.status.hasError: - call_id = self._getNextRPCCallID() - - padding = '' - server_len = len(self.remote_name) + 1 - server_bytes_len = server_len * 2 - if server_len % 2 != 0: - padding = '\0\0' - server_bytes_len += 2 - - # See [MS-CIFS]: 
2.2.5.6.1 for more information on TRANS_TRANSACT_NMPIPE (0x0026) parameters - setup_bytes = struct.pack('<HH', 0x0026, kwargs['fid']) - # The data bytes are the RPC call to NetrShareEnum (Opnum 15) at Server Service RPC. - # If you wish to understand the meanings of the byte stream, I would suggest you use a recent version of WireShark to packet capture the stream - data_bytes = \ - binascii.unhexlify("""05 00 00 03 10 00 00 00""".replace(' ', '')) + \ - struct.pack('<HHI', 72+server_bytes_len, 0, call_id) + \ - binascii.unhexlify("""4c 00 00 00 00 00 0f 00 00 00 02 00""".replace(' ', '')) + \ - struct.pack('<III', server_len, 0, server_len) + \ - (self.remote_name + '\0').encode('UTF-16LE') + padding + \ - binascii.unhexlify(""" -01 00 00 00 01 00 00 00 04 00 02 00 00 00 00 00 -00 00 00 00 ff ff ff ff 08 00 02 00 00 00 00 00 -""".replace(' ', '').replace('\n', '')) - m = SMBMessage(ComTransactionRequest(max_params_count = 0, - max_data_count = 4280, - max_setup_count = 0, - data_bytes = data_bytes, - setup_bytes = setup_bytes)) - m.tid = trans_message.tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, listShareResultsCB, errback, fid = kwargs['fid']) - messages_history.append(m) - else: - closeFid(trans_message.tid, kwargs['fid']) - errback(OperationFailure('Failed to list shares: Unable to bind to Server Service RPC endpoint', messages_history)) - - def listShareResultsCB(result_message, **kwargs): - messages_history.append(result_message) - if not result_message.status.hasError: - # The payload.data_bytes will contain the results of the RPC call to NetrShareEnum (Opnum 15) at Server Service RPC. 
- data_bytes = result_message.payload.data_bytes - - if ord(data_bytes[3]) & 0x02 == 0: - sendReadRequest(result_message.tid, kwargs['fid'], data_bytes) - else: - decodeResults(result_message.tid, kwargs['fid'], data_bytes) - else: - closeFid(result_message.tid, kwargs['fid']) - errback(OperationFailure('Failed to list shares: Unable to retrieve shared device list', messages_history)) - - def decodeResults(tid, fid, data_bytes): - shares_count = struct.unpack('<I', data_bytes[36:40])[0] - results = [ ] # A list of SharedDevice instances - offset = 36 + 12 # You need to study the byte stream to understand the meaning of these constants - for i in range(0, shares_count): - results.append(SharedDevice(struct.unpack('<I', data_bytes[offset+4:offset+8])[0], None, None)) - offset += 12 - - for i in range(0, shares_count): - max_length, _, length = struct.unpack('<III', data_bytes[offset:offset+12]) - offset += 12 - results[i].name = unicode(data_bytes[offset:offset+length*2-2], 'UTF-16LE') - - if length % 2 != 0: - offset += (length * 2 + 2) - else: - offset += (length * 2) - - max_length, _, length = struct.unpack('<III', data_bytes[offset:offset+12]) - offset += 12 - results[i].comments = unicode(data_bytes[offset:offset+length*2-2], 'UTF-16LE') - - if length % 2 != 0: - offset += (length * 2 + 2) - else: - offset += (length * 2) - - closeFid(tid, fid) - callback(results) - - def sendReadRequest(tid, fid, data_bytes): - read_count = min(4280, self.max_raw_size - 2) - m = SMBMessage(ComReadAndxRequest(fid = fid, - offset = 0, - max_return_bytes_count = read_count, - min_return_bytes_count = read_count)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, readCB, errback, fid = fid, data_bytes = data_bytes) - - def readCB(read_message, **kwargs): - messages_history.append(read_message) - if not read_message.status.hasError: - data_len = read_message.payload.data_length - data_bytes = 
read_message.payload.data - - if ord(data_bytes[3]) & 0x02 == 0: - sendReadRequest(read_message.tid, kwargs['fid'], kwargs['data_bytes'] + data_bytes[24:data_len-24]) - else: - decodeResults(read_message.tid, kwargs['fid'], kwargs['data_bytes'] + data_bytes[24:data_len-24]) - else: - closeFid(read_message.tid, kwargs['fid']) - errback(OperationFailure('Failed to list shares: Unable to retrieve shared device list', messages_history)) - - def closeFid(tid, fid): - m = SMBMessage(ComCloseRequest(fid)) - m.tid = tid - self._sendSMBMessage(m) - messages_history.append(m) - - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if not connect_message.status.hasError: - self.connected_trees[path] = connect_message.tid - connectSrvSvc(connect_message.tid) - else: - errback(OperationFailure('Failed to list shares: Unable to connect to IPC$', messages_history)) - - m = SMBMessage(ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), path ), SERVICE_ANY, '')) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = path) - messages_history.append(m) - - def _listPath_SMB1(self, service_name, path, callback, errback, search, pattern, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - expiry_time = time.time() + timeout - path = path.replace('/', '\\') - if not path.endswith('\\'): - path += '\\' - messages_history = [ ] - results = [ ] - - def sendFindFirst(tid, support_dfs=False): - setup_bytes = struct.pack('<H', 0x0001) # TRANS2_FIND_FIRST2 sub-command. 
See [MS-CIFS]: 2.2.6.2.1 - params_bytes = \ - struct.pack('<HHHHI', - search, # SearchAttributes - 100, # SearchCount - 0x0006, # Flags: SMB_FIND_CLOSE_AT_EOS | SMB_FIND_RETURN_RESUME_KEYS - 0x0104, # InfoLevel: SMB_FIND_FILE_BOTH_DIRECTORY_INFO - 0x0000) # SearchStorageType - if support_dfs: - params_bytes += ("\\" + self.remote_name + "\\" + service_name + path + pattern + '\0').encode('UTF-16LE') - else: - params_bytes += (path + pattern).encode('UTF-16LE') - - m = SMBMessage(ComTransaction2Request(max_params_count = 10, - max_data_count = 16644, - max_setup_count = 0, - params_bytes = params_bytes, - setup_bytes = setup_bytes)) - m.tid = tid - if support_dfs: - m.flags2 |= SMB_FLAGS2_DFS - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, findFirstCB, errback, support_dfs=support_dfs) - messages_history.append(m) - - def decodeFindStruct(data_bytes): - # SMB_FIND_FILE_BOTH_DIRECTORY_INFO structure. See [MS-CIFS]: 2.2.8.1.7 and [MS-SMB]: 2.2.8.1.1 - info_format = '<IIQQQQQQIIIBB24s' - info_size = struct.calcsize(info_format) - - data_length = len(data_bytes) - offset = 0 - while offset < data_length: - if offset + info_size > data_length: - return data_bytes[offset:] - - next_offset, _, \ - create_time, last_access_time, last_write_time, last_attr_change_time, \ - file_size, alloc_size, file_attributes, filename_length, ea_size, \ - short_name_length, _, short_name = struct.unpack(info_format, data_bytes[offset:offset+info_size]) - - offset2 = offset + info_size - if offset2 + filename_length > data_length: - return data_bytes[offset:] - - filename = data_bytes[offset2:offset2+filename_length].decode('UTF-16LE') - short_name = short_name.decode('UTF-16LE') - results.append(SharedFile(convertFILETIMEtoEpoch(create_time), convertFILETIMEtoEpoch(last_access_time), - convertFILETIMEtoEpoch(last_write_time), convertFILETIMEtoEpoch(last_attr_change_time), - file_size, alloc_size, file_attributes, short_name, filename)) - - if 
next_offset: - offset += next_offset - else: - break - return '' - - def findFirstCB(find_message, **kwargs): - messages_history.append(find_message) - if not find_message.status.hasError: - if not kwargs.has_key('total_count'): - # TRANS2_FIND_FIRST2 response. [MS-CIFS]: 2.2.6.2.2 - sid, search_count, end_of_search, _, last_name_offset = struct.unpack('<HHHHH', find_message.payload.params_bytes[:10]) - kwargs.update({ 'sid': sid, 'end_of_search': end_of_search, 'last_name_offset': last_name_offset, 'data_buf': '' }) - else: - sid, end_of_search, last_name_offset = kwargs['sid'], kwargs['end_of_search'], kwargs['last_name_offset'] - - send_next = True - if find_message.payload.data_bytes: - d = decodeFindStruct(kwargs['data_buf'] + find_message.payload.data_bytes) - if not kwargs.has_key('data_count'): - if len(find_message.payload.data_bytes) != find_message.payload.total_data_count: - kwargs.update({ 'data_count': len(find_message.payload.data_bytes), - 'total_count': find_message.payload.total_data_count, - 'data_buf': d, - }) - send_next = False - else: - kwargs['data_count'] += len(find_message.payload.data_bytes) - kwargs['total_count'] = min(find_message.payload.total_data_count, kwargs['total_count']) - kwargs['data_buf'] = d - if kwargs['data_count'] != kwargs['total_count']: - send_next = False - - if not send_next: - self.pending_requests[find_message.mid] = _PendingRequest(find_message.mid, expiry_time, findFirstCB, errback, **kwargs) - elif end_of_search: - callback(results) - else: - sendFindNext(find_message.tid, sid, last_name_offset, kwargs.get('support_dfs', False)) - else: - errback(OperationFailure('Failed to list %s on %s: Unable to retrieve file list' % ( path, service_name ), messages_history)) - - def sendFindNext(tid, sid, resume_key, support_dfs=False): - setup_bytes = struct.pack('<H', 0x0002) # TRANS2_FIND_NEXT2 sub-command. 
See [MS-CIFS]: 2.2.6.3.1 - params_bytes = \ - struct.pack('<HHHIH', - sid, # SID - 100, # SearchCount - 0x0104, # InfoLevel: SMB_FIND_FILE_BOTH_DIRECTORY_INFO - resume_key, # ResumeKey - 0x000a) # Flags: SMB_FIND_RETURN_RESUME_KEYS | SMB_FIND_CLOSE_AT_EOS | SMB_FIND_RETURN_RESUME_KEYS - if support_dfs: - params_bytes += ("\\" + self.remote_name + "\\" + service_name + path + pattern + '\0').encode('UTF-16LE') - else: - params_bytes += (path + pattern).encode('UTF-16LE') - - m = SMBMessage(ComTransaction2Request(max_params_count = 10, - max_data_count = 16644, - max_setup_count = 0, - params_bytes = params_bytes, - setup_bytes = setup_bytes)) - m.tid = tid - if support_dfs: - m.flags2 |= SMB_FLAGS2_DFS - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, findNextCB, errback, sid = sid, support_dfs = support_dfs) - messages_history.append(m) - - def findNextCB(find_message, **kwargs): - messages_history.append(find_message) - if not find_message.status.hasError: - if not kwargs.has_key('total_count'): - # TRANS2_FIND_NEXT2 response. 
[MS-CIFS]: 2.2.6.3.2 - search_count, end_of_search, _, last_name_offset = struct.unpack('<HHHH', find_message.payload.params_bytes[:8]) - kwargs.update({ 'end_of_search': end_of_search, 'last_name_offset': last_name_offset, 'data_buf': '' }) - else: - end_of_search, last_name_offset = kwargs['end_of_search'], kwargs['last_name_offset'] - - send_next = True - if find_message.payload.data_bytes: - d = decodeFindStruct(kwargs['data_buf'] + find_message.payload.data_bytes) - if not kwargs.has_key('data_count'): - if len(find_message.payload.data_bytes) != find_message.payload.total_data_count: - kwargs.update({ 'data_count': len(find_message.payload.data_bytes), - 'total_count': find_message.payload.total_data_count, - 'data_buf': d, - }) - send_next = False - else: - kwargs['data_count'] += len(find_message.payload.data_bytes) - kwargs['total_count'] = min(find_message.payload.total_data_count, kwargs['total_count']) - kwargs['data_buf'] = d - if kwargs['data_count'] != kwargs['total_count']: - send_next = False - - if not send_next: - self.pending_requests[find_message.mid] = _PendingRequest(find_message.mid, expiry_time, findNextCB, errback, **kwargs) - elif end_of_search: - callback(results) - else: - sendFindNext(find_message.tid, kwargs['sid'], last_name_offset, kwargs.get('support_dfs', False)) - else: - errback(OperationFailure('Failed to list %s on %s: Unable to retrieve file list' % ( path, service_name ), messages_history)) - - def sendDFSReferral(tid): - setup_bytes = struct.pack('<H', 0x0010) # TRANS2_GET_DFS_REFERRAL sub-command. 
See [MS-CIFS]: 2.2.6.16.1 - params_bytes = struct.pack('<H', 3) # Max referral level 3 - params_bytes += ("\\" + self.remote_name + "\\" + service_name).encode('UTF-16LE') - - m = SMBMessage(ComTransaction2Request(max_params_count = 10, - max_data_count = 16644, - max_setup_count = 0, - params_bytes = params_bytes, - setup_bytes = setup_bytes)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, dfsReferralCB, errback) - messages_history.append(m) - - def dfsReferralCB(dfs_message, **kwargs): - sendFindFirst(dfs_message.tid, True) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if not connect_message.status.hasError: - self.connected_trees[service_name] = connect_message.tid - if connect_message.payload.optional_support & SMB_TREE_CONNECTX_SUPPORT_DFS: - sendDFSReferral(connect_message.tid) - else: - sendFindFirst(connect_message.tid, False) - else: - errback(OperationFailure('Failed to list %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMBMessage(ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendFindFirst(self.connected_trees[service_name]) - - def _getAttributes_SMB1(self, service_name, path, callback, errback, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - expiry_time = time.time() + timeout - path = path.replace('/', '\\') - if path.startswith('\\'): - path = path[1:] - if path.endswith('\\'): - path = path[:-1] - messages_history = [ ] - - def sendQuery(tid): - setup_bytes = struct.pack('<H', 0x0005) # TRANS2_QUERY_PATH_INFORMATION sub-command. 
See [MS-CIFS]: 2.2.6.6.1 - params_bytes = \ - struct.pack('<HI', - 0x0107, # SMB_QUERY_FILE_ALL_INFO ([MS-CIFS] 2.2.2.3.3) - 0x0000) # Reserved - params_bytes += (path + '\0').encode('UTF-16LE') - - m = SMBMessage(ComTransaction2Request(max_params_count = 2, - max_data_count = 65535, - max_setup_count = 0, - params_bytes = params_bytes, - setup_bytes = setup_bytes)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, queryCB, errback) - messages_history.append(m) - - def queryCB(query_message, **kwargs): - messages_history.append(query_message) - if not query_message.status.hasError: - info_format = '<QQQQIIQQ' - info_size = struct.calcsize(info_format) - create_time, last_access_time, last_write_time, last_attr_change_time, \ - file_attributes, _, alloc_size, file_size = struct.unpack(info_format, query_message.payload.data_bytes[:info_size]) - - info = SharedFile(create_time, last_access_time, last_write_time, last_attr_change_time, - file_size, alloc_size, file_attributes, unicode(path), unicode(path)) - callback(info) - else: - errback(OperationFailure('Failed to get attributes for %s on %s: Read failed' % ( path, service_name ), messages_history)) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if not connect_message.status.hasError: - self.connected_trees[service_name] = connect_message.tid - sendQuery(connect_message.tid) - else: - errback(OperationFailure('Failed to get attributes for %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMBMessage(ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) - messages_history.append(m) - else: - 
sendQuery(self.connected_trees[service_name]) - - def _retrieveFile_SMB1(self, service_name, path, file_obj, callback, errback, timeout = 30): - return self._retrieveFileFromOffset(service_name, path, file_obj, callback, errback, 0L, -1L, timeout) - - def _retrieveFileFromOffset_SMB1(self, service_name, path, file_obj, callback, errback, starting_offset, max_length, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - path = path.replace('/', '\\') - messages_history = [ ] - - def sendOpen(tid): - m = SMBMessage(ComOpenAndxRequest(filename = path, - access_mode = 0x0040, # Sharing mode: Deny nothing to others - open_mode = 0x0001, # Failed if file does not exist - search_attributes = SMB_FILE_ATTRIBUTE_HIDDEN | SMB_FILE_ATTRIBUTE_SYSTEM, - timeout = timeout * 1000)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, openCB, errback) - messages_history.append(m) - - def openCB(open_message, **kwargs): - messages_history.append(open_message) - if not open_message.status.hasError: - if max_length == 0: - closeFid(open_message.tid, open_message.payload.fid) - callback(( file_obj, open_message.payload.file_attributes, 0L )) - else: - sendRead(open_message.tid, open_message.payload.fid, starting_offset, open_message.payload.file_attributes, 0L, max_length) - else: - errback(OperationFailure('Failed to retrieve %s on %s: Unable to open file' % ( path, service_name ), messages_history)) - - def sendRead(tid, fid, offset, file_attributes, read_len, remaining_len): - read_count = self.max_raw_size - 2 - m = SMBMessage(ComReadAndxRequest(fid = fid, - offset = offset, - max_return_bytes_count = read_count, - min_return_bytes_count = min(0xFFFF, read_count))) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, readCB, errback, fid = fid, offset = offset, file_attributes = 
file_attributes, - read_len = read_len, remaining_len = remaining_len) - - def readCB(read_message, **kwargs): - # To avoid crazy memory usage when retrieving large files, we do not save every read_message in messages_history. - if not read_message.status.hasError: - read_len = kwargs['read_len'] - remaining_len = kwargs['remaining_len'] - data_len = read_message.payload.data_length - if max_length > 0: - if data_len > remaining_len: - file_obj.write(read_message.payload.data[:remaining_len]) - read_len += remaining_len - remaining_len = 0 - else: - file_obj.write(read_message.payload.data) - remaining_len -= data_len - read_len += data_len - else: - file_obj.write(read_message.payload.data) - read_len += data_len - - if (max_length > 0 and remaining_len <= 0) or data_len < (self.max_raw_size - 2): - closeFid(read_message.tid, kwargs['fid']) - callback(( file_obj, kwargs['file_attributes'], read_len )) # Note that this is a tuple of 3-elements - else: - sendRead(read_message.tid, kwargs['fid'], kwargs['offset']+data_len, kwargs['file_attributes'], read_len, remaining_len) - else: - messages_history.append(read_message) - closeFid(read_message.tid, kwargs['fid']) - errback(OperationFailure('Failed to retrieve %s on %s: Read failed' % ( path, service_name ), messages_history)) - - def closeFid(tid, fid): - m = SMBMessage(ComCloseRequest(fid)) - m.tid = tid - self._sendSMBMessage(m) - messages_history.append(m) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if not connect_message.status.hasError: - self.connected_trees[service_name] = connect_message.tid - sendOpen(connect_message.tid) - else: - errback(OperationFailure('Failed to retrieve %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMBMessage(ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) - self._sendSMBMessage(m) 
- self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendOpen(self.connected_trees[service_name]) - - def _storeFile_SMB1(self, service_name, path, file_obj, callback, errback, timeout = 30): - self._storeFileFromOffset_SMB1(service_name, path, file_obj, callback, errback, 0L, True, timeout) - - def _storeFileFromOffset_SMB1(self, service_name, path, file_obj, callback, errback, starting_offset, truncate = False, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - path = path.replace('/', '\\') - messages_history = [ ] - - def sendOpen(tid): - m = SMBMessage(ComOpenAndxRequest(filename = path, - access_mode = 0x0041, # Sharing mode: Deny nothing to others + Open for writing - open_mode = 0x0012 if truncate else 0x0011, # Create file if file does not exist. Overwrite or append depending on truncate parameter. - search_attributes = SMB_FILE_ATTRIBUTE_HIDDEN | SMB_FILE_ATTRIBUTE_SYSTEM, - timeout = timeout * 1000)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, openCB, errback) - messages_history.append(m) - - def openCB(open_message, **kwargs): - messages_history.append(open_message) - if not open_message.status.hasError: - sendWrite(open_message.tid, open_message.payload.fid, starting_offset) - else: - errback(OperationFailure('Failed to store %s on %s: Unable to open file' % ( path, service_name ), messages_history)) - - def sendWrite(tid, fid, offset): - # For message signing, the total SMB message size must be not exceed the max_buffer_size. 
Non-message signing does not have this limitation - write_count = min((self.is_signing_active and (self.max_buffer_size-64)) or self.max_raw_size, 0xFFFF-1) # Need to minus 1 byte from 0xFFFF because of the first NULL byte in the ComWriteAndxRequest message data - data_bytes = file_obj.read(write_count) - data_len = len(data_bytes) - if data_len > 0: - m = SMBMessage(ComWriteAndxRequest(fid = fid, offset = offset, data_bytes = data_bytes)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, writeCB, errback, fid = fid, offset = offset+data_len) - else: - closeFid(tid, fid) - callback(( file_obj, offset )) # Note that this is a tuple of 2-elements - - def writeCB(write_message, **kwargs): - # To avoid crazy memory usage when saving large files, we do not save every write_message in messages_history. - if not write_message.status.hasError: - sendWrite(write_message.tid, kwargs['fid'], kwargs['offset']) - else: - messages_history.append(write_message) - closeFid(write_message.tid, kwargs['fid']) - errback(OperationFailure('Failed to store %s on %s: Write failed' % ( path, service_name ), messages_history)) - - def closeFid(tid, fid): - m = SMBMessage(ComCloseRequest(fid)) - m.tid = tid - self._sendSMBMessage(m) - messages_history.append(m) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if not connect_message.status.hasError: - self.connected_trees[service_name] = connect_message.tid - sendOpen(connect_message.tid) - else: - errback(OperationFailure('Failed to store %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMBMessage(ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, connectCB, errback, 
path = service_name) - messages_history.append(m) - else: - sendOpen(self.connected_trees[service_name]) - - def _deleteFiles_SMB1(self, service_name, path_file_pattern, callback, errback, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - path = path_file_pattern.replace('/', '\\') - messages_history = [ ] - - def sendDelete(tid): - m = SMBMessage(ComDeleteRequest(filename_pattern = path, - search_attributes = SMB_FILE_ATTRIBUTE_HIDDEN | SMB_FILE_ATTRIBUTE_SYSTEM)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, deleteCB, errback) - messages_history.append(m) - - def deleteCB(delete_message, **kwargs): - messages_history.append(delete_message) - if not delete_message.status.hasError: - callback(path_file_pattern) - else: - errback(OperationFailure('Failed to store %s on %s: Delete failed' % ( path, service_name ), messages_history)) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if not connect_message.status.hasError: - self.connected_trees[service_name] = connect_message.tid - sendDelete(connect_message.tid) - else: - errback(OperationFailure('Failed to delete %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMBMessage(ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendDelete(self.connected_trees[service_name]) - - def _resetFileAttributes_SMB1(self, service_name, path_file_pattern, callback, errback, timeout = 30): - raise NotReadyError('resetFileAttributes is not yet implemented for SMB1') - - def _createDirectory_SMB1(self, service_name, path, 
callback, errback, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - path = path.replace('/', '\\') - messages_history = [ ] - - def sendCreate(tid): - m = SMBMessage(ComCreateDirectoryRequest(path)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback) - messages_history.append(m) - - def createCB(create_message, **kwargs): - messages_history.append(create_message) - if not create_message.status.hasError: - callback(path) - else: - errback(OperationFailure('Failed to create directory %s on %s: Create failed' % ( path, service_name ), messages_history)) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if not connect_message.status.hasError: - self.connected_trees[service_name] = connect_message.tid - sendCreate(connect_message.tid) - else: - errback(OperationFailure('Failed to create directory %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMBMessage(ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendCreate(self.connected_trees[service_name]) - - def _deleteDirectory_SMB1(self, service_name, path, callback, errback, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - path = path.replace('/', '\\') - messages_history = [ ] - - def sendDelete(tid): - m = SMBMessage(ComDeleteDirectoryRequest(path)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, deleteCB, errback) - messages_history.append(m) - - def 
deleteCB(delete_message, **kwargs): - messages_history.append(delete_message) - if not delete_message.status.hasError: - callback(path) - else: - errback(OperationFailure('Failed to delete directory %s on %s: Delete failed' % ( path, service_name ), messages_history)) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if not connect_message.status.hasError: - self.connected_trees[service_name] = connect_message.tid - sendDelete(connect_message.tid) - else: - errback(OperationFailure('Failed to delete %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMBMessage(ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendDelete(self.connected_trees[service_name]) - - def _rename_SMB1(self, service_name, old_path, new_path, callback, errback, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - new_path = new_path.replace('/', '\\') - old_path = old_path.replace('/', '\\') - messages_history = [ ] - - def sendRename(tid): - m = SMBMessage(ComRenameRequest(old_path = old_path, - new_path = new_path, - search_attributes = SMB_FILE_ATTRIBUTE_HIDDEN | SMB_FILE_ATTRIBUTE_SYSTEM)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, renameCB, errback) - messages_history.append(m) - - def renameCB(rename_message, **kwargs): - messages_history.append(rename_message) - if not rename_message.status.hasError: - callback(( old_path, new_path )) # Note that this is a tuple of 2-elements - else: - errback(OperationFailure('Failed to rename %s on %s: Rename failed' % ( old_path, 
service_name ), messages_history)) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if not connect_message.status.hasError: - self.connected_trees[service_name] = connect_message.tid - sendRename(connect_message.tid) - else: - errback(OperationFailure('Failed to rename %s on %s: Unable to connect to shared device' % ( old_path, service_name ), messages_history)) - - m = SMBMessage(ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, connectCB, errback, path = service_name) - messages_history.append(m) - else: - sendRename(self.connected_trees[service_name]) - - def _listSnapshots_SMB1(self, service_name, path, callback, errback, timeout = 30): - if not self.has_authenticated: - raise NotReadyError('SMB connection not authenticated') - - expiry_time = time.time() + timeout - path = path.replace('/', '\\') - if not path.endswith('\\'): - path += '\\' - messages_history = [ ] - results = [ ] - - def sendOpen(tid): - m = SMBMessage(ComOpenAndxRequest(filename = path, - access_mode = 0x0040, # Sharing mode: Deny nothing to others - open_mode = 0x0001, # Failed if file does not exist - search_attributes = 0, - timeout = timeout * 1000)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, openCB, errback) - messages_history.append(m) - - def openCB(open_message, **kwargs): - messages_history.append(open_message) - if not open_message.status.hasError: - sendEnumSnapshots(open_message.tid, open_message.payload.fid) - else: - errback(OperationFailure('Failed to list snapshots %s on %s: Unable to open path' % ( path, service_name ), messages_history)) - - def sendEnumSnapshots(tid, fid): - # [MS-CIFS]: 2.2.7.2 - # [MS-SMB]: 2.2.7.2.1 - setup_bytes = 
struct.pack('<IHBB', - 0x00144064, # [MS-SMB]: 2.2.7.2.1 - fid, # FID - 0x01, # IsFctl - 0) # IsFlags - m = SMBMessage(ComNTTransactRequest(function = 0x0002, # NT_TRANSACT_IOCTL. [MS-CIFS]: 2.2.7.2.1 - max_params_count = 0, - max_data_count = 0xFFFF, - max_setup_count = 0, - setup_bytes = setup_bytes)) - m.tid = tid - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, enumSnapshotsCB, errback, tid = tid, fid = fid) - messages_history.append(m) - - def enumSnapshotsCB(enum_message, **kwargs): - messages_history.append(enum_message) - if not enum_message.status.hasError: - results = [ ] - snapshots_count = struct.unpack('<I', enum_message.payload.data_bytes[4:8])[0] - for i in range(0, snapshots_count): - s = enum_message.payload.data_bytes[12+i*50:12+48+i*50].decode('UTF-16LE') - results.append(datetime(*map(int, ( s[5:9], s[10:12], s[13:15], s[16:18], s[19:21], s[22:24] )))) - closeFid(kwargs['tid'], kwargs['fid']) - callback(results) - else: - closeFid(kwargs['tid'], kwargs['fid']) - errback(OperationFailure('Failed to list snapshots %s on %s: Unable to list snapshots on path' % ( path, service_name ), messages_history)) - - def closeFid(tid, fid): - m = SMBMessage(ComCloseRequest(fid)) - m.tid = tid - self._sendSMBMessage(m) - messages_history.append(m) - - if not self.connected_trees.has_key(service_name): - def connectCB(connect_message, **kwargs): - messages_history.append(connect_message) - if not connect_message.status.hasError: - self.connected_trees[service_name] = connect_message.tid - sendOpen(connect_message.tid) - else: - errback(OperationFailure('Failed to list snapshots %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) - - m = SMBMessage(ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = 
service_name) - messages_history.append(m) - else: - sendOpen(self.connected_trees[service_name]) - - def _echo_SMB1(self, data, callback, errback, timeout = 30): - messages_history = [ ] - - def echoCB(echo_message, **kwargs): - messages_history.append(echo_message) - if not echo_message.status.hasError: - callback(echo_message.payload.data) - else: - errback(OperationFailure('Echo failed', messages_history)) - - m = SMBMessage(ComEchoRequest(echo_data = data)) - self._sendSMBMessage(m) - self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, echoCB, errback) - messages_history.append(m) - - -class SharedDevice: - """ - Contains information about a single shared device on the remote server. - """ - - # The following constants are taken from [MS-SRVS]: 2.2.2.4 - # They are used to identify the type of shared resource from the results from the NetrShareEnum in Server Service RPC - DISK_TREE = 0x00 - PRINT_QUEUE = 0x01 - COMM_DEVICE = 0x02 - IPC = 0x03 - - def __init__(self, type, name, comments): - self._type = type - self.name = name #: An unicode string containing the name of the shared device - self.comments = comments #: An unicode string containing the user description of the shared device - - @property - def type(self): - """ - Returns one of the following integral constants. - - SharedDevice.DISK_TREE - - SharedDevice.PRINT_QUEUE - - SharedDevice.COMM_DEVICE - - SharedDevice.IPC - """ - return self._type & 0xFFFF - - @property - def isSpecial(self): - """ - Returns True if this shared device is a special share reserved for interprocess communication (IPC$) - or remote administration of the server (ADMIN$). Can also refer to administrative shares such as - C$, D$, E$, and so forth - """ - return bool(self._type & 0x80000000) - - @property - def isTemporary(self): - """ - Returns True if this is a temporary share that is not persisted for creation each time the file server initializes. 
- """ - return bool(self._type & 0x40000000) - - def __unicode__(self): - return u'Shared device: %s (type:0x%02x comments:%s)' % (self.name, self.type, self.comments ) - - -class SharedFile: - """ - Contain information about a file/folder entry that is shared on the shared device. - - As an application developer, you should not need to instantiate a *SharedFile* instance directly in your application. - These *SharedFile* instances are usually returned via a call to *listPath* method in :doc:`smb.SMBProtocol.SMBProtocolFactory<smb_SMBProtocolFactory>`. - - If you encounter *SharedFile* instance where its short_name attribute is empty but the filename attribute contains a short name which does not correspond - to any files/folders on your remote shared device, it could be that the original filename on the file/folder entry on the shared device contains - one of these prohibited characters: "\/[]:+|<>=;?,* (see [MS-CIFS]: 2.2.1.1.1 for more details). - """ - - def __init__(self, create_time, last_access_time, last_write_time, last_attr_change_time, file_size, alloc_size, file_attributes, short_name, filename): - self.create_time = create_time #: Float value in number of seconds since 1970-01-01 00:00:00 to the time of creation of this file resource on the remote server - self.last_access_time = last_access_time #: Float value in number of seconds since 1970-01-01 00:00:00 to the time of last access of this file resource on the remote server - self.last_write_time = last_write_time #: Float value in number of seconds since 1970-01-01 00:00:00 to the time of last modification of this file resource on the remote server - self.last_attr_change_time = last_attr_change_time #: Float value in number of seconds since 1970-01-01 00:00:00 to the time of last attribute change of this file resource on the remote server - self.file_size = file_size #: File size in number of bytes - self.alloc_size = alloc_size #: Total number of bytes allocated to store this file - 
self.file_attributes = file_attributes #: A SMB_EXT_FILE_ATTR integer value. See [MS-CIFS]: 2.2.1.2.3 - self.short_name = short_name #: Unicode string containing the short name of this file (usually in 8.3 notation) - self.filename = filename #: Unicode string containing the long filename of this file. Each OS has a limit to the length of this file name. On Windows, it is 256 characters. - - @property - def isDirectory(self): - """A convenient property to return True if this file resource is a directory on the remote server""" - return bool(self.file_attributes & ATTR_DIRECTORY) - - @property - def isReadOnly(self): - """A convenient property to return True if this file resource is read-only on the remote server""" - return bool(self.file_attributes & ATTR_READONLY) - - def __unicode__(self): - return u'Shared file: %s (FileSize:%d bytes, isDirectory:%s)' % ( self.filename, self.file_size, self.isDirectory ) - - -class _PendingRequest: - - def __init__(self, mid, expiry_time, callback, errback, **kwargs): - self.mid = mid - self.expiry_time = expiry_time - self.callback = callback - self.errback = errback - self.kwargs = kwargs + +import logging, binascii, time, hmac +from datetime import datetime +from smb_constants import * +from smb2_constants import * +from smb_structs import * +from smb2_structs import * +from .security_descriptors import SecurityDescriptor +from nmb.base import NMBSession +from utils import convertFILETIMEtoEpoch +import ntlm, securityblob + +try: + import hashlib + sha256 = hashlib.sha256 +except ImportError: + from utils import sha256 + + +class NotReadyError(Exception): + """Raised when SMB connection is not ready (i.e. 
not authenticated or authentication failed)""" + pass + +class NotConnectedError(Exception): + """Raised when underlying SMB connection has been disconnected or not connected yet""" + pass + +class SMBTimeout(Exception): + """Raised when a timeout has occurred while waiting for a response or for a SMB/CIFS operation to complete.""" + pass + + +def _convert_to_unicode(string): + if not isinstance(string, unicode): + string = unicode(string, "utf-8") + return string + + +class SMB(NMBSession): + """ + This class represents a "connection" to the remote SMB/CIFS server. + It is not meant to be used directly in an application as it does not have any network transport implementations. + + For application use, please refer to + - L{SMBProtocol.SMBProtocolFactory<smb.SMBProtocol>} if you are using Twisted framework + + In [MS-CIFS], this class will contain attributes of Client, Client.Connection and Client.Session abstract data models. + + References: + =========== + - [MS-CIFS]: 3.2.1 + """ + + log = logging.getLogger('SMB.SMB') + + SIGN_NEVER = 0 + SIGN_WHEN_SUPPORTED = 1 + SIGN_WHEN_REQUIRED = 2 + + def __init__(self, username, password, my_name, remote_name, domain = '', use_ntlm_v2 = True, sign_options = SIGN_WHEN_REQUIRED, is_direct_tcp = False): + NMBSession.__init__(self, my_name, remote_name, is_direct_tcp = is_direct_tcp) + self.username = _convert_to_unicode(username) + self.password = _convert_to_unicode(password) + self.domain = _convert_to_unicode(domain) + self.sign_options = sign_options + self.is_direct_tcp = is_direct_tcp + self.use_ntlm_v2 = use_ntlm_v2 #: Similar to LMAuthenticationPolicy and NTAuthenticationPolicy as described in [MS-CIFS] 3.2.1.1 + self.smb_message = SMBMessage(self) + self.is_using_smb2 = False #: Are we communicating using SMB2 protocol? 
self.smb_message will be a SMB2Message instance if this flag is True + self.async_requests = { } #: AsyncID mapped to _PendingRequest instance + self.pending_requests = { } #: MID mapped to _PendingRequest instance + self.connected_trees = { } #: Share name mapped to TID + self.next_rpc_call_id = 1 #: Next RPC callID value. Not used directly in SMB message. Usually encapsulated in sub-commands under SMB_COM_TRANSACTION or SMB_COM_TRANSACTION2 messages + + self.has_negotiated = False + self.has_authenticated = False + self.is_signing_active = False #: True if the remote server accepts message signing. All outgoing messages will be signed. Simiar to IsSigningActive as described in [MS-CIFS] 3.2.1.2 + self.signing_session_key = None #: Session key for signing packets, if signing is active. Similar to SigningSessionKey as described in [MS-CIFS] 3.2.1.2 + self.signing_challenge_response = None #: Contains the challenge response for signing, if signing is active. Similar to SigningChallengeResponse as described in [MS-CIFS] 3.2.1.2 + self.mid = 0 + self.uid = 0 + self.next_signing_id = 2 #: Similar to ClientNextSendSequenceNumber as described in [MS-CIFS] 3.2.1.2 + + # SMB1 and SMB2 attributes + # Note that the interpretations of the values may differ between SMB1 and SMB2 protocols + self.capabilities = 0 + self.security_mode = 0 #: Initialized from the SecurityMode field of the SMB_COM_NEGOTIATE message + + # SMB1 attributes + # Most of the following attributes will be initialized upon receipt of SMB_COM_NEGOTIATE message from server (via self._updateServerInfo_SMB1 method) + self.use_plaintext_authentication = False #: Similar to PlaintextAuthenticationPolicy in in [MS-CIFS] 3.2.1.1 + self.max_raw_size = 0 + self.max_buffer_size = 0 #: Similar to MaxBufferSize as described in [MS-CIFS] 3.2.1.1 + self.max_mpx_count = 0 #: Similar to MaxMpxCount as described in [MS-CIFS] 3.2.1.1 + + # SMB2 attributes + self.max_read_size = 0 #: Similar to MaxReadSize as described in 
[MS-SMB2] 2.2.4 + self.max_write_size = 0 #: Similar to MaxWriteSize as described in [MS-SMB2] 2.2.4 + self.max_transact_size = 0 #: Similar to MaxTransactSize as described in [MS-SMB2] 2.2.4 + self.session_id = 0 #: Similar to SessionID as described in [MS-SMB2] 2.2.4. This will be set in _updateState_SMB2 method + self.smb2_dialect = 0 + + + # SMB 2.1 attributes + self.cap_leasing = False + self.cap_multi_credit = False + self.credits = 0 # how many credits we're allowed to spend per request + + self._setupSMB1Methods() + + self.log.info('Authentication with remote machine "%s" for user "%s" will be using NTLM %s authentication (%s extended security)', + self.remote_name, self.username, + (self.use_ntlm_v2 and 'v2') or 'v1', + (SUPPORT_EXTENDED_SECURITY and 'with') or 'without') + + + # + # NMBSession Methods + # + + def onNMBSessionOK(self): + self._sendSMBMessage(SMBMessage(self, ComNegotiateRequest())) + + def onNMBSessionFailed(self): + pass + + def onNMBSessionMessage(self, flags, data): + while True: + try: + i = self.smb_message.decode(data) + except SMB2ProtocolHeaderError: + self.log.info('Now switching over to SMB2 protocol communication') + self.is_using_smb2 = True + self.mid = 0 # Must reset messageID counter, or else remote SMB2 server will disconnect + self._setupSMB2Methods() + self.smb_message = self._klassSMBMessage(self) + i = self.smb_message.decode(data) + self.log.info('SMB2 dialect is 0x%04x', self.smb2_dialect) + + next_message_offset = 0 + if self.is_using_smb2: + next_message_offset = self.smb_message.next_command_offset + + # update how many credits we're allowed to spend on requests + self.credits = self.smb_message.credit_response + + # SMB2 CANCEL commands do not consume message IDs + if self.smb_message.command != SMB2_COM_CANCEL: + self.log.debug('Received SMB2 packet from server - "%s" (command:0x%02X). 
Credit charge recv: %s', + SMB_COMMAND_NAMES.get(self.smb_message.command, '<unknown>'), self.smb_message.command, self.smb_message.credit_charge) + if self.smb_message.credit_charge > 0: + # Let's update the sequenceWindow based on the CreditsCharged + # In the SMB 2.0.2 dialect, this field MUST NOT be used and MUST be reserved. + # The sender MUST set this to 0, and the receiver MUST ignore it. + # In all other dialects, this field indicates the number of credits that this request consumes. + self.log.debug("Updating MID to add credit charge from server...") + self.log.debug("*** Before: " + str(self.mid)) + self.mid = self.mid + (self.smb_message.credit_charge - 1) + self.log.debug("*** After: " + str(self.mid)) + + if i > 0: + if not self.is_using_smb2: + self.log.debug('Received SMB message "%s" (command:0x%2X flags:0x%02X flags2:0x%04X TID:%d UID:%d)', + SMB_COMMAND_NAMES.get(self.smb_message.command, '<unknown>'), + self.smb_message.command, self.smb_message.flags, self.smb_message.flags2, self.smb_message.tid, self.smb_message.uid) + else: + self.log.debug('Received SMB2 message "%s" (command:0x%04X flags:0x%04x)', + SMB2_COMMAND_NAMES.get(self.smb_message.command, '<unknown>'), + self.smb_message.command, self.smb_message.flags) + if self._updateState(self.smb_message): + # We need to create a new instance instead of calling reset() because the instance could be captured in the message history. 
+ self.smb_message = self._klassSMBMessage(self) + + if next_message_offset > 0: + data = data[next_message_offset:] + else: + break + + # + # Public Methods for Overriding in Subclasses + # + + def onAuthOK(self): + pass + + def onAuthFailed(self): + pass + + # + # Protected Methods + # + + def _setupSMB1Methods(self): + self._klassSMBMessage = SMBMessage + self._updateState = self._updateState_SMB1 + self._updateServerInfo = self._updateServerInfo_SMB1 + self._handleNegotiateResponse = self._handleNegotiateResponse_SMB1 + self._sendSMBMessage = self._sendSMBMessage_SMB1 + self._handleSessionChallenge = self._handleSessionChallenge_SMB1 + self._listShares = self._listShares_SMB1 + self._listPath = self._listPath_SMB1 + self._listSnapshots = self._listSnapshots_SMB1 + self._getSecurity = self._getSecurity_SMB1 + self._getAttributes = self._getAttributes_SMB1 + self._retrieveFile = self._retrieveFile_SMB1 + self._retrieveFileFromOffset = self._retrieveFileFromOffset_SMB1 + self._storeFile = self._storeFile_SMB1 + self._storeFileFromOffset = self._storeFileFromOffset_SMB1 + self._deleteFiles = self._deleteFiles_SMB1 + self._resetFileAttributes = self._resetFileAttributes_SMB1 + self._createDirectory = self._createDirectory_SMB1 + self._deleteDirectory = self._deleteDirectory_SMB1 + self._rename = self._rename_SMB1 + self._echo = self._echo_SMB1 + + def _setupSMB2Methods(self): + self._klassSMBMessage = SMB2Message + self._updateState = self._updateState_SMB2 + self._updateServerInfo = self._updateServerInfo_SMB2 + self._handleNegotiateResponse = self._handleNegotiateResponse_SMB2 + self._sendSMBMessage = self._sendSMBMessage_SMB2 + self._handleSessionChallenge = self._handleSessionChallenge_SMB2 + self._listShares = self._listShares_SMB2 + self._listPath = self._listPath_SMB2 + self._listSnapshots = self._listSnapshots_SMB2 + self._getAttributes = self._getAttributes_SMB2 + self._getSecurity = self._getSecurity_SMB2 + self._retrieveFile = self._retrieveFile_SMB2 + 
self._retrieveFileFromOffset = self._retrieveFileFromOffset_SMB2 + self._storeFile = self._storeFile_SMB2 + self._storeFileFromOffset = self._storeFileFromOffset_SMB2 + self._deleteFiles = self._deleteFiles_SMB2 + self._resetFileAttributes = self._resetFileAttributes_SMB2 + self._createDirectory = self._createDirectory_SMB2 + self._deleteDirectory = self._deleteDirectory_SMB2 + self._rename = self._rename_SMB2 + self._echo = self._echo_SMB2 + + def _getNextRPCCallID(self): + self.next_rpc_call_id += 1 + return self.next_rpc_call_id + + # + # SMB2 Methods Family + # + + def _sendSMBMessage_SMB2(self, smb_message): + if smb_message.mid == 0: + smb_message.mid = self._getNextMID_SMB2() + + if smb_message.command != SMB2_COM_NEGOTIATE: + smb_message.session_id = self.session_id + + if self.is_signing_active: + smb_message.flags |= SMB2_FLAGS_SIGNED + raw_data = smb_message.encode() + smb_message.signature = hmac.new(self.signing_session_key, raw_data, sha256).digest()[:16] + + smb_message.raw_data = smb_message.encode() + self.log.debug('MID is %d. Signature is %s. Total raw message is %d bytes', smb_message.mid, binascii.hexlify(smb_message.signature), len(smb_message.raw_data)) + else: + smb_message.raw_data = smb_message.encode() + self.sendNMBMessage(smb_message.raw_data) + + def _getNextMID_SMB2(self): + self.mid += 1 + return self.mid + + def _updateState_SMB2(self, message): + if message.isReply: + if message.command == SMB2_COM_NEGOTIATE: + if message.status == 0: + + if self.smb_message.payload.dialect_revision == SMB2_DIALECT_2ALL: + # Dialects from SMB 2.1 must be negotiated in a second negotiate phase + # We send a SMB2 Negotiate Request to accomplish this + self._sendSMBMessage(SMB2Message(self, SMB2NegotiateRequest())) + else: + if self.smb_message.payload.dialect_revision == SMB2_DIALECT_21: + # We negotiated SMB 2.1. + # we must now send credit requests (MUST!) 
+ #self.send_credits_request = True + pass + + self.has_negotiated = True + self.log.info('SMB2 dialect negotiation successful') + self.dialect = self.smb_message.payload.dialect_revision + self._updateServerInfo(message.payload) + self._handleNegotiateResponse(message) + else: + raise ProtocolError('Unknown status value (0x%08X) in SMB2_COM_NEGOTIATE' % message.status, + message.raw_data, message) + elif message.command == SMB2_COM_SESSION_SETUP: + if message.status == 0: + self.session_id = message.session_id + try: + result = securityblob.decodeAuthResponseSecurityBlob(message.payload.security_blob) + if result == securityblob.RESULT_ACCEPT_COMPLETED: + self.has_authenticated = True + self.log.info('Authentication (on SMB2) successful!') + + # [MS-SMB2]: 3.2.5.3.1 + # If the security subsystem indicates that the session was established by an anonymous user, + # Session.SigningRequired MUST be set to FALSE. + # If the SMB2_SESSION_FLAG_IS_GUEST bit is set in the SessionFlags field of the + # SMB2 SESSION_SETUP Response and if Session.SigningRequired is TRUE, this indicates a SESSION_SETUP + # failure and the connection MUST be terminated. If the SMB2_SESSION_FLAG_IS_GUEST bit is set in the SessionFlags + # field of the SMB2 SESSION_SETUP Response and if RequireMessageSigning is FALSE, Session.SigningRequired + # MUST be set to FALSE. 
+ if message.payload.isGuestSession or message.payload.isAnonymousSession: + self.is_signing_active = False + self.log.info('Signing disabled because session is guest/anonymous') + + self.onAuthOK() + else: + raise ProtocolError('SMB2_COM_SESSION_SETUP status is 0 but security blob negResult value is %d' % result, message.raw_data, message) + except securityblob.BadSecurityBlobError, ex: + raise ProtocolError(str(ex), message.raw_data, message) + elif message.status == 0xc0000016: # STATUS_MORE_PROCESSING_REQUIRED + self.session_id = message.session_id + try: + result, ntlm_token = securityblob.decodeChallengeSecurityBlob(message.payload.security_blob) + if result == securityblob.RESULT_ACCEPT_INCOMPLETE: + self._handleSessionChallenge(message, ntlm_token) + except ( securityblob.BadSecurityBlobError, securityblob.UnsupportedSecurityProvider ), ex: + raise ProtocolError(str(ex), message.raw_data, message) + elif (message.status == 0xc000006d # STATUS_LOGON_FAILURE + or message.status == 0xc0000064 # STATUS_NO_SUCH_USER + or message.status == 0xc000006a):# STATUS_WRONG_PASSWORD + self.has_authenticated = False + self.log.info('Authentication (on SMB2) failed. Please check username and password.') + self.onAuthFailed() + elif (message.status == 0xc0000193 # STATUS_ACCOUNT_EXPIRED + or message.status == 0xC0000071): # STATUS_PASSWORD_EXPIRED + self.has_authenticated = False + self.log.info('Authentication (on SMB2) failed. Account or password has expired.') + self.onAuthFailed() + elif message.status == 0xc0000234: # STATUS_ACCOUNT_LOCKED_OUT + self.has_authenticated = False + self.log.info('Authentication (on SMB2) failed. Account has been locked due to too many invalid logon attempts.') + self.onAuthFailed() + elif message.status == 0xc0000072: # STATUS_ACCOUNT_DISABLED + self.has_authenticated = False + self.log.info('Authentication (on SMB2) failed. 
Account has been disabled.') + self.onAuthFailed() + elif (message.status == 0xc000006f # STATUS_INVALID_LOGON_HOURS + or message.status == 0xc000015b # STATUS_LOGON_TYPE_NOT_GRANTED + or message.status == 0xc0000070): # STATUS_INVALID_WORKSTATION + self.has_authenticated = False + self.log.info('Authentication (on SMB2) failed. Not allowed.') + self.onAuthFailed() + elif message.status == 0xc000018c: # STATUS_TRUSTED_DOMAIN_FAILURE + self.has_authenticated = False + self.log.info('Authentication (on SMB2) failed. Domain not trusted.') + self.onAuthFailed() + elif message.status == 0xc000018d: # STATUS_TRUSTED_RELATIONSHIP_FAILURE + self.has_authenticated = False + self.log.info('Authentication (on SMB2) failed. Workstation not trusted.') + self.onAuthFailed() + else: + raise ProtocolError('Unknown status value (0x%08X) in SMB_COM_SESSION_SETUP_ANDX (with extended security)' % message.status, + message.raw_data, message) + + if message.isAsync: + if message.status == 0x00000103: # STATUS_PENDING + req = self.pending_requests.pop(message.mid, None) + if req: + self.async_requests[message.async_id] = req + else: # All other status including SUCCESS + req = self.async_requests.pop(message.async_id, None) + if req: + req.callback(message, **req.kwargs) + return True + else: + req = self.pending_requests.pop(message.mid, None) + if req: + req.callback(message, **req.kwargs) + return True + + + def _updateServerInfo_SMB2(self, payload): + self.capabilities = payload.capabilities + self.security_mode = payload.security_mode + self.max_transact_size = payload.max_transact_size + self.max_read_size = payload.max_read_size + self.max_write_size = payload.max_write_size + self.use_plaintext_authentication = False # SMB2 never allows plaintext authentication + + if (self.capabilities & SMB2_GLOBAL_CAP_LEASING) == SMB2_GLOBAL_CAP_LEASING: + self.cap_leasing = True + + if (self.capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) == SMB2_GLOBAL_CAP_LARGE_MTU: + self.cap_multi_credit = True 
+ + + def _handleNegotiateResponse_SMB2(self, message): + ntlm_data = ntlm.generateNegotiateMessage() + blob = securityblob.generateNegotiateSecurityBlob(ntlm_data) + self._sendSMBMessage(SMB2Message(self, SMB2SessionSetupRequest(blob))) + + + def _handleSessionChallenge_SMB2(self, message, ntlm_token): + server_challenge, server_flags, server_info = ntlm.decodeChallengeMessage(ntlm_token) + + self.log.info('Performing NTLMv2 authentication (on SMB2) with server challenge "%s"', binascii.hexlify(server_challenge)) + + if self.use_ntlm_v2: + self.log.info('Performing NTLMv2 authentication (on SMB2) with server challenge "%s"', binascii.hexlify(server_challenge)) + nt_challenge_response, lm_challenge_response, session_key = ntlm.generateChallengeResponseV2(self.password, + self.username, + server_challenge, + server_info, + self.domain) + + else: + self.log.info('Performing NTLMv1 authentication (on SMB2) with server challenge "%s"', binascii.hexlify(server_challenge)) + nt_challenge_response, lm_challenge_response, session_key = ntlm.generateChallengeResponseV1(self.password, server_challenge, True) + + ntlm_data = ntlm.generateAuthenticateMessage(server_flags, + nt_challenge_response, + lm_challenge_response, + session_key, + self.username, + self.domain, + self.my_name) + + if self.log.isEnabledFor(logging.DEBUG): + self.log.debug('NT challenge response is "%s" (%d bytes)', binascii.hexlify(nt_challenge_response), len(nt_challenge_response)) + self.log.debug('LM challenge response is "%s" (%d bytes)', binascii.hexlify(lm_challenge_response), len(lm_challenge_response)) + + blob = securityblob.generateAuthSecurityBlob(ntlm_data) + self._sendSMBMessage(SMB2Message(self, SMB2SessionSetupRequest(blob))) + + if self.security_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED: + self.log.info('Server requires all SMB messages to be signed') + self.is_signing_active = (self.sign_options != SMB.SIGN_NEVER) + elif self.security_mode & SMB2_NEGOTIATE_SIGNING_ENABLED: + 
self.log.info('Server supports SMB signing') + self.is_signing_active = (self.sign_options == SMB.SIGN_WHEN_SUPPORTED) + else: + self.is_signing_active = False + + if self.is_signing_active: + self.log.info("SMB signing activated. All SMB messages will be signed.") + self.signing_session_key = (session_key + '\0'*16)[:16] + if self.capabilities & CAP_EXTENDED_SECURITY: + self.signing_challenge_response = None + else: + self.signing_challenge_response = blob + else: + self.log.info("SMB signing deactivated. SMB messages will NOT be signed.") + + + def _listShares_SMB2(self, callback, errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + path = 'IPC$' + messages_history = [ ] + + def connectSrvSvc(tid): + m = SMB2Message(self, SMB2CreateRequest('srvsvc', + file_attributes = 0, + access_mask = FILE_READ_DATA | FILE_WRITE_DATA | FILE_APPEND_DATA | FILE_READ_EA | FILE_WRITE_EA | READ_CONTROL | FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES | SYNCHRONIZE, + share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + oplock = SMB2_OPLOCK_LEVEL_NONE, + impersonation = SEC_IMPERSONATE, + create_options = FILE_NON_DIRECTORY_FILE | FILE_OPEN_NO_RECALL, + create_disp = FILE_OPEN)) + + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectSrvSvcCB, errback, tid = tid) + messages_history.append(m) + + def connectSrvSvcCB(create_message, **kwargs): + messages_history.append(create_message) + if create_message.status == 0: + call_id = self._getNextRPCCallID() + # The data_bytes are binding call to Server Service RPC using DCE v1.1 RPC over SMB. 
See [MS-SRVS] and [C706] + # If you wish to understand the meanings of the byte stream, I would suggest you use a recent version of WireShark to packet capture the stream + data_bytes = \ + binascii.unhexlify("""05 00 0b 03 10 00 00 00 74 00 00 00""".replace(' ', '')) + \ + struct.pack('<I', call_id) + \ + binascii.unhexlify(""" +b8 10 b8 10 00 00 00 00 02 00 00 00 00 00 01 00 +c8 4f 32 4b 70 16 d3 01 12 78 5a 47 bf 6e e1 88 +03 00 00 00 04 5d 88 8a eb 1c c9 11 9f e8 08 00 +2b 10 48 60 02 00 00 00 01 00 01 00 c8 4f 32 4b +70 16 d3 01 12 78 5a 47 bf 6e e1 88 03 00 00 00 +2c 1c b7 6c 12 98 40 45 03 00 00 00 00 00 00 00 +01 00 00 00 +""".replace(' ', '').replace('\n', '')) + m = SMB2Message(self, SMB2WriteRequest(create_message.payload.fid, data_bytes, 0)) + m.tid = kwargs['tid'] + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, rpcBindCB, errback, tid = kwargs['tid'], fid = create_message.payload.fid) + messages_history.append(m) + else: + errback(OperationFailure('Failed to list shares: Unable to locate Server Service RPC endpoint', messages_history)) + + def rpcBindCB(trans_message, **kwargs): + messages_history.append(trans_message) + if trans_message.status == 0: + m = SMB2Message(self, SMB2ReadRequest(kwargs['fid'], read_len = 1024, read_offset = 0)) + m.tid = kwargs['tid'] + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, rpcReadCB, errback, tid = kwargs['tid'], fid = kwargs['fid']) + messages_history.append(m) + else: + closeFid(kwargs['tid'], kwargs['fid'], error = 'Failed to list shares: Unable to read from Server Service RPC endpoint') + + def rpcReadCB(read_message, **kwargs): + messages_history.append(read_message) + if read_message.status == 0: + call_id = self._getNextRPCCallID() + + padding = '' + remote_name = '\\\\' + self.remote_name + server_len = len(remote_name) + 1 + server_bytes_len = server_len * 2 + if server_len % 2 != 0: + padding = '\0\0' + 
server_bytes_len += 2 + + # The data bytes are the RPC call to NetrShareEnum (Opnum 15) at Server Service RPC. + # If you wish to understand the meanings of the byte stream, I would suggest you use a recent version of WireShark to packet capture the stream + data_bytes = \ + binascii.unhexlify("""05 00 00 03 10 00 00 00""".replace(' ', '')) + \ + struct.pack('<HHI', 72+server_bytes_len, 0, call_id) + \ + binascii.unhexlify("""4c 00 00 00 00 00 0f 00 00 00 02 00""".replace(' ', '')) + \ + struct.pack('<III', server_len, 0, server_len) + \ + (remote_name + '\0').encode('UTF-16LE') + padding + \ + binascii.unhexlify(""" +01 00 00 00 01 00 00 00 04 00 02 00 00 00 00 00 +00 00 00 00 ff ff ff ff 08 00 02 00 00 00 00 00 +""".replace(' ', '').replace('\n', '')) + m = SMB2Message(self, SMB2IoctlRequest(kwargs['fid'], 0x0011C017, flags = 0x01, max_out_size = 8196, in_data = data_bytes)) + m.tid = kwargs['tid'] + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, listShareResultsCB, errback, tid = kwargs['tid'], fid = kwargs['fid']) + messages_history.append(m) + else: + closeFid(kwargs['tid'], kwargs['fid'], error = 'Failed to list shares: Unable to bind to Server Service RPC endpoint') + + def listShareResultsCB(result_message, **kwargs): + messages_history.append(result_message) + if result_message.status == 0: + # The payload.data_bytes will contain the results of the RPC call to NetrShareEnum (Opnum 15) at Server Service RPC. 
+ data_bytes = result_message.payload.out_data + + if ord(data_bytes[3]) & 0x02 == 0: + sendReadRequest(kwargs['tid'], kwargs['fid'], data_bytes) + else: + decodeResults(kwargs['tid'], kwargs['fid'], data_bytes) + else: + closeFid(kwargs['tid'], kwargs['fid']) + errback(OperationFailure('Failed to list shares: Unable to retrieve shared device list', messages_history)) + + def decodeResults(tid, fid, data_bytes): + shares_count = struct.unpack('<I', data_bytes[36:40])[0] + results = [ ] # A list of SharedDevice instances + offset = 36 + 12 # You need to study the byte stream to understand the meaning of these constants + for i in range(0, shares_count): + results.append(SharedDevice(struct.unpack('<I', data_bytes[offset+4:offset+8])[0], None, None)) + offset += 12 + + for i in range(0, shares_count): + max_length, _, length = struct.unpack('<III', data_bytes[offset:offset+12]) + offset += 12 + results[i].name = unicode(data_bytes[offset:offset+length*2-2], 'UTF-16LE') + + if length % 2 != 0: + offset += (length * 2 + 2) + else: + offset += (length * 2) + + max_length, _, length = struct.unpack('<III', data_bytes[offset:offset+12]) + offset += 12 + results[i].comments = unicode(data_bytes[offset:offset+length*2-2], 'UTF-16LE') + + if length % 2 != 0: + offset += (length * 2 + 2) + else: + offset += (length * 2) + + closeFid(tid, fid) + callback(results) + + def sendReadRequest(tid, fid, data_bytes): + read_count = min(4280, self.max_read_size) + m = SMB2Message(self, SMB2ReadRequest(fid, 0, read_count)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, readCB, errback, + tid = tid, fid = fid, data_bytes = data_bytes) + + def readCB(read_message, **kwargs): + messages_history.append(read_message) + if read_message.status == 0: + data_bytes = read_message.payload.data + + if ord(data_bytes[3]) & 0x02 == 0: + sendReadRequest(kwargs['tid'], kwargs['fid'], kwargs['data_bytes'] + data_bytes[24:]) + 
else: + decodeResults(kwargs['tid'], kwargs['fid'], kwargs['data_bytes'] + data_bytes[24:]) + else: + closeFid(kwargs['tid'], kwargs['fid']) + errback(OperationFailure('Failed to list shares: Unable to retrieve shared device list', messages_history)) + + def closeFid(tid, fid, results = None, error = None): + m = SMB2Message(self, SMB2CloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, closeCB, errback, results = results, error = error) + messages_history.append(m) + + def closeCB(close_message, **kwargs): + if kwargs['results'] is not None: + callback(kwargs['results']) + elif kwargs['error'] is not None: + errback(OperationFailure(kwargs['error'], messages_history)) + + if not self.connected_trees.has_key(path): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if connect_message.status == 0: + self.connected_trees[path] = connect_message.tid + connectSrvSvc(connect_message.tid) + else: + errback(OperationFailure('Failed to list shares: Unable to connect to IPC$', messages_history)) + + m = SMB2Message(self, SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), path ))) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = path) + messages_history.append(m) + else: + connectSrvSvc(self.connected_trees[path]) + + def _listPath_SMB2(self, service_name, path, callback, errback, search, pattern, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + path = path.replace('/', '\\') + if path.startswith('\\'): + path = path[1:] + if path.endswith('\\'): + path = path[:-1] + messages_history = [ ] + results = [ ] + + def sendCreate(tid): + create_context_data = binascii.unhexlify(""" +28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 +44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 +00 00 00 
00 00 00 00 00 18 00 00 00 10 00 04 00 +00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 +00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 +51 46 69 64 00 00 00 00 +""".replace(' ', '').replace('\n', '')) + m = SMB2Message(self, SMB2CreateRequest(path, + file_attributes = 0, + access_mask = FILE_READ_DATA | FILE_READ_EA | FILE_READ_ATTRIBUTES | SYNCHRONIZE, + share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + oplock = SMB2_OPLOCK_LEVEL_NONE, + impersonation = SEC_IMPERSONATE, + create_options = FILE_DIRECTORY_FILE, + create_disp = FILE_OPEN, + create_context_data = create_context_data)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, createCB, errback, tid = tid) + messages_history.append(m) + + def createCB(create_message, **kwargs): + messages_history.append(create_message) + if create_message.status == 0: + sendQuery(kwargs['tid'], create_message.payload.fid, '') + else: + errback(OperationFailure('Failed to list %s on %s: Unable to open directory' % ( path, service_name ), messages_history)) + + def sendQuery(tid, fid, data_buf): + if self.smb2_dialect != SMB2_DIALECT_2 and self.cap_multi_credit: + output_buf_len = 64 * 1024 * (self.credits - 1) + else: + output_buf_len = self.max_transact_size + + m = SMB2Message(self, SMB2QueryDirectoryRequest(fid, pattern, + info_class = 0x25, # FileIdBothDirectoryInformation + flags = 0, + output_buf_len = output_buf_len)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, queryCB, errback, tid = tid, fid = fid, data_buf = data_buf) + messages_history.append(m) + + def queryCB(query_message, **kwargs): + messages_history.append(query_message) + if query_message.status == 0: + data_buf = decodeQueryStruct(kwargs['data_buf'] + query_message.payload.data) + sendQuery(kwargs['tid'], kwargs['fid'], data_buf) + elif query_message.status == 0x80000006L: # STATUS_NO_MORE_FILES + 
closeFid(kwargs['tid'], kwargs['fid'], results = results) + else: + closeFid(kwargs['tid'], kwargs['fid'], error = query_message.status) + + def decodeQueryStruct(data_bytes): + # FileIdBothDirectoryInformation structure. See [MS-SMB]: 2.2.8.1.3 and [MS-FSCC]: 2.4.17 + info_format = '<IIQQQQQQIIIBB24sHQ' + info_size = struct.calcsize(info_format) + + data_length = len(data_bytes) + offset = 0 + while offset < data_length: + if offset + info_size > data_length: + return data_bytes[offset:] + + next_offset, _, \ + create_time, last_access_time, last_write_time, last_attr_change_time, \ + file_size, alloc_size, file_attributes, filename_length, ea_size, \ + short_name_length, _, short_name, _, file_id = struct.unpack(info_format, data_bytes[offset:offset+info_size]) + + offset2 = offset + info_size + if offset2 + filename_length > data_length: + return data_bytes[offset:] + + filename = data_bytes[offset2:offset2+filename_length].decode('UTF-16LE') + short_name = short_name[:short_name_length].decode('UTF-16LE') + + accept_result = False + if (file_attributes & 0xff) in ( 0x00, ATTR_NORMAL ): # Only the first 8-bits are compared. 
We ignore other bits like temp, compressed, encryption, sparse, indexed, etc + accept_result = (search == SMB_FILE_ATTRIBUTE_NORMAL) or (search & SMB_FILE_ATTRIBUTE_INCL_NORMAL) + else: + accept_result = (file_attributes & search) > 0 + if accept_result: + results.append(SharedFile(convertFILETIMEtoEpoch(create_time), convertFILETIMEtoEpoch(last_access_time), + convertFILETIMEtoEpoch(last_write_time), convertFILETIMEtoEpoch(last_attr_change_time), + file_size, alloc_size, file_attributes, short_name, filename, file_id)) + + if next_offset: + offset += next_offset + else: + break + return '' + + def closeFid(tid, fid, results = None, error = None): + m = SMB2Message(self, SMB2CloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, closeCB, errback, results = results, error = error) + messages_history.append(m) + + def closeCB(close_message, **kwargs): + if kwargs['results'] is not None: + callback(kwargs['results']) + elif kwargs['error'] is not None: + errback(OperationFailure('Failed to list %s on %s: Query failed with errorcode 0x%08x' % ( path, service_name, kwargs['error'] ), messages_history)) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if connect_message.status == 0: + self.connected_trees[service_name] = connect_message.tid + sendCreate(connect_message.tid) + else: + errback(OperationFailure('Failed to list %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMB2Message(self, SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendCreate(self.connected_trees[service_name]) + + def _getAttributes_SMB2(self, service_name, path, callback, 
errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + path = path.replace('/', '\\') + if path.startswith('\\'): + path = path[1:] + if path.endswith('\\'): + path = path[:-1] + messages_history = [ ] + + def sendCreate(tid): + create_context_data = binascii.unhexlify(""" +28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 +44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 +00 00 00 00 00 00 00 00 18 00 00 00 10 00 04 00 +00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 +00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 +51 46 69 64 00 00 00 00 +""".replace(' ', '').replace('\n', '')) + m = SMB2Message(self, SMB2CreateRequest(path, + file_attributes = 0, + access_mask = FILE_READ_DATA | FILE_READ_EA | FILE_READ_ATTRIBUTES | SYNCHRONIZE, + share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + oplock = SMB2_OPLOCK_LEVEL_NONE, + impersonation = SEC_IMPERSONATE, + create_options = 0, + create_disp = FILE_OPEN, + create_context_data = create_context_data)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, createCB, errback, tid = tid) + messages_history.append(m) + + def createCB(create_message, **kwargs): + messages_history.append(create_message) + if create_message.status == 0: + p = create_message.payload + filename = self._extractLastPathComponent(unicode(path)) + info = SharedFile(p.create_time, p.lastaccess_time, p.lastwrite_time, p.change_time, + p.file_size, p.allocation_size, p.file_attributes, + filename, filename) + closeFid(kwargs['tid'], p.fid, info = info) + else: + errback(OperationFailure('Failed to get attributes for %s on %s: Unable to open remote file object' % ( path, service_name ), messages_history)) + + def closeFid(tid, fid, info = None, error = None): + m = SMB2Message(self, SMB2CloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = 
_PendingRequest(m.mid, expiry_time, closeCB, errback, info = info, error = error) + messages_history.append(m) + + def closeCB(close_message, **kwargs): + if kwargs['info'] is not None: + callback(kwargs['info']) + elif kwargs['error'] is not None: + errback(OperationFailure('Failed to get attributes for %s on %s: Query failed with errorcode 0x%08x' % ( path, service_name, kwargs['error'] ), messages_history)) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if connect_message.status == 0: + self.connected_trees[service_name] = connect_message.tid + sendCreate(connect_message.tid) + else: + errback(OperationFailure('Failed to get attributes for %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMB2Message(self, SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendCreate(self.connected_trees[service_name]) + + def _getSecurity_SMB2(self, service_name, path, callback, errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + path = path.replace('/', '\\') + if path.startswith('\\'): + path = path[1:] + if path.endswith('\\'): + path = path[:-1] + messages_history = [ ] + results = [ ] + + def sendCreate(tid): + m = SMB2Message(self, SMB2CreateRequest(path, + file_attributes = 0, + access_mask = FILE_READ_DATA | FILE_READ_EA | FILE_READ_ATTRIBUTES | READ_CONTROL | SYNCHRONIZE, + share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + oplock = SMB2_OPLOCK_LEVEL_NONE, + impersonation = SEC_IMPERSONATE, + create_options = 0, + create_disp = FILE_OPEN)) + m.tid = tid + self._sendSMBMessage(m) + 
self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, createCB, errback, tid = tid) + messages_history.append(m) + + def createCB(create_message, **kwargs): + messages_history.append(create_message) + if create_message.status == 0: + if self.smb2_dialect != SMB2_DIALECT_2 and self.cap_multi_credit: + output_buf_len = 64 * 1024 * (self.credits - 1) + else: + output_buf_len = self.max_transact_size + + m = SMB2Message(self, SMB2QueryInfoRequest(create_message.payload.fid, + flags = 0, + additional_info = OWNER_SECURITY_INFORMATION | GROUP_SECURITY_INFORMATION | DACL_SECURITY_INFORMATION, + info_type = SMB2_INFO_SECURITY, + file_info_class = 0, # [MS-SMB2] 2.2.37, 3.2.4.12 + input_buf = '', + output_buf_len = output_buf_len)) + m.tid = kwargs['tid'] + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, queryCB, errback, tid = kwargs['tid'], fid = create_message.payload.fid) + messages_history.append(m) + else: + errback(OperationFailure('Failed to get the security descriptor of %s on %s: Unable to open file or directory' % ( path, service_name ), messages_history)) + + def queryCB(query_message, **kwargs): + messages_history.append(query_message) + if query_message.status == 0: + security = SecurityDescriptor.from_bytes(query_message.payload.data) + closeFid(kwargs['tid'], kwargs['fid'], result = security) + else: + closeFid(kwargs['tid'], kwargs['fid'], error = query_message.status) + + def closeFid(tid, fid, result = None, error = None): + m = SMB2Message(self, SMB2CloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, closeCB, errback, result = result, error = error) + messages_history.append(m) + + def closeCB(close_message, **kwargs): + if kwargs['result'] is not None: + callback(kwargs['result']) + elif kwargs['error'] is not None: + errback(OperationFailure('Failed to get the security descriptor of %s on %s: Query failed with errorcode 
0x%08x' % ( path, service_name, kwargs['error'] ), messages_history)) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if connect_message.status == 0: + self.connected_trees[service_name] = connect_message.tid + sendCreate(connect_message.tid) + else: + errback(OperationFailure('Failed to get the security descriptor of %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMB2Message(self, SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendCreate(self.connected_trees[service_name]) + + def _retrieveFile_SMB2(self, service_name, path, file_obj, callback, errback, timeout = 30): + return self._retrieveFileFromOffset(service_name, path, file_obj, callback, errback, 0L, -1L, timeout) + + def _retrieveFileFromOffset_SMB2(self, service_name, path, file_obj, callback, errback, starting_offset, max_length, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + path = path.replace('/', '\\') + if path.startswith('\\'): + path = path[1:] + if path.endswith('\\'): + path = path[:-1] + messages_history = [ ] + results = [ ] + + def sendCreate(tid): + create_context_data = binascii.unhexlify(""" +28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 +44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 +00 00 00 00 00 00 00 00 18 00 00 00 10 00 04 00 +00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 +00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 +51 46 69 64 00 00 00 00 +""".replace(' ', '').replace('\n', '')) + m = SMB2Message(self, SMB2CreateRequest(path, + file_attributes = 0, + access_mask = FILE_READ_DATA | FILE_READ_EA | FILE_READ_ATTRIBUTES | 
READ_CONTROL | SYNCHRONIZE, + share_access = FILE_SHARE_READ | FILE_SHARE_WRITE, + oplock = SMB2_OPLOCK_LEVEL_NONE, + impersonation = SEC_IMPERSONATE, + create_options = FILE_SEQUENTIAL_ONLY | FILE_NON_DIRECTORY_FILE, + create_disp = FILE_OPEN, + create_context_data = create_context_data)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, createCB, errback, tid = tid) + messages_history.append(m) + + def createCB(create_message, **kwargs): + messages_history.append(create_message) + if create_message.status == 0: + m = SMB2Message(self, SMB2QueryInfoRequest(create_message.payload.fid, + flags = 0, + additional_info = 0, + info_type = SMB2_INFO_FILE, + file_info_class = 0x16, # FileStreamInformation [MS-FSCC] 2.4 + input_buf = '', + output_buf_len = 4096)) + m.tid = kwargs['tid'] + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, infoCB, errback, + tid = kwargs['tid'], + fid = create_message.payload.fid, + file_attributes = create_message.payload.file_attributes) + messages_history.append(m) + else: + errback(OperationFailure('Failed to retrieve %s on %s: Unable to open file' % ( path, service_name ), messages_history)) + + def infoCB(info_message, **kwargs): + messages_history.append(info_message) + if info_message.status == 0: + file_len = struct.unpack('<Q', info_message.payload.data[8:16])[0] + if max_length == 0 or starting_offset > file_len: + closeFid(info_message.tid, kwargs['fid']) + callback(( file_obj, kwargs['file_attributes'], 0 )) # Note that this is a tuple of 3-elements + else: + remaining_len = max_length + if remaining_len < 0: + remaining_len = file_len + if starting_offset + remaining_len > file_len: + remaining_len = file_len - starting_offset + sendRead(kwargs['tid'], kwargs['fid'], starting_offset, remaining_len, 0, kwargs['file_attributes']) + else: + errback(OperationFailure('Failed to retrieve %s on %s: Unable to retrieve information on 
file' % ( path, service_name ), messages_history)) + + def sendRead(tid, fid, offset, remaining_len, read_len, file_attributes): + read_count = min(self.max_read_size, remaining_len) + + if self.smb2_dialect != SMB2_DIALECT_2 and self.cap_multi_credit: + max_read_count = 64 * 1024 * (self.credits -1) + read_count = min(read_count, max_read_count) + + m = SMB2Message(self, SMB2ReadRequest(fid, offset, read_count)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, readCB, errback, + tid = tid, fid = fid, offset = offset, + remaining_len = remaining_len, + read_len = read_len, + file_attributes = file_attributes) + + def readCB(read_message, **kwargs): + # To avoid crazy memory usage when retrieving large files, we do not save every read_message in messages_history. + if read_message.status == 0: + data_len = read_message.payload.data_length + file_obj.write(read_message.payload.data) + + remaining_len = kwargs['remaining_len'] - data_len + + if remaining_len > 0: + sendRead(kwargs['tid'], kwargs['fid'], kwargs['offset'] + data_len, remaining_len, kwargs['read_len'] + data_len, kwargs['file_attributes']) + else: + closeFid(kwargs['tid'], kwargs['fid'], ret = ( file_obj, kwargs['file_attributes'], kwargs['read_len'] + data_len )) + else: + messages_history.append(read_message) + closeFid(kwargs['tid'], kwargs['fid'], error = read_message.status) + + def closeFid(tid, fid, ret = None, error = None): + m = SMB2Message(self, SMB2CloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, closeCB, errback, ret = ret, error = error) + messages_history.append(m) + + def closeCB(close_message, **kwargs): + if kwargs['ret'] is not None: + callback(kwargs['ret']) + elif kwargs['error'] is not None: + errback(OperationFailure('Failed to retrieve %s on %s: Read failed' % ( path, service_name ), messages_history)) + + if not 
self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if connect_message.status == 0: + self.connected_trees[service_name] = connect_message.tid + sendCreate(connect_message.tid) + else: + errback(OperationFailure('Failed to retrieve %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMB2Message(self, SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendCreate(self.connected_trees[service_name]) + + def _storeFile_SMB2(self, service_name, path, file_obj, callback, errback, timeout = 30): + self._storeFileFromOffset_SMB2(service_name, path, file_obj, callback, errback, 0L, True, timeout) + + def _storeFileFromOffset_SMB2(self, service_name, path, file_obj, callback, errback, starting_offset, truncate = False, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + path = path.replace('/', '\\') + if path.startswith('\\'): + path = path[1:] + if path.endswith('\\'): + path = path[:-1] + messages_history = [ ] + + def sendCreate(tid): + create_context_data = binascii.unhexlify(""" +28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 +44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 +00 00 00 00 00 00 00 00 20 00 00 00 10 00 04 00 +00 00 18 00 08 00 00 00 41 6c 53 69 00 00 00 00 +85 62 00 00 00 00 00 00 18 00 00 00 10 00 04 00 +00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 +00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 +51 46 69 64 00 00 00 00 +""".replace(' ', '').replace('\n', '')) + m = SMB2Message(self, SMB2CreateRequest(path, + file_attributes = ATTR_ARCHIVE, + access_mask = FILE_READ_DATA | FILE_WRITE_DATA | FILE_APPEND_DATA | 
FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES | FILE_READ_EA | FILE_WRITE_EA | READ_CONTROL | SYNCHRONIZE, + share_access = 0, + oplock = SMB2_OPLOCK_LEVEL_NONE, + impersonation = SEC_IMPERSONATE, + create_options = FILE_SEQUENTIAL_ONLY | FILE_NON_DIRECTORY_FILE, + create_disp = FILE_OVERWRITE_IF if truncate else FILE_OPEN_IF, + create_context_data = create_context_data)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback, tid = tid) + messages_history.append(m) + + def createCB(create_message, **kwargs): + create_message.tid = kwargs['tid'] + messages_history.append(create_message) + if create_message.status == 0: + sendWrite(create_message.tid, create_message.payload.fid, starting_offset) + else: + errback(OperationFailure('Failed to store %s on %s: Unable to open file' % ( path, service_name ), messages_history)) + + def sendWrite(tid, fid, offset): + if self.smb2_dialect != SMB2_DIALECT_2 and self.cap_multi_credit: + write_count = 64 * 1024 * (self.credits -1) + else: + write_count = self.max_write_size + data = file_obj.read(write_count) + data_len = len(data) + if data_len > 0: + m = SMB2Message(self, SMB2WriteRequest(fid, data, offset)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, writeCB, errback, tid = tid, fid = fid, offset = offset+data_len) + else: + closeFid(tid, fid, offset = offset) + + def writeCB(write_message, **kwargs): + # To avoid crazy memory usage when saving large files, we do not save every write_message in messages_history. 
+ if write_message.status == 0: + sendWrite(kwargs['tid'], kwargs['fid'], kwargs['offset']) + else: + messages_history.append(write_message) + closeFid(kwargs['tid'], kwargs['fid']) + errback(OperationFailure('Failed to store %s on %s: Write failed' % ( path, service_name ), messages_history)) + + def closeFid(tid, fid, error = None, offset = None): + m = SMB2Message(self, SMB2CloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, closeCB, errback, fid = fid, offset = offset, error = error) + messages_history.append(m) + + def closeCB(close_message, **kwargs): + if kwargs['offset'] is not None: + callback(( file_obj, kwargs['offset'] )) # Note that this is a tuple of 2-elements + elif kwargs['error'] is not None: + errback(OperationFailure('Failed to store %s on %s: Write failed' % ( path, service_name ), messages_history)) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if connect_message.status == 0: + self.connected_trees[service_name] = connect_message.tid + sendCreate(connect_message.tid) + else: + errback(OperationFailure('Failed to store %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMB2Message(self, SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendCreate(self.connected_trees[service_name]) + + + def _deleteFiles_SMB2(self, service_name, path_file_pattern, callback, errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + path = path_file_pattern.replace('/', '\\') + if path.startswith('\\'): + path = 
path[1:] + if path.endswith('\\'): + path = path[:-1] + messages_history = [ ] + + def sendCreate(tid): + create_context_data = binascii.unhexlify(""" +28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 +44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 +00 00 00 00 00 00 00 00 18 00 00 00 10 00 04 00 +00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 +00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 +51 46 69 64 00 00 00 00 +""".replace(' ', '').replace('\n', '')) + m = SMB2Message(self, SMB2CreateRequest(path, + file_attributes = 0, + access_mask = DELETE | FILE_READ_ATTRIBUTES, + share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + oplock = SMB2_OPLOCK_LEVEL_NONE, + impersonation = SEC_IMPERSONATE, + create_options = FILE_NON_DIRECTORY_FILE, + create_disp = FILE_OPEN, + create_context_data = create_context_data)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback, tid = tid) + messages_history.append(m) + + def createCB(open_message, **kwargs): + open_message.tid = kwargs['tid'] + messages_history.append(open_message) + if open_message.status == 0: + sendDelete(open_message.tid, open_message.payload.fid) + else: + errback(OperationFailure('Failed to delete %s on %s: Unable to open file' % ( path, service_name ), messages_history)) + + def sendDelete(tid, fid): + m = SMB2Message(self, SMB2SetInfoRequest(fid, + additional_info = 0, + info_type = SMB2_INFO_FILE, + file_info_class = 0x0d, # SMB2_FILE_DISPOSITION_INFO + data = '\x01')) + # [MS-SMB2]: 2.2.39, [MS-FSCC]: 2.4.11 + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, deleteCB, errback, tid = tid, fid = fid) + messages_history.append(m) + + def deleteCB(delete_message, **kwargs): + messages_history.append(delete_message) + if delete_message.status == 0: + closeFid(kwargs['tid'], kwargs['fid'], status = 0) + else: + 
closeFid(kwargs['tid'], kwargs['fid'], status = delete_message.status) + + def closeFid(tid, fid, status = None): + m = SMB2Message(self, SMB2CloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, closeCB, errback, status = status) + messages_history.append(m) + + def closeCB(close_message, **kwargs): + if kwargs['status'] == 0: + callback(path_file_pattern) + else: + errback(OperationFailure('Failed to delete %s on %s: Delete failed' % ( path, service_name ), messages_history)) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if connect_message.status == 0: + self.connected_trees[service_name] = connect_message.tid + sendCreate(connect_message.tid) + else: + errback(OperationFailure('Failed to delete %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMB2Message(self, SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendCreate(self.connected_trees[service_name]) + + def _resetFileAttributes_SMB2(self, service_name, path_file_pattern, callback, errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + path = path_file_pattern.replace('/', '\\') + if path.startswith('\\'): + path = path[1:] + if path.endswith('\\'): + path = path[:-1] + messages_history = [ ] + + def sendCreate(tid): + create_context_data = binascii.unhexlify(""" +28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 +44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 +00 00 00 00 00 00 00 00 18 00 00 00 10 00 04 00 +00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 +00 00 00 00 10 
00 04 00 00 00 18 00 00 00 00 00 +51 46 69 64 00 00 00 00 +""".replace(' ', '').replace('\n', '')) + + m = SMB2Message(self, SMB2CreateRequest(path, + file_attributes = 0, + access_mask = FILE_WRITE_ATTRIBUTES, + share_access = FILE_SHARE_READ | FILE_SHARE_WRITE, + oplock = SMB2_OPLOCK_LEVEL_NONE, + impersonation = SEC_IMPERSONATE, + create_options = 0, + create_disp = FILE_OPEN, + create_context_data = create_context_data)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback, tid = tid) + messages_history.append(m) + + def createCB(open_message, **kwargs): + messages_history.append(open_message) + if open_message.status == 0: + sendReset(kwargs['tid'], open_message.payload.fid) + else: + errback(OperationFailure('Failed to reset attributes of %s on %s: Unable to open file' % ( path, service_name ), messages_history)) + + def sendReset(tid, fid): + m = SMB2Message(self, SMB2SetInfoRequest(fid, + additional_info = 0, + info_type = SMB2_INFO_FILE, + file_info_class = 4, # FileBasicInformation + data = struct.pack('qqqqii',0,0,0,0,0x80,0))) # FILE_ATTRIBUTE_NORMAL + # [MS-SMB2]: 2.2.39, [MS-FSCC]: 2.4, [MS-FSCC]: 2.4.7, [MS-FSCC]: 2.6 + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, resetCB, errback, tid = tid, fid = fid) + messages_history.append(m) + + def resetCB(reset_message, **kwargs): + messages_history.append(reset_message) + if reset_message.status == 0: + closeFid(kwargs['tid'], kwargs['fid'], status = 0) + else: + closeFid(kwargs['tid'], kwargs['fid'], status = reset_message.status) + + def closeFid(tid, fid, status = None): + m = SMB2Message(self, SMB2CloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, closeCB, errback, status = status) + messages_history.append(m) + + def closeCB(close_message, 
**kwargs): + if kwargs['status'] == 0: + callback(path_file_pattern) + else: + errback(OperationFailure('Failed to reset attributes of %s on %s: Reset failed' % ( path, service_name ), messages_history)) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if connect_message.status == 0: + self.connected_trees[service_name] = connect_message.tid + sendCreate(connect_message.tid) + else: + errback(OperationFailure('Failed to reset attributes of %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMB2Message(self, SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendCreate(self.connected_trees[service_name]) + + def _createDirectory_SMB2(self, service_name, path, callback, errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + path = path.replace('/', '\\') + if path.startswith('\\'): + path = path[1:] + if path.endswith('\\'): + path = path[:-1] + messages_history = [ ] + + def sendCreate(tid): + create_context_data = binascii.unhexlify(""" +28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 +44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 +00 00 00 00 00 00 00 00 18 00 00 00 10 00 04 00 +00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 +00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 +51 46 69 64 00 00 00 00 +""".replace(' ', '').replace('\n', '')) + m = SMB2Message(self, SMB2CreateRequest(path, + file_attributes = 0, + access_mask = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_EA | FILE_WRITE_EA | FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES | READ_CONTROL | DELETE | SYNCHRONIZE, + share_access = 0, + oplock = 
SMB2_OPLOCK_LEVEL_NONE, + impersonation = SEC_IMPERSONATE, + create_options = FILE_DIRECTORY_FILE | FILE_SYNCHRONOUS_IO_NONALERT, + create_disp = FILE_CREATE, + create_context_data = create_context_data)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback, tid = tid) + messages_history.append(m) + + def createCB(create_message, **kwargs): + messages_history.append(create_message) + if create_message.status == 0: + closeFid(kwargs['tid'], create_message.payload.fid) + else: + errback(OperationFailure('Failed to create directory %s on %s: Create failed' % ( path, service_name ), messages_history)) + + def closeFid(tid, fid): + m = SMB2Message(self, SMB2CloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, closeCB, errback) + messages_history.append(m) + + def closeCB(close_message, **kwargs): + callback(path) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if connect_message.status == 0: + self.connected_trees[service_name] = connect_message.tid + sendCreate(connect_message.tid) + else: + errback(OperationFailure('Failed to create directory %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMB2Message(self, SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendCreate(self.connected_trees[service_name]) + + def _deleteDirectory_SMB2(self, service_name, path, callback, errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + path = path.replace('/', 
'\\') + if path.startswith('\\'): + path = path[1:] + if path.endswith('\\'): + path = path[:-1] + messages_history = [ ] + + def sendCreate(tid): + create_context_data = binascii.unhexlify(""" +28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 +44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 +00 00 00 00 00 00 00 00 18 00 00 00 10 00 04 00 +00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 +00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 +51 46 69 64 00 00 00 00 +""".replace(' ', '').replace('\n', '')) + m = SMB2Message(self, SMB2CreateRequest(path, + file_attributes = 0, + access_mask = DELETE | FILE_READ_ATTRIBUTES, + share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + oplock = SMB2_OPLOCK_LEVEL_NONE, + impersonation = SEC_IMPERSONATE, + create_options = FILE_DIRECTORY_FILE, + create_disp = FILE_OPEN, + create_context_data = create_context_data)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback, tid = tid) + messages_history.append(m) + + def createCB(open_message, **kwargs): + messages_history.append(open_message) + if open_message.status == 0: + sendDelete(kwargs['tid'], open_message.payload.fid) + else: + errback(OperationFailure('Failed to delete %s on %s: Unable to open directory' % ( path, service_name ), messages_history)) + + def sendDelete(tid, fid): + m = SMB2Message(self, SMB2SetInfoRequest(fid, + additional_info = 0, + info_type = SMB2_INFO_FILE, + file_info_class = 0x0d, # SMB2_FILE_DISPOSITION_INFO + data = '\x01')) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, deleteCB, errback, tid = tid, fid = fid) + messages_history.append(m) + + def deleteCB(delete_message, **kwargs): + messages_history.append(delete_message) + if delete_message.status == 0: + closeFid(kwargs['tid'], kwargs['fid'], status = 0) + else: + closeFid(kwargs['tid'], kwargs['fid'], status = 
delete_message.status) + + def closeFid(tid, fid, status = None): + m = SMB2Message(self, SMB2CloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, closeCB, errback, status = status) + messages_history.append(m) + + def closeCB(close_message, **kwargs): + if kwargs['status'] == 0: + callback(path) + else: + errback(OperationFailure('Failed to delete %s on %s: Delete failed' % ( path, service_name ), messages_history)) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if connect_message.status == 0: + self.connected_trees[service_name] = connect_message.tid + sendCreate(connect_message.tid) + else: + errback(OperationFailure('Failed to delete %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMB2Message(self, SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendCreate(self.connected_trees[service_name]) + + def _rename_SMB2(self, service_name, old_path, new_path, callback, errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + messages_history = [ ] + + new_path = new_path.replace('/', '\\') + if new_path.startswith('\\'): + new_path = new_path[1:] + if new_path.endswith('\\'): + new_path = new_path[:-1] + + old_path = old_path.replace('/', '\\') + if old_path.startswith('\\'): + old_path = old_path[1:] + if old_path.endswith('\\'): + old_path = old_path[:-1] + + def sendCreate(tid): + create_context_data = binascii.unhexlify(""" +28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 +44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 +00 00 00 00 00 00 00 00 
18 00 00 00 10 00 04 00 +00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 +00 00 00 00 10 00 04 00 00 00 18 00 00 00 00 00 +51 46 69 64 00 00 00 00 +""".replace(' ', '').replace('\n', '')) + m = SMB2Message(self, SMB2CreateRequest(old_path, + file_attributes = 0, + access_mask = DELETE | FILE_READ_ATTRIBUTES | SYNCHRONIZE, + share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + oplock = SMB2_OPLOCK_LEVEL_NONE, + impersonation = SEC_IMPERSONATE, + create_options = FILE_SYNCHRONOUS_IO_NONALERT, + create_disp = FILE_OPEN, + create_context_data = create_context_data)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback, tid = tid) + messages_history.append(m) + + def createCB(create_message, **kwargs): + messages_history.append(create_message) + if create_message.status == 0: + sendRename(kwargs['tid'], create_message.payload.fid) + else: + errback(OperationFailure('Failed to rename %s on %s: Unable to open file/directory' % ( old_path, service_name ), messages_history)) + + def sendRename(tid, fid): + data = '\x00'*16 + struct.pack('<I', len(new_path)*2) + new_path.encode('UTF-16LE') + m = SMB2Message(self, SMB2SetInfoRequest(fid, + additional_info = 0, + info_type = SMB2_INFO_FILE, + file_info_class = 0x0a, # SMB2_FILE_RENAME_INFO + data = data)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, renameCB, errback, tid = tid, fid = fid) + messages_history.append(m) + + def renameCB(rename_message, **kwargs): + messages_history.append(rename_message) + if rename_message.status == 0: + closeFid(kwargs['tid'], kwargs['fid'], status = 0) + else: + closeFid(kwargs['tid'], kwargs['fid'], status = rename_message.status) + + def closeFid(tid, fid, status = None): + m = SMB2Message(self, SMB2CloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = 
_PendingRequest(m.mid, expiry_time, closeCB, errback, status = status) + messages_history.append(m) + + def closeCB(close_message, **kwargs): + if kwargs['status'] == 0: + callback(( old_path, new_path )) + else: + errback(OperationFailure('Failed to rename %s on %s: Rename failed' % ( old_path, service_name ), messages_history)) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if connect_message.status == 0: + self.connected_trees[service_name] = connect_message.tid + sendCreate(connect_message.tid) + else: + errback(OperationFailure('Failed to rename %s on %s: Unable to connect to shared device' % ( old_path, service_name ), messages_history)) + + m = SMB2Message(self, SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendCreate(self.connected_trees[service_name]) + + def _listSnapshots_SMB2(self, service_name, path, callback, errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + path = path.replace('/', '\\') + if path.startswith('\\'): + path = path[1:] + if path.endswith('\\'): + path = path[:-1] + messages_history = [ ] + + def sendCreate(tid): + create_context_data = binascii.unhexlify(""" +28 00 00 00 10 00 04 00 00 00 18 00 10 00 00 00 +44 48 6e 51 00 00 00 00 00 00 00 00 00 00 00 00 +00 00 00 00 00 00 00 00 00 00 00 00 10 00 04 00 +00 00 18 00 00 00 00 00 4d 78 41 63 00 00 00 00 +""".replace(' ', '').replace('\n', '')) + m = SMB2Message(self, SMB2CreateRequest(path, + file_attributes = 0, + access_mask = FILE_READ_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE, + share_access = FILE_SHARE_READ | FILE_SHARE_WRITE, + oplock = SMB2_OPLOCK_LEVEL_NONE, + impersonation 
= SEC_IMPERSONATE, + create_options = FILE_SYNCHRONOUS_IO_NONALERT, + create_disp = FILE_OPEN, + create_context_data = create_context_data)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback, tid = tid) + messages_history.append(m) + + def createCB(create_message, **kwargs): + messages_history.append(create_message) + if create_message.status == 0: + sendEnumSnapshots(kwargs['tid'], create_message.payload.fid) + else: + errback(OperationFailure('Failed to list snapshots %s on %s: Unable to open file/directory' % ( old_path, service_name ), messages_history)) + + def sendEnumSnapshots(tid, fid): + m = SMB2Message(self, SMB2IoctlRequest(fid, + ctlcode = 0x00144064, # FSCTL_SRV_ENUMERATE_SNAPSHOTS + flags = 0x0001, + in_data = '')) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, enumSnapshotsCB, errback, tid = tid, fid = fid) + messages_history.append(m) + + def enumSnapshotsCB(enum_message, **kwargs): + messages_history.append(enum_message) + if enum_message.status == 0: + results = [ ] + snapshots_count = struct.unpack('<I', enum_message.payload.out_data[4:8])[0] + for i in range(0, snapshots_count): + s = enum_message.payload.out_data[12+i*50:12+48+i*50].decode('UTF-16LE') + results.append(datetime(*map(int, ( s[5:9], s[10:12], s[13:15], s[16:18], s[19:21], s[22:24] )))) + closeFid(kwargs['tid'], kwargs['fid'], results = results) + else: + closeFid(kwargs['tid'], kwargs['fid'], status = enum_message.status) + + def closeFid(tid, fid, status = None, results = None): + m = SMB2Message(self, SMB2CloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, closeCB, errback, status = status, results = results) + messages_history.append(m) + + def closeCB(close_message, **kwargs): + if kwargs['results'] is not None: + 
callback(kwargs['results']) + else: + errback(OperationFailure('Failed to list snapshots %s on %s: List failed' % ( path, service_name ), messages_history)) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if connect_message.status == 0: + self.connected_trees[service_name] = connect_message.tid + sendCreate(connect_message.tid) + else: + errback(OperationFailure('Failed to list snapshots %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMB2Message(self, SMB2TreeConnectRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ))) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendCreate(self.connected_trees[service_name]) + + def _echo_SMB2(self, data, callback, errback, timeout = 30): + messages_history = [ ] + + def echoCB(echo_message, **kwargs): + messages_history.append(echo_message) + if echo_message.status == 0: + callback(data) + else: + errback(OperationFailure('Echo failed', messages_history)) + + m = SMB2Message(self, SMB2EchoRequest()) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, echoCB, errback) + messages_history.append(m) + + + # + # SMB1 Methods Family + # + + def _sendSMBMessage_SMB1(self, smb_message): + if smb_message.mid == 0: + smb_message.mid = self._getNextMID_SMB1() + if not smb_message.uid: + smb_message.uid = self.uid + if self.is_signing_active: + smb_message.flags2 |= SMB_FLAGS2_SMB_SECURITY_SIGNATURE + + # Increment the next_signing_id as described in [MS-CIFS] 3.2.4.1.3 + smb_message.security = self.next_signing_id + self.next_signing_id += 2 # All our defined messages currently have responses, so always increment by 2 + raw_data = smb_message.encode() + + md = ntlm.MD5(self.signing_session_key) + if 
self.signing_challenge_response: + md.update(self.signing_challenge_response) + md.update(raw_data) + signature = md.digest()[:8] + + self.log.debug('MID is %d. Signing ID is %d. Signature is %s. Total raw message is %d bytes', smb_message.mid, smb_message.security, binascii.hexlify(signature), len(raw_data)) + smb_message.raw_data = raw_data[:14] + signature + raw_data[22:] + else: + smb_message.raw_data = smb_message.encode() + self.sendNMBMessage(smb_message.raw_data) + + def _getNextMID_SMB1(self): + self.mid += 1 + if self.mid >= 0xFFFF: # MID cannot be 0xFFFF. [MS-CIFS]: 2.2.1.6.2 + # We don't use MID of 0 as MID can be reused for SMB_COM_TRANSACTION2_SECONDARY messages + # where if mid=0, _sendSMBMessage will re-assign new MID values again + self.mid = 1 + return self.mid + + def _updateState_SMB1(self, message): + if message.isReply: + if message.command == SMB_COM_NEGOTIATE: + if not message.status.hasError: + self.has_negotiated = True + self.log.info('SMB dialect negotiation successful (ExtendedSecurity:%s)', message.hasExtendedSecurity) + self._updateServerInfo(message.payload) + self._handleNegotiateResponse(message) + else: + raise ProtocolError('Unknown status value (0x%08X) in SMB_COM_NEGOTIATE' % message.status.internal_value, + message.raw_data, message) + elif message.command == SMB_COM_SESSION_SETUP_ANDX: + if message.hasExtendedSecurity: + if not message.status.hasError: + try: + result = securityblob.decodeAuthResponseSecurityBlob(message.payload.security_blob) + if result == securityblob.RESULT_ACCEPT_COMPLETED: + self.log.debug('SMB uid is now %d', message.uid) + self.uid = message.uid + self.has_authenticated = True + self.log.info('Authentication (with extended security) successful!') + self.onAuthOK() + else: + raise ProtocolError('SMB_COM_SESSION_SETUP_ANDX status is 0 but security blob negResult value is %d' % result, message.raw_data, message) + except securityblob.BadSecurityBlobError, ex: + raise ProtocolError(str(ex), 
message.raw_data, message) + elif message.status.internal_value == 0xc0000016: # STATUS_MORE_PROCESSING_REQUIRED + try: + result, ntlm_token = securityblob.decodeChallengeSecurityBlob(message.payload.security_blob) + if result == securityblob.RESULT_ACCEPT_INCOMPLETE: + self._handleSessionChallenge(message, ntlm_token) + except ( securityblob.BadSecurityBlobError, securityblob.UnsupportedSecurityProvider ), ex: + raise ProtocolError(str(ex), message.raw_data, message) + elif (message.status.internal_value == 0xc000006d # STATUS_LOGON_FAILURE + or message.status.internal_value == 0xc0000064 # STATUS_NO_SUCH_USER + or message.status.internal_value == 0xc000006a): # STATUS_WRONG_PASSWORD + self.has_authenticated = False + self.log.info('Authentication (with extended security) failed. Please check username and password.') + self.onAuthFailed() + elif (message.status.internal_value == 0xc0000193 # STATUS_ACCOUNT_EXPIRED + or message.status.internal_value == 0xC0000071): # STATUS_PASSWORD_EXPIRED + self.has_authenticated = False + self.log.info('Authentication (with extended security) failed. Account or password has expired.') + self.onAuthFailed() + elif message.status.internal_value == 0xc0000234: # STATUS_ACCOUNT_LOCKED_OUT + self.has_authenticated = False + self.log.info('Authentication (with extended security) failed. Account has been locked due to too many invalid logon attempts.') + self.onAuthFailed() + elif message.status.internal_value == 0xc0000072: # STATUS_ACCOUNT_DISABLED + self.has_authenticated = False + self.log.info('Authentication (with extended security) failed. Account has been disabled.') + self.onAuthFailed() + elif (message.status.internal_value == 0xc000006f # STATUS_INVALID_LOGON_HOURS + or message.status.internal_value == 0xc000015b # STATUS_LOGON_TYPE_NOT_GRANTED + or message.status.internal_value == 0xc0000070): # STATUS_INVALID_WORKSTATION + self.has_authenticated = False + self.log.info('Authentication (with extended security) failed. 
Not allowed.') + self.onAuthFailed() + elif message.status.internal_value == 0xc000018c: # STATUS_TRUSTED_DOMAIN_FAILURE + self.has_authenticated = False + self.log.info('Authentication (with extended security) failed. Domain not trusted.') + self.onAuthFailed() + elif message.status.internal_value == 0xc000018d: # STATUS_TRUSTED_RELATIONSHIP_FAILURE + self.has_authenticated = False + self.log.info('Authentication (with extended security) failed. Workstation not trusted.') + self.onAuthFailed() + else: + raise ProtocolError('Unknown status value (0x%08X) in SMB_COM_SESSION_SETUP_ANDX (with extended security)' % message.status.internal_value, + message.raw_data, message) + else: + if message.status.internal_value == 0: + self.log.debug('SMB uid is now %d', message.uid) + self.uid = message.uid + self.has_authenticated = True + self.log.info('Authentication (without extended security) successful!') + self.onAuthOK() + else: + self.has_authenticated = False + self.log.info('Authentication (without extended security) failed. Please check username and password') + self.onAuthFailed() + elif message.command == SMB_COM_TREE_CONNECT_ANDX: + try: + req = self.pending_requests[message.mid] + except KeyError: + pass + else: + if not message.status.hasError: + self.connected_trees[req.kwargs['path']] = message.tid + + req = self.pending_requests.pop(message.mid, None) + if req: + req.callback(message, **req.kwargs) + return True + + + def _updateServerInfo_SMB1(self, payload): + self.capabilities = payload.capabilities + self.security_mode = payload.security_mode + self.max_raw_size = payload.max_raw_size + self.max_buffer_size = payload.max_buffer_size + self.max_mpx_count = payload.max_mpx_count + self.use_plaintext_authentication = not bool(payload.security_mode & NEGOTIATE_ENCRYPT_PASSWORDS) + + if self.use_plaintext_authentication: + self.log.warning('Remote server only supports plaintext authentication. 
Your password can be stolen easily over the network.') + + + def _handleSessionChallenge_SMB1(self, message, ntlm_token): + assert message.hasExtendedSecurity + + if message.uid and not self.uid: + self.uid = message.uid + + server_challenge, server_flags, server_info = ntlm.decodeChallengeMessage(ntlm_token) + if self.use_ntlm_v2: + self.log.info('Performing NTLMv2 authentication (with extended security) with server challenge "%s"', binascii.hexlify(server_challenge)) + nt_challenge_response, lm_challenge_response, session_key = ntlm.generateChallengeResponseV2(self.password, + self.username, + server_challenge, + server_info, + self.domain) + + else: + self.log.info('Performing NTLMv1 authentication (with extended security) with server challenge "%s"', binascii.hexlify(server_challenge)) + nt_challenge_response, lm_challenge_response, session_key = ntlm.generateChallengeResponseV1(self.password, server_challenge, True) + + ntlm_data = ntlm.generateAuthenticateMessage(server_flags, + nt_challenge_response, + lm_challenge_response, + session_key, + self.username, + self.domain, + self.my_name) + + if self.log.isEnabledFor(logging.DEBUG): + self.log.debug('NT challenge response is "%s" (%d bytes)', binascii.hexlify(nt_challenge_response), len(nt_challenge_response)) + self.log.debug('LM challenge response is "%s" (%d bytes)', binascii.hexlify(lm_challenge_response), len(lm_challenge_response)) + + blob = securityblob.generateAuthSecurityBlob(ntlm_data) + self._sendSMBMessage(SMBMessage(self, ComSessionSetupAndxRequest__WithSecurityExtension(0, blob))) + + if self.security_mode & NEGOTIATE_SECURITY_SIGNATURES_REQUIRE: + self.log.info('Server requires all SMB messages to be signed') + self.is_signing_active = (self.sign_options != SMB.SIGN_NEVER) + elif self.security_mode & NEGOTIATE_SECURITY_SIGNATURES_ENABLE: + self.log.info('Server supports SMB signing') + self.is_signing_active = (self.sign_options == SMB.SIGN_WHEN_SUPPORTED) + else: + self.is_signing_active = 
False + + if self.is_signing_active: + self.log.info("SMB signing activated. All SMB messages will be signed.") + self.signing_session_key = session_key + if self.capabilities & CAP_EXTENDED_SECURITY: + self.signing_challenge_response = None + else: + self.signing_challenge_response = blob + else: + self.log.info("SMB signing deactivated. SMB messages will NOT be signed.") + + + def _handleNegotiateResponse_SMB1(self, message): + if message.uid and not self.uid: + self.uid = message.uid + + if message.hasExtendedSecurity or message.payload.supportsExtendedSecurity: + ntlm_data = ntlm.generateNegotiateMessage() + blob = securityblob.generateNegotiateSecurityBlob(ntlm_data) + self._sendSMBMessage(SMBMessage(self, ComSessionSetupAndxRequest__WithSecurityExtension(message.payload.session_key, blob))) + else: + nt_password, _, _ = ntlm.generateChallengeResponseV1(self.password, message.payload.challenge, False) + self.log.info('Performing NTLMv1 authentication (without extended security) with challenge "%s" and hashed password of "%s"', + binascii.hexlify(message.payload.challenge), + binascii.hexlify(nt_password)) + self._sendSMBMessage(SMBMessage(self, ComSessionSetupAndxRequest__NoSecurityExtension(message.payload.session_key, + self.username, + nt_password, + True, + self.domain))) + + def _listShares_SMB1(self, callback, errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + path = 'IPC$' + messages_history = [ ] + + def connectSrvSvc(tid): + m = SMBMessage(self, ComNTCreateAndxRequest('\\srvsvc', + flags = NT_CREATE_REQUEST_EXTENDED_RESPONSE, + access_mask = READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES | FILE_WRITE_EA | FILE_READ_EA | FILE_APPEND_DATA | FILE_WRITE_DATA | FILE_READ_DATA, + share_access = FILE_SHARE_READ | FILE_SHARE_WRITE, + create_disp = FILE_OPEN, + create_options = FILE_OPEN_NO_RECALL | FILE_NON_DIRECTORY_FILE, + impersonation = 
SEC_IMPERSONATE, + security_flags = 0)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectSrvSvcCB, errback) + messages_history.append(m) + + def connectSrvSvcCB(create_message, **kwargs): + messages_history.append(create_message) + if not create_message.status.hasError: + call_id = self._getNextRPCCallID() + # See [MS-CIFS]: 2.2.5.6.1 for more information on TRANS_TRANSACT_NMPIPE (0x0026) parameters + setup_bytes = struct.pack('<HH', 0x0026, create_message.payload.fid) + # The data_bytes are binding call to Server Service RPC using DCE v1.1 RPC over SMB. See [MS-SRVS] and [C706] + # If you wish to understand the meanings of the byte stream, I would suggest you use a recent version of WireShark to packet capture the stream + data_bytes = \ + binascii.unhexlify("""05 00 0b 03 10 00 00 00 48 00 00 00""".replace(' ', '')) + \ + struct.pack('<I', call_id) + \ + binascii.unhexlify(""" +b8 10 b8 10 00 00 00 00 01 00 00 00 00 00 01 00 +c8 4f 32 4b 70 16 d3 01 12 78 5a 47 bf 6e e1 88 +03 00 00 00 04 5d 88 8a eb 1c c9 11 9f e8 08 00 +2b 10 48 60 02 00 00 00""".replace(' ', '').replace('\n', '')) + m = SMBMessage(self, ComTransactionRequest(max_params_count = 0, + max_data_count = 4280, + max_setup_count = 0, + data_bytes = data_bytes, + setup_bytes = setup_bytes)) + m.tid = create_message.tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, rpcBindCB, errback, fid = create_message.payload.fid) + messages_history.append(m) + else: + errback(OperationFailure('Failed to list shares: Unable to locate Server Service RPC endpoint', messages_history)) + + def rpcBindCB(trans_message, **kwargs): + messages_history.append(trans_message) + if not trans_message.status.hasError: + call_id = self._getNextRPCCallID() + + padding = '' + server_len = len(self.remote_name) + 1 + server_bytes_len = server_len * 2 + if server_len % 2 != 0: + padding = '\0\0' + server_bytes_len += 2 
+ + # See [MS-CIFS]: 2.2.5.6.1 for more information on TRANS_TRANSACT_NMPIPE (0x0026) parameters + setup_bytes = struct.pack('<HH', 0x0026, kwargs['fid']) + # The data bytes are the RPC call to NetrShareEnum (Opnum 15) at Server Service RPC. + # If you wish to understand the meanings of the byte stream, I would suggest you use a recent version of WireShark to packet capture the stream + data_bytes = \ + binascii.unhexlify("""05 00 00 03 10 00 00 00""".replace(' ', '')) + \ + struct.pack('<HHI', 72+server_bytes_len, 0, call_id) + \ + binascii.unhexlify("""4c 00 00 00 00 00 0f 00 00 00 02 00""".replace(' ', '')) + \ + struct.pack('<III', server_len, 0, server_len) + \ + (self.remote_name + '\0').encode('UTF-16LE') + padding + \ + binascii.unhexlify(""" +01 00 00 00 01 00 00 00 04 00 02 00 00 00 00 00 +00 00 00 00 ff ff ff ff 08 00 02 00 00 00 00 00 +""".replace(' ', '').replace('\n', '')) + m = SMBMessage(self, ComTransactionRequest(max_params_count = 0, + max_data_count = 4280, + max_setup_count = 0, + data_bytes = data_bytes, + setup_bytes = setup_bytes)) + m.tid = trans_message.tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, listShareResultsCB, errback, fid = kwargs['fid']) + messages_history.append(m) + else: + closeFid(trans_message.tid, kwargs['fid']) + errback(OperationFailure('Failed to list shares: Unable to bind to Server Service RPC endpoint', messages_history)) + + def listShareResultsCB(result_message, **kwargs): + messages_history.append(result_message) + if not result_message.status.hasError: + # The payload.data_bytes will contain the results of the RPC call to NetrShareEnum (Opnum 15) at Server Service RPC. 
+ data_bytes = result_message.payload.data_bytes + + if ord(data_bytes[3]) & 0x02 == 0: + sendReadRequest(result_message.tid, kwargs['fid'], data_bytes) + else: + decodeResults(result_message.tid, kwargs['fid'], data_bytes) + else: + closeFid(result_message.tid, kwargs['fid']) + errback(OperationFailure('Failed to list shares: Unable to retrieve shared device list', messages_history)) + + def decodeResults(tid, fid, data_bytes): + shares_count = struct.unpack('<I', data_bytes[36:40])[0] + results = [ ] # A list of SharedDevice instances + offset = 36 + 12 # You need to study the byte stream to understand the meaning of these constants + for i in range(0, shares_count): + results.append(SharedDevice(struct.unpack('<I', data_bytes[offset+4:offset+8])[0], None, None)) + offset += 12 + + for i in range(0, shares_count): + max_length, _, length = struct.unpack('<III', data_bytes[offset:offset+12]) + offset += 12 + results[i].name = unicode(data_bytes[offset:offset+length*2-2], 'UTF-16LE') + + if length % 2 != 0: + offset += (length * 2 + 2) + else: + offset += (length * 2) + + max_length, _, length = struct.unpack('<III', data_bytes[offset:offset+12]) + offset += 12 + results[i].comments = unicode(data_bytes[offset:offset+length*2-2], 'UTF-16LE') + + if length % 2 != 0: + offset += (length * 2 + 2) + else: + offset += (length * 2) + + closeFid(tid, fid) + callback(results) + + def sendReadRequest(tid, fid, data_bytes): + read_count = min(4280, self.max_raw_size - 2) + m = SMBMessage(self, ComReadAndxRequest(fid = fid, + offset = 0, + max_return_bytes_count = read_count, + min_return_bytes_count = read_count)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, readCB, errback, fid = fid, data_bytes = data_bytes) + + def readCB(read_message, **kwargs): + messages_history.append(read_message) + if not read_message.status.hasError: + data_bytes = read_message.payload.data + + if ord(data_bytes[3]) & 
0x02 == 0: + sendReadRequest(read_message.tid, kwargs['fid'], kwargs['data_bytes'] + data_bytes[24:]) + else: + decodeResults(read_message.tid, kwargs['fid'], kwargs['data_bytes'] + data_bytes[24:]) + else: + closeFid(read_message.tid, kwargs['fid']) + errback(OperationFailure('Failed to list shares: Unable to retrieve shared device list', messages_history)) + + def closeFid(tid, fid): + m = SMBMessage(self, ComCloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + messages_history.append(m) + + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if not connect_message.status.hasError: + self.connected_trees[path] = connect_message.tid + connectSrvSvc(connect_message.tid) + else: + errback(OperationFailure('Failed to list shares: Unable to connect to IPC$', messages_history)) + + m = SMBMessage(self, ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), path ), SERVICE_ANY, '')) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = path) + messages_history.append(m) + + def _listPath_SMB1(self, service_name, path, callback, errback, search, pattern, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + path = path.replace('/', '\\') + if not path.endswith('\\'): + path += '\\' + messages_history = [ ] + results = [ ] + + def sendFindFirst(tid, support_dfs=False): + setup_bytes = struct.pack('<H', 0x0001) # TRANS2_FIND_FIRST2 sub-command. 
See [MS-CIFS]: 2.2.6.2.1 + params_bytes = \ + struct.pack('<HHHHI', + search & 0xFFFF, # SearchAttributes (need to restrict the values due to introduction of SMB_FILE_ATTRIBUTE_INCL_NORMAL) + 100, # SearchCount + 0x0006, # Flags: SMB_FIND_CLOSE_AT_EOS | SMB_FIND_RETURN_RESUME_KEYS + 0x0104, # InfoLevel: SMB_FIND_FILE_BOTH_DIRECTORY_INFO + 0x0000) # SearchStorageType (seems to be ignored by Windows) + if support_dfs: + params_bytes += ("\\" + self.remote_name + "\\" + service_name + path + pattern + '\0').encode('UTF-16LE') + else: + params_bytes += (path + pattern + '\0').encode('UTF-16LE') + + m = SMBMessage(self, ComTransaction2Request(max_params_count = 10, + max_data_count = 16644, + max_setup_count = 0, + params_bytes = params_bytes, + setup_bytes = setup_bytes)) + m.tid = tid + if support_dfs: + m.flags2 |= SMB_FLAGS2_DFS + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, findFirstCB, errback, support_dfs=support_dfs) + messages_history.append(m) + + def decodeFindStruct(data_bytes): + # SMB_FIND_FILE_BOTH_DIRECTORY_INFO structure. See [MS-CIFS]: 2.2.8.1.7 and [MS-SMB]: 2.2.8.1.1 + info_format = '<IIQQQQQQIIIBB24s' + info_size = struct.calcsize(info_format) + + data_length = len(data_bytes) + offset = 0 + while offset < data_length: + if offset + info_size > data_length: + return data_bytes[offset:] + + next_offset, _, \ + create_time, last_access_time, last_write_time, last_attr_change_time, \ + file_size, alloc_size, file_attributes, filename_length, ea_size, \ + short_name_length, _, short_name = struct.unpack(info_format, data_bytes[offset:offset+info_size]) + + offset2 = offset + info_size + if offset2 + filename_length > data_length: + return data_bytes[offset:] + + filename = data_bytes[offset2:offset2+filename_length].decode('UTF-16LE') + short_name = short_name.decode('UTF-16LE') + + accept_result = False + if (file_attributes & 0xff) in ( 0x00, ATTR_NORMAL ): # Only the first 8-bits are compared. 
We ignore other bits like temp, compressed, encryption, sparse, indexed, etc + accept_result = (search == SMB_FILE_ATTRIBUTE_NORMAL) or (search & SMB_FILE_ATTRIBUTE_INCL_NORMAL) + else: + accept_result = (file_attributes & search) > 0 + if accept_result: + results.append(SharedFile(convertFILETIMEtoEpoch(create_time), convertFILETIMEtoEpoch(last_access_time), + convertFILETIMEtoEpoch(last_write_time), convertFILETIMEtoEpoch(last_attr_change_time), + file_size, alloc_size, file_attributes, short_name, filename)) + + if next_offset: + offset += next_offset + else: + break + return '' + + def findFirstCB(find_message, **kwargs): + messages_history.append(find_message) + if not find_message.status.hasError: + if not kwargs.has_key('total_count'): + # TRANS2_FIND_FIRST2 response. [MS-CIFS]: 2.2.6.2.2 + sid, search_count, end_of_search, _, last_name_offset = struct.unpack('<HHHHH', find_message.payload.params_bytes[:10]) + kwargs.update({ 'sid': sid, 'end_of_search': end_of_search, 'last_name_offset': last_name_offset, 'data_buf': '' }) + else: + sid, end_of_search, last_name_offset = kwargs['sid'], kwargs['end_of_search'], kwargs['last_name_offset'] + + send_next = True + if find_message.payload.data_bytes: + d = decodeFindStruct(kwargs['data_buf'] + find_message.payload.data_bytes) + if not kwargs.has_key('data_count'): + if len(find_message.payload.data_bytes) != find_message.payload.total_data_count: + kwargs.update({ 'data_count': len(find_message.payload.data_bytes), + 'total_count': find_message.payload.total_data_count, + 'data_buf': d, + }) + send_next = False + else: + kwargs['data_count'] += len(find_message.payload.data_bytes) + kwargs['total_count'] = min(find_message.payload.total_data_count, kwargs['total_count']) + kwargs['data_buf'] = d + if kwargs['data_count'] != kwargs['total_count']: + send_next = False + + if not send_next: + self.pending_requests[find_message.mid] = _PendingRequest(find_message.mid, expiry_time, findFirstCB, errback, **kwargs) + 
elif end_of_search: + callback(results) + else: + sendFindNext(find_message.tid, sid, 0, results[-1].filename, kwargs.get('support_dfs', False)) + else: + errback(OperationFailure('Failed to list %s on %s: Unable to retrieve file list' % ( path, service_name ), messages_history)) + + def sendFindNext(tid, sid, resume_key, resume_file, support_dfs=False): + setup_bytes = struct.pack('<H', 0x0002) # TRANS2_FIND_NEXT2 sub-command. See [MS-CIFS]: 2.2.6.3.1 + params_bytes = \ + struct.pack('<HHHIH', + sid, # SID + 100, # SearchCount + 0x0104, # InfoLevel: SMB_FIND_FILE_BOTH_DIRECTORY_INFO + resume_key, # ResumeKey + 0x0006) # Flags: SMB_FIND_RETURN_RESUME_KEYS | SMB_FIND_CLOSE_AT_EOS + params_bytes += (resume_file+'\0').encode('UTF-16LE') + + m = SMBMessage(self, ComTransaction2Request(max_params_count = 10, + max_data_count = 16644, + max_setup_count = 0, + params_bytes = params_bytes, + setup_bytes = setup_bytes)) + m.tid = tid + if support_dfs: + m.flags2 |= SMB_FLAGS2_DFS + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, findNextCB, errback, sid = sid, support_dfs = support_dfs) + messages_history.append(m) + + def findNextCB(find_message, **kwargs): + messages_history.append(find_message) + if not find_message.status.hasError: + if not kwargs.has_key('total_count'): + # TRANS2_FIND_NEXT2 response. 
[MS-CIFS]: 2.2.6.3.2 + search_count, end_of_search, _, last_name_offset = struct.unpack('<HHHH', find_message.payload.params_bytes[:8]) + kwargs.update({ 'end_of_search': end_of_search, 'last_name_offset': last_name_offset, 'data_buf': '' }) + else: + end_of_search, last_name_offset = kwargs['end_of_search'], kwargs['last_name_offset'] + + send_next = True + if find_message.payload.data_bytes: + d = decodeFindStruct(kwargs['data_buf'] + find_message.payload.data_bytes) + if not kwargs.has_key('data_count'): + if len(find_message.payload.data_bytes) != find_message.payload.total_data_count: + kwargs.update({ 'data_count': len(find_message.payload.data_bytes), + 'total_count': find_message.payload.total_data_count, + 'data_buf': d, + }) + send_next = False + else: + kwargs['data_count'] += len(find_message.payload.data_bytes) + kwargs['total_count'] = min(find_message.payload.total_data_count, kwargs['total_count']) + kwargs['data_buf'] = d + if kwargs['data_count'] != kwargs['total_count']: + send_next = False + + if not send_next: + self.pending_requests[find_message.mid] = _PendingRequest(find_message.mid, expiry_time, findNextCB, errback, **kwargs) + elif end_of_search: + callback(results) + else: + sendFindNext(find_message.tid, kwargs['sid'], 0, results[-1].filename, kwargs.get('support_dfs', False)) + else: + errback(OperationFailure('Failed to list %s on %s: Unable to retrieve file list' % ( path, service_name ), messages_history)) + + def sendDFSReferral(tid): + setup_bytes = struct.pack('<H', 0x0010) # TRANS2_GET_DFS_REFERRAL sub-command. 
See [MS-CIFS]: 2.2.6.16.1 + params_bytes = struct.pack('<H', 3) # Max referral level 3 + params_bytes += ("\\" + self.remote_name + "\\" + service_name).encode('UTF-16LE') + + m = SMBMessage(self, ComTransaction2Request(max_params_count = 10, + max_data_count = 16644, + max_setup_count = 0, + params_bytes = params_bytes, + setup_bytes = setup_bytes)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, dfsReferralCB, errback) + messages_history.append(m) + + def dfsReferralCB(dfs_message, **kwargs): + sendFindFirst(dfs_message.tid, True) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if not connect_message.status.hasError: + self.connected_trees[service_name] = connect_message.tid + if connect_message.payload.optional_support & SMB_TREE_CONNECTX_SUPPORT_DFS: + sendDFSReferral(connect_message.tid) + else: + sendFindFirst(connect_message.tid, False) + else: + errback(OperationFailure('Failed to list %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMBMessage(self, ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendFindFirst(self.connected_trees[service_name]) + + def _getAttributes_SMB1(self, service_name, path, callback, errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + path = path.replace('/', '\\') + if path.startswith('\\'): + path = path[1:] + if path.endswith('\\'): + path = path[:-1] + messages_history = [ ] + + def sendQuery(tid): + setup_bytes = struct.pack('<H', 0x0005) # TRANS2_QUERY_PATH_INFORMATION sub-command. 
See [MS-CIFS]: 2.2.6.6.1 + params_bytes = \ + struct.pack('<HI', + 0x0107, # SMB_QUERY_FILE_ALL_INFO ([MS-CIFS] 2.2.2.3.3) + 0x0000) # Reserved + params_bytes += (path + '\0').encode('UTF-16LE') + + m = SMBMessage(self, ComTransaction2Request(max_params_count = 2, + max_data_count = 65535, + max_setup_count = 0, + params_bytes = params_bytes, + setup_bytes = setup_bytes)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, queryCB, errback) + messages_history.append(m) + + def queryCB(query_message, **kwargs): + messages_history.append(query_message) + if not query_message.status.hasError: + info_format = '<QQQQIIQQ' + info_size = struct.calcsize(info_format) + create_time, last_access_time, last_write_time, last_attr_change_time, \ + file_attributes, _, alloc_size, file_size = struct.unpack(info_format, query_message.payload.data_bytes[:info_size]) + filename = self._extractLastPathComponent(unicode(path)) + + info = SharedFile(convertFILETIMEtoEpoch(create_time), convertFILETIMEtoEpoch(last_access_time), convertFILETIMEtoEpoch(last_write_time), convertFILETIMEtoEpoch(last_attr_change_time), + file_size, alloc_size, file_attributes, filename, filename) + callback(info) + else: + errback(OperationFailure('Failed to get attributes for %s on %s: Read failed' % ( path, service_name ), messages_history)) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if not connect_message.status.hasError: + self.connected_trees[service_name] = connect_message.tid + sendQuery(connect_message.tid) + else: + errback(OperationFailure('Failed to get attributes for %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMBMessage(self, ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = 
_PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendQuery(self.connected_trees[service_name]) + + def _getSecurity_SMB1(self, service_name, path_file_pattern, callback, errback, timeout = 30): + raise NotReadyError('getSecurity is not yet implemented for SMB1') + + def _retrieveFile_SMB1(self, service_name, path, file_obj, callback, errback, timeout = 30): + return self._retrieveFileFromOffset(service_name, path, file_obj, callback, errback, 0L, -1L, timeout) + + def _retrieveFileFromOffset_SMB1(self, service_name, path, file_obj, callback, errback, starting_offset, max_length, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + path = path.replace('/', '\\') + messages_history = [ ] + + def sendOpen(tid): + m = SMBMessage(self, ComOpenAndxRequest(filename = path, + access_mode = 0x0040, # Sharing mode: Deny nothing to others + open_mode = 0x0001, # Failed if file does not exist + search_attributes = SMB_FILE_ATTRIBUTE_HIDDEN | SMB_FILE_ATTRIBUTE_SYSTEM, + timeout = timeout * 1000)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, openCB, errback) + messages_history.append(m) + + def openCB(open_message, **kwargs): + messages_history.append(open_message) + if not open_message.status.hasError: + if max_length == 0: + closeFid(open_message.tid, open_message.payload.fid) + callback(( file_obj, open_message.payload.file_attributes, 0L )) + else: + sendRead(open_message.tid, open_message.payload.fid, starting_offset, open_message.payload.file_attributes, 0L, max_length) + else: + errback(OperationFailure('Failed to retrieve %s on %s: Unable to open file' % ( path, service_name ), messages_history)) + + def sendRead(tid, fid, offset, file_attributes, read_len, remaining_len): + read_count = self.max_raw_size - 2 + m = SMBMessage(self, ComReadAndxRequest(fid = fid, + offset 
= offset, + max_return_bytes_count = read_count, + min_return_bytes_count = min(0xFFFF, read_count))) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, readCB, errback, fid = fid, offset = offset, file_attributes = file_attributes, + read_len = read_len, remaining_len = remaining_len) + + def readCB(read_message, **kwargs): + # To avoid crazy memory usage when retrieving large files, we do not save every read_message in messages_history. + if not read_message.status.hasError: + read_len = kwargs['read_len'] + remaining_len = kwargs['remaining_len'] + data_len = read_message.payload.data_length + if max_length > 0: + if data_len > remaining_len: + file_obj.write(read_message.payload.data[:remaining_len]) + read_len += remaining_len + remaining_len = 0 + else: + file_obj.write(read_message.payload.data) + remaining_len -= data_len + read_len += data_len + else: + file_obj.write(read_message.payload.data) + read_len += data_len + + if (max_length > 0 and remaining_len <= 0) or data_len < (self.max_raw_size - 2): + closeFid(read_message.tid, kwargs['fid']) + callback(( file_obj, kwargs['file_attributes'], read_len )) # Note that this is a tuple of 3-elements + else: + sendRead(read_message.tid, kwargs['fid'], kwargs['offset']+data_len, kwargs['file_attributes'], read_len, remaining_len) + else: + messages_history.append(read_message) + closeFid(read_message.tid, kwargs['fid']) + errback(OperationFailure('Failed to retrieve %s on %s: Read failed' % ( path, service_name ), messages_history)) + + def closeFid(tid, fid): + m = SMBMessage(self, ComCloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + messages_history.append(m) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if not connect_message.status.hasError: + self.connected_trees[service_name] = connect_message.tid + 
sendOpen(connect_message.tid) + else: + errback(OperationFailure('Failed to retrieve %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMBMessage(self, ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendOpen(self.connected_trees[service_name]) + + def _storeFile_SMB1(self, service_name, path, file_obj, callback, errback, timeout = 30): + self._storeFileFromOffset_SMB1(service_name, path, file_obj, callback, errback, 0L, True, timeout) + + def _storeFileFromOffset_SMB1(self, service_name, path, file_obj, callback, errback, starting_offset, truncate = False, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + path = path.replace('/', '\\') + messages_history = [ ] + + def sendOpen(tid): + m = SMBMessage(self, ComOpenAndxRequest(filename = path, + access_mode = 0x0041, # Sharing mode: Deny nothing to others + Open for writing + open_mode = 0x0012 if truncate else 0x0011, # Create file if file does not exist. Overwrite or append depending on truncate parameter. 
+ search_attributes = SMB_FILE_ATTRIBUTE_HIDDEN | SMB_FILE_ATTRIBUTE_SYSTEM, + timeout = timeout * 1000)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, openCB, errback) + messages_history.append(m) + + def openCB(open_message, **kwargs): + messages_history.append(open_message) + if not open_message.status.hasError: + sendWrite(open_message.tid, open_message.payload.fid, starting_offset) + else: + errback(OperationFailure('Failed to store %s on %s: Unable to open file' % ( path, service_name ), messages_history)) + + def sendWrite(tid, fid, offset): + # For message signing, the total SMB message size must be not exceed the max_buffer_size. Non-message signing does not have this limitation + write_count = min((self.is_signing_active and (self.max_buffer_size-64)) or self.max_raw_size, 0xFFFF-1) # Need to minus 1 byte from 0xFFFF because of the first NULL byte in the ComWriteAndxRequest message data + data_bytes = file_obj.read(write_count) + data_len = len(data_bytes) + if data_len > 0: + m = SMBMessage(self, ComWriteAndxRequest(fid = fid, offset = offset, data_bytes = data_bytes)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, writeCB, errback, fid = fid, offset = offset+data_len) + else: + closeFid(tid, fid) + callback(( file_obj, offset )) # Note that this is a tuple of 2-elements + + def writeCB(write_message, **kwargs): + # To avoid crazy memory usage when saving large files, we do not save every write_message in messages_history. 
+ if not write_message.status.hasError: + sendWrite(write_message.tid, kwargs['fid'], kwargs['offset']) + else: + messages_history.append(write_message) + closeFid(write_message.tid, kwargs['fid']) + errback(OperationFailure('Failed to store %s on %s: Write failed' % ( path, service_name ), messages_history)) + + def closeFid(tid, fid): + m = SMBMessage(self, ComCloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + messages_history.append(m) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if not connect_message.status.hasError: + self.connected_trees[service_name] = connect_message.tid + sendOpen(connect_message.tid) + else: + errback(OperationFailure('Failed to store %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMBMessage(self, ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendOpen(self.connected_trees[service_name]) + + def _deleteFiles_SMB1(self, service_name, path_file_pattern, callback, errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + path = path_file_pattern.replace('/', '\\') + messages_history = [ ] + + def sendDelete(tid): + m = SMBMessage(self, ComDeleteRequest(filename_pattern = path, + search_attributes = SMB_FILE_ATTRIBUTE_HIDDEN | SMB_FILE_ATTRIBUTE_SYSTEM)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, deleteCB, errback) + messages_history.append(m) + + def deleteCB(delete_message, **kwargs): + messages_history.append(delete_message) + if not delete_message.status.hasError: + callback(path_file_pattern) + else: 
+ errback(OperationFailure('Failed to store %s on %s: Delete failed' % ( path, service_name ), messages_history)) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if not connect_message.status.hasError: + self.connected_trees[service_name] = connect_message.tid + sendDelete(connect_message.tid) + else: + errback(OperationFailure('Failed to delete %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMBMessage(self, ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendDelete(self.connected_trees[service_name]) + + def _resetFileAttributes_SMB1(self, service_name, path_file_pattern, callback, errback, timeout = 30): + raise NotReadyError('resetFileAttributes is not yet implemented for SMB1') + + def _createDirectory_SMB1(self, service_name, path, callback, errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + path = path.replace('/', '\\') + messages_history = [ ] + + def sendCreate(tid): + m = SMBMessage(self, ComCreateDirectoryRequest(path)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, createCB, errback) + messages_history.append(m) + + def createCB(create_message, **kwargs): + messages_history.append(create_message) + if not create_message.status.hasError: + callback(path) + else: + errback(OperationFailure('Failed to create directory %s on %s: Create failed' % ( path, service_name ), messages_history)) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if 
not connect_message.status.hasError: + self.connected_trees[service_name] = connect_message.tid + sendCreate(connect_message.tid) + else: + errback(OperationFailure('Failed to create directory %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMBMessage(self, ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendCreate(self.connected_trees[service_name]) + + def _deleteDirectory_SMB1(self, service_name, path, callback, errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + path = path.replace('/', '\\') + messages_history = [ ] + + def sendDelete(tid): + m = SMBMessage(self, ComDeleteDirectoryRequest(path)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, deleteCB, errback) + messages_history.append(m) + + def deleteCB(delete_message, **kwargs): + messages_history.append(delete_message) + if not delete_message.status.hasError: + callback(path) + else: + errback(OperationFailure('Failed to delete directory %s on %s: Delete failed' % ( path, service_name ), messages_history)) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if not connect_message.status.hasError: + self.connected_trees[service_name] = connect_message.tid + sendDelete(connect_message.tid) + else: + errback(OperationFailure('Failed to delete %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMBMessage(self, ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) + self._sendSMBMessage(m) + 
self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendDelete(self.connected_trees[service_name]) + + def _rename_SMB1(self, service_name, old_path, new_path, callback, errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + new_path = new_path.replace('/', '\\') + old_path = old_path.replace('/', '\\') + messages_history = [ ] + + def sendRename(tid): + m = SMBMessage(self, ComRenameRequest(old_path = old_path, + new_path = new_path, + search_attributes = SMB_FILE_ATTRIBUTE_HIDDEN | SMB_FILE_ATTRIBUTE_SYSTEM)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, renameCB, errback) + messages_history.append(m) + + def renameCB(rename_message, **kwargs): + messages_history.append(rename_message) + if not rename_message.status.hasError: + callback(( old_path, new_path )) # Note that this is a tuple of 2-elements + else: + errback(OperationFailure('Failed to rename %s on %s: Rename failed' % ( old_path, service_name ), messages_history)) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if not connect_message.status.hasError: + self.connected_trees[service_name] = connect_message.tid + sendRename(connect_message.tid) + else: + errback(OperationFailure('Failed to rename %s on %s: Unable to connect to shared device' % ( old_path, service_name ), messages_history)) + + m = SMBMessage(self, ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendRename(self.connected_trees[service_name]) + + def 
_listSnapshots_SMB1(self, service_name, path, callback, errback, timeout = 30): + if not self.has_authenticated: + raise NotReadyError('SMB connection not authenticated') + + expiry_time = time.time() + timeout + path = path.replace('/', '\\') + if not path.endswith('\\'): + path += '\\' + messages_history = [ ] + results = [ ] + + def sendOpen(tid): + m = SMBMessage(self, ComOpenAndxRequest(filename = path, + access_mode = 0x0040, # Sharing mode: Deny nothing to others + open_mode = 0x0001, # Failed if file does not exist + search_attributes = 0, + timeout = timeout * 1000)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, openCB, errback) + messages_history.append(m) + + def openCB(open_message, **kwargs): + messages_history.append(open_message) + if not open_message.status.hasError: + sendEnumSnapshots(open_message.tid, open_message.payload.fid) + else: + errback(OperationFailure('Failed to list snapshots %s on %s: Unable to open path' % ( path, service_name ), messages_history)) + + def sendEnumSnapshots(tid, fid): + # [MS-CIFS]: 2.2.7.2 + # [MS-SMB]: 2.2.7.2.1 + setup_bytes = struct.pack('<IHBB', + 0x00144064, # [MS-SMB]: 2.2.7.2.1 + fid, # FID + 0x01, # IsFctl + 0) # IsFlags + m = SMBMessage(self, ComNTTransactRequest(function = 0x0002, # NT_TRANSACT_IOCTL. 
[MS-CIFS]: 2.2.7.2.1 + max_params_count = 0, + max_data_count = 0xFFFF, + max_setup_count = 0, + setup_bytes = setup_bytes)) + m.tid = tid + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, enumSnapshotsCB, errback, tid = tid, fid = fid) + messages_history.append(m) + + def enumSnapshotsCB(enum_message, **kwargs): + messages_history.append(enum_message) + if not enum_message.status.hasError: + results = [ ] + snapshots_count = struct.unpack('<I', enum_message.payload.data_bytes[4:8])[0] + for i in range(0, snapshots_count): + s = enum_message.payload.data_bytes[12+i*50:12+48+i*50].decode('UTF-16LE') + results.append(datetime(*map(int, ( s[5:9], s[10:12], s[13:15], s[16:18], s[19:21], s[22:24] )))) + closeFid(kwargs['tid'], kwargs['fid']) + callback(results) + else: + closeFid(kwargs['tid'], kwargs['fid']) + errback(OperationFailure('Failed to list snapshots %s on %s: Unable to list snapshots on path' % ( path, service_name ), messages_history)) + + def closeFid(tid, fid): + m = SMBMessage(self, ComCloseRequest(fid)) + m.tid = tid + self._sendSMBMessage(m) + messages_history.append(m) + + if not self.connected_trees.has_key(service_name): + def connectCB(connect_message, **kwargs): + messages_history.append(connect_message) + if not connect_message.status.hasError: + self.connected_trees[service_name] = connect_message.tid + sendOpen(connect_message.tid) + else: + errback(OperationFailure('Failed to list snapshots %s on %s: Unable to connect to shared device' % ( path, service_name ), messages_history)) + + m = SMBMessage(self, ComTreeConnectAndxRequest(r'\\%s\%s' % ( self.remote_name.upper(), service_name ), SERVICE_ANY, '')) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, expiry_time, connectCB, errback, path = service_name) + messages_history.append(m) + else: + sendOpen(self.connected_trees[service_name]) + + def _echo_SMB1(self, data, callback, errback, timeout = 30): + 
messages_history = [ ] + + if not isinstance(data, type(b'')): + raise TypeError('Echo data must be %s not %s' % (type(b'').__name__, type(data).__name__)) + + def echoCB(echo_message, **kwargs): + messages_history.append(echo_message) + if not echo_message.status.hasError: + callback(echo_message.payload.data) + else: + errback(OperationFailure('Echo failed', messages_history)) + + m = SMBMessage(self, ComEchoRequest(echo_data = data)) + self._sendSMBMessage(m) + self.pending_requests[m.mid] = _PendingRequest(m.mid, int(time.time()) + timeout, echoCB, errback) + messages_history.append(m) + + def _extractLastPathComponent(self, path): + return path.replace('\\', '/').split('/')[-1] + + +class SharedDevice: + """ + Contains information about a single shared device on the remote server. + + The following attributes are available: + + * name : An unicode string containing the name of the shared device + * comments : An unicode string containing the user description of the shared device + """ + + # The following constants are taken from [MS-SRVS]: 2.2.2.4 + # They are used to identify the type of shared resource from the results from the NetrShareEnum in Server Service RPC + DISK_TREE = 0x00 + PRINT_QUEUE = 0x01 + COMM_DEVICE = 0x02 + IPC = 0x03 + + def __init__(self, type, name, comments): + self._type = type + self.name = name #: An unicode string containing the name of the shared device + self.comments = comments #: An unicode string containing the user description of the shared device + + @property + def type(self): + """ + Returns one of the following integral constants. + - SharedDevice.DISK_TREE + - SharedDevice.PRINT_QUEUE + - SharedDevice.COMM_DEVICE + - SharedDevice.IPC + """ + return self._type & 0xFFFF + + @property + def isSpecial(self): + """ + Returns True if this shared device is a special share reserved for interprocess communication (IPC$) + or remote administration of the server (ADMIN$). 
Can also refer to administrative shares such as + C$, D$, E$, and so forth + """ + return bool(self._type & 0x80000000) + + @property + def isTemporary(self): + """ + Returns True if this is a temporary share that is not persisted for creation each time the file server initializes. + """ + return bool(self._type & 0x40000000) + + def __unicode__(self): + return u'Shared device: %s (type:0x%02x comments:%s)' % (self.name, self.type, self.comments ) + + +class SharedFile: + """ + Contain information about a file/folder entry that is shared on the shared device. + + As an application developer, you should not need to instantiate a *SharedFile* instance directly in your application. + These *SharedFile* instances are usually returned via a call to *listPath* method in :doc:`smb.SMBProtocol.SMBProtocolFactory<smb_SMBProtocolFactory>`. + + If you encounter *SharedFile* instance where its short_name attribute is empty but the filename attribute contains a short name which does not correspond + to any files/folders on your remote shared device, it could be that the original filename on the file/folder entry on the shared device contains + one of these prohibited characters: "\/[]:+|<>=;?,* (see [MS-CIFS]: 2.2.1.1.1 for more details). 
+ + The following attributes are available: + + * create_time : Float value in number of seconds since 1970-01-01 00:00:00 to the time of creation of this file resource on the remote server + * last_access_time : Float value in number of seconds since 1970-01-01 00:00:00 to the time of last access of this file resource on the remote server + * last_write_time : Float value in number of seconds since 1970-01-01 00:00:00 to the time of last modification of this file resource on the remote server + * last_attr_change_time : Float value in number of seconds since 1970-01-01 00:00:00 to the time of last attribute change of this file resource on the remote server + * file_size : File size in number of bytes + * alloc_size : Total number of bytes allocated to store this file + * file_attributes : A SMB_EXT_FILE_ATTR integer value. See [MS-CIFS]: 2.2.1.2.3. You can perform bit-wise tests to determine the status of the file using the ATTR_xxx constants in smb_constants.py. + * short_name : Unicode string containing the short name of this file (usually in 8.3 notation) + * filename : Unicode string containing the long filename of this file. Each OS has a limit to the length of this file name. On Windows, it is 256 characters. + * file_id : Long value representing the file reference number for the file. If the remote system does not support this field, this field will be None or 0. 
See [MS-FSCC]: 2.4.17 + """ + + def __init__(self, create_time, last_access_time, last_write_time, last_attr_change_time, file_size, alloc_size, file_attributes, short_name, filename, file_id=None): + self.create_time = create_time #: Float value in number of seconds since 1970-01-01 00:00:00 to the time of creation of this file resource on the remote server + self.last_access_time = last_access_time #: Float value in number of seconds since 1970-01-01 00:00:00 to the time of last access of this file resource on the remote server + self.last_write_time = last_write_time #: Float value in number of seconds since 1970-01-01 00:00:00 to the time of last modification of this file resource on the remote server + self.last_attr_change_time = last_attr_change_time #: Float value in number of seconds since 1970-01-01 00:00:00 to the time of last attribute change of this file resource on the remote server + self.file_size = file_size #: File size in number of bytes + self.alloc_size = alloc_size #: Total number of bytes allocated to store this file + self.file_attributes = file_attributes #: A SMB_EXT_FILE_ATTR integer value. See [MS-CIFS]: 2.2.1.2.3. You can perform bit-wise tests to determine the status of the file using the ATTR_xxx constants in smb_constants.py. + self.short_name = short_name #: Unicode string containing the short name of this file (usually in 8.3 notation) + self.filename = filename #: Unicode string containing the long filename of this file. Each OS has a limit to the length of this file name. On Windows, it is 256 characters. + self.file_id = file_id #: Long value representing the file reference number for the file. If the remote system does not support this field, this field will be None or 0. 
See [MS-FSCC]: 2.4.17 + + @property + def isDirectory(self): + """A convenient property to return True if this file resource is a directory on the remote server""" + return bool(self.file_attributes & ATTR_DIRECTORY) + + @property + def isReadOnly(self): + """A convenient property to return True if this file resource is read-only on the remote server""" + return bool(self.file_attributes & ATTR_READONLY) + + @property + def isNormal(self): + """ + A convenient property to return True if this is a normal file. + + Note that pysmb defines a normal file as a file entry that is not read-only, not hidden, not system, not archive and not a directory. + It ignores other attributes like compression, indexed, sparse, temporary and encryption. + """ + return (self.file_attributes==ATTR_NORMAL) or ((self.file_attributes & 0xff)==0) + + def __unicode__(self): + return u'Shared file: %s (FileSize:%d bytes, isDirectory:%s)' % ( self.filename, self.file_size, self.isDirectory ) + + +class _PendingRequest: + + def __init__(self, mid, expiry_time, callback, errback, **kwargs): + self.mid = mid + self.expiry_time = expiry_time + self.callback = callback + self.errback = errback + self.kwargs = kwargs diff --git a/plugin.video.alfa/lib/sambatools/smb/ntlm.py b/plugin.video.alfa/lib/sambatools/smb/ntlm.py index f8bff724..ae6fc9e7 100755 --- a/plugin.video.alfa/lib/sambatools/smb/ntlm.py +++ b/plugin.video.alfa/lib/sambatools/smb/ntlm.py @@ -1,249 +1,248 @@ -import hmac -import random -import struct - -from utils.pyDes import des - -try: - import hashlib - hashlib.new('md4') - - def MD4(): return hashlib.new('md4') -except ( ImportError, ValueError ): - from utils.md4 import MD4 - -try: - import hashlib - def MD5(s): return hashlib.md5(s) -except ImportError: - import md5 - def MD5(s): return md5.new(s) - -################ -# NTLMv2 Methods -################ - -# The following constants are defined in accordance to [MS-NLMP]: 2.2.2.5 - -NTLM_NegotiateUnicode = 0x00000001 
-NTLM_NegotiateOEM = 0x00000002 -NTLM_RequestTarget = 0x00000004 -NTLM_Unknown9 = 0x00000008 -NTLM_NegotiateSign = 0x00000010 -NTLM_NegotiateSeal = 0x00000020 -NTLM_NegotiateDatagram = 0x00000040 -NTLM_NegotiateLanManagerKey = 0x00000080 -NTLM_Unknown8 = 0x00000100 -NTLM_NegotiateNTLM = 0x00000200 -NTLM_NegotiateNTOnly = 0x00000400 -NTLM_Anonymous = 0x00000800 -NTLM_NegotiateOemDomainSupplied = 0x00001000 -NTLM_NegotiateOemWorkstationSupplied = 0x00002000 -NTLM_Unknown6 = 0x00004000 -NTLM_NegotiateAlwaysSign = 0x00008000 -NTLM_TargetTypeDomain = 0x00010000 -NTLM_TargetTypeServer = 0x00020000 -NTLM_TargetTypeShare = 0x00040000 -NTLM_NegotiateExtendedSecurity = 0x00080000 -NTLM_NegotiateIdentify = 0x00100000 -NTLM_Unknown5 = 0x00200000 -NTLM_RequestNonNTSessionKey = 0x00400000 -NTLM_NegotiateTargetInfo = 0x00800000 -NTLM_Unknown4 = 0x01000000 -NTLM_NegotiateVersion = 0x02000000 -NTLM_Unknown3 = 0x04000000 -NTLM_Unknown2 = 0x08000000 -NTLM_Unknown1 = 0x10000000 -NTLM_Negotiate128 = 0x20000000 -NTLM_NegotiateKeyExchange = 0x40000000 -NTLM_Negotiate56 = 0x80000000 - -NTLM_FLAGS = NTLM_NegotiateUnicode | \ - NTLM_RequestTarget | \ - NTLM_NegotiateNTLM | \ - NTLM_NegotiateAlwaysSign | \ - NTLM_NegotiateExtendedSecurity | \ - NTLM_NegotiateTargetInfo | \ - NTLM_NegotiateVersion | \ - NTLM_Negotiate128 | \ - NTLM_NegotiateKeyExchange | \ - NTLM_Negotiate56 - -def generateNegotiateMessage(): - """ - References: - =========== - - [MS-NLMP]: 2.2.1.1 - """ - s = struct.pack('<8sII8s8s8s', - 'NTLMSSP\0', 0x01, NTLM_FLAGS, - '\0' * 8, # Domain - '\0' * 8, # Workstation - '\x06\x00\x72\x17\x00\x00\x00\x0F') # Version [MS-NLMP]: 2.2.2.10 - return s - - -def generateAuthenticateMessage(challenge_flags, nt_response, lm_response, session_key, user, domain = 'WORKGROUP', workstation = 'LOCALHOST'): - """ - References: - =========== - - [MS-NLMP]: 2.2.1.3 - """ - FORMAT = '<8sIHHIHHIHHIHHIHHIHHII' - FORMAT_SIZE = struct.calcsize(FORMAT) - - lm_response_length = len(lm_response) - 
lm_response_offset = FORMAT_SIZE - nt_response_length = len(nt_response) - nt_response_offset = lm_response_offset + lm_response_length - domain_unicode = domain.encode('UTF-16LE') - domain_length = len(domain_unicode) - domain_offset = nt_response_offset + nt_response_length - - padding = '' - if domain_offset % 2 != 0: - padding = '\0' - domain_offset += 1 - - user_unicode = user.encode('UTF-16LE') - user_length = len(user_unicode) - user_offset = domain_offset + domain_length - workstation_unicode = workstation.encode('UTF-16LE') - workstation_length = len(workstation_unicode) - workstation_offset = user_offset + user_length - session_key_length = len(session_key) - session_key_offset = workstation_offset + workstation_length - - auth_flags = challenge_flags - auth_flags &= ~NTLM_NegotiateVersion - - s = struct.pack(FORMAT, - 'NTLMSSP\0', 0x03, - lm_response_length, lm_response_length, lm_response_offset, - nt_response_length, nt_response_length, nt_response_offset, - domain_length, domain_length, domain_offset, - user_length, user_length, user_offset, - workstation_length, workstation_length, workstation_offset, - session_key_length, session_key_length, session_key_offset, - auth_flags) - - return s + lm_response + nt_response + padding + domain_unicode + user_unicode + workstation_unicode + session_key - - -def decodeChallengeMessage(ntlm_data): - """ - References: - =========== - - [MS-NLMP]: 2.2.1.2 - - [MS-NLMP]: 2.2.2.1 (AV_PAIR) - """ - FORMAT = '<8sIHHII8s8sHHI' - FORMAT_SIZE = struct.calcsize(FORMAT) - - signature, message_type, \ - targetname_len, targetname_maxlen, targetname_offset, \ - flags, challenge, _, \ - targetinfo_len, targetinfo_maxlen, targetinfo_offset, \ - = struct.unpack(FORMAT, ntlm_data[:FORMAT_SIZE]) - - assert signature == 'NTLMSSP\0' - assert message_type == 0x02 - - return challenge, flags, ntlm_data[targetinfo_offset:targetinfo_offset+targetinfo_len] - - -def generateChallengeResponseV2(password, user, server_challenge, 
server_info, domain = '', client_challenge = None): - client_timestamp = '\0' * 8 - - if not client_challenge: - client_challenge = '' - for i in range(0, 8): - client_challenge += chr(random.getrandbits(8)) - assert len(client_challenge) == 8 - - d = MD4() - d.update(password.encode('UTF-16LE')) - ntlm_hash = d.digest() # The NT password hash - response_key = hmac.new(ntlm_hash, (user.upper() + domain).encode('UTF-16LE')).digest() # The NTLMv2 password hash. In [MS-NLMP], this is the result of NTOWFv2 and LMOWFv2 functions - temp = client_timestamp + client_challenge + domain.encode('UTF-16LE') + server_info - - nt_challenge_response = hmac.new(response_key, server_challenge + temp).digest() - lm_challenge_response = hmac.new(response_key, server_challenge + client_challenge).digest() + client_challenge - session_key = hmac.new(response_key, nt_challenge_response).digest() - - return nt_challenge_response, lm_challenge_response, session_key - - -################ -# NTLMv1 Methods -################ - -def expandDesKey(key): - """Expand the key from a 7-byte password key into a 8-byte DES key""" - s = chr(((ord(key[0]) >> 1) & 0x7f) << 1) - s = s + chr(((ord(key[0]) & 0x01) << 6 | ((ord(key[1]) >> 2) & 0x3f)) << 1) - s = s + chr(((ord(key[1]) & 0x03) << 5 | ((ord(key[2]) >> 3) & 0x1f)) << 1) - s = s + chr(((ord(key[2]) & 0x07) << 4 | ((ord(key[3]) >> 4) & 0x0f)) << 1) - s = s + chr(((ord(key[3]) & 0x0f) << 3 | ((ord(key[4]) >> 5) & 0x07)) << 1) - s = s + chr(((ord(key[4]) & 0x1f) << 2 | ((ord(key[5]) >> 6) & 0x03)) << 1) - s = s + chr(((ord(key[5]) & 0x3f) << 1 | ((ord(key[6]) >> 7) & 0x01)) << 1) - s = s + chr((ord(key[6]) & 0x7f) << 1) - return s - - -def DESL(K, D): - """ - References: - =========== - - http://ubiqx.org/cifs/SMB.html (2.8.3.4) - - [MS-NLMP]: Section 6 - """ - d1 = des(expandDesKey(K[0:7])) - d2 = des(expandDesKey(K[7:14])) - d3 = des(expandDesKey(K[14:16] + '\0' * 5)) - return d1.encrypt(D) + d2.encrypt(D) + d3.encrypt(D) - - -def 
generateChallengeResponseV1(password, server_challenge, has_extended_security = False, client_challenge = None): - """ - Generate a NTLMv1 response - - @param password: User password string - @param server_challange: A 8-byte challenge string sent from the server - @param has_extended_security: A boolean value indicating whether NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY flag is enabled in the NTLM negFlag - @param client_challenge: A 8-byte string representing client challenge. If None, it will be generated randomly if needed by the response generation - @return: a tuple of ( NT challenge response string, LM challenge response string ) - - References: - =========== - - http://ubiqx.org/cifs/SMB.html (2.8.3.3 and 2.8.3.4) - - [MS-NLMP]: 3.3.1 - """ - _password = (password.upper() + '\0' * 14)[:14] - d1 = des(expandDesKey(_password[:7])) - d2 = des(expandDesKey(_password[7:])) - lm_response_key = d1.encrypt("KGS!@#$%") + d2.encrypt("KGS!@#$%") # LM password hash. In [MS-NLMP], this is the result of LMOWFv1 function - - d = MD4() - d.update(password.encode('UTF-16LE')) - nt_response_key = d.digest() # In [MS-NLMP], this is the result of NTOWFv1 function - - if has_extended_security: - if not client_challenge: - client_challenge = '' - for i in range(0, 8): - client_challenge += chr(random.getrandbits(8)) - - assert len(client_challenge) == 8 - - lm_challenge_response = client_challenge + '\0'*16 - nt_challenge_response = DESL(nt_response_key, MD5(server_challenge + client_challenge).digest()[0:8]) - else: - nt_challenge_response = DESL(nt_response_key, server_challenge) # The result after DESL is the NT response - lm_challenge_response = DESL(lm_response_key, server_challenge) # The result after DESL is the LM response - - d = MD4() - d.update(nt_response_key) - session_key = d.digest() - - return nt_challenge_response, lm_challenge_response, session_key + +import types, hmac, binascii, struct, random +from utils.pyDes import des + +try: + import hashlib + 
hashlib.new('md4') + + def MD4(): return hashlib.new('md4') +except ( ImportError, ValueError ): + from utils.md4 import MD4 + +try: + import hashlib + def MD5(s): return hashlib.md5(s) +except ImportError: + import md5 + def MD5(s): return md5.new(s) + +################ +# NTLMv2 Methods +################ + +# The following constants are defined in accordance to [MS-NLMP]: 2.2.2.5 + +NTLM_NegotiateUnicode = 0x00000001 +NTLM_NegotiateOEM = 0x00000002 +NTLM_RequestTarget = 0x00000004 +NTLM_Unknown9 = 0x00000008 +NTLM_NegotiateSign = 0x00000010 +NTLM_NegotiateSeal = 0x00000020 +NTLM_NegotiateDatagram = 0x00000040 +NTLM_NegotiateLanManagerKey = 0x00000080 +NTLM_Unknown8 = 0x00000100 +NTLM_NegotiateNTLM = 0x00000200 +NTLM_NegotiateNTOnly = 0x00000400 +NTLM_Anonymous = 0x00000800 +NTLM_NegotiateOemDomainSupplied = 0x00001000 +NTLM_NegotiateOemWorkstationSupplied = 0x00002000 +NTLM_Unknown6 = 0x00004000 +NTLM_NegotiateAlwaysSign = 0x00008000 +NTLM_TargetTypeDomain = 0x00010000 +NTLM_TargetTypeServer = 0x00020000 +NTLM_TargetTypeShare = 0x00040000 +NTLM_NegotiateExtendedSecurity = 0x00080000 +NTLM_NegotiateIdentify = 0x00100000 +NTLM_Unknown5 = 0x00200000 +NTLM_RequestNonNTSessionKey = 0x00400000 +NTLM_NegotiateTargetInfo = 0x00800000 +NTLM_Unknown4 = 0x01000000 +NTLM_NegotiateVersion = 0x02000000 +NTLM_Unknown3 = 0x04000000 +NTLM_Unknown2 = 0x08000000 +NTLM_Unknown1 = 0x10000000 +NTLM_Negotiate128 = 0x20000000 +NTLM_NegotiateKeyExchange = 0x40000000 +NTLM_Negotiate56 = 0x80000000 + +NTLM_FLAGS = NTLM_NegotiateUnicode | \ + NTLM_RequestTarget | \ + NTLM_NegotiateNTLM | \ + NTLM_NegotiateAlwaysSign | \ + NTLM_NegotiateExtendedSecurity | \ + NTLM_NegotiateTargetInfo | \ + NTLM_NegotiateVersion | \ + NTLM_Negotiate128 | \ + NTLM_NegotiateKeyExchange | \ + NTLM_Negotiate56 + +def generateNegotiateMessage(): + """ + References: + =========== + - [MS-NLMP]: 2.2.1.1 + """ + s = struct.pack('<8sII8s8s8s', + 'NTLMSSP\0', 0x01, NTLM_FLAGS, + '\0' * 8, # Domain + '\0' * 8, # 
Workstation + '\x06\x00\x72\x17\x00\x00\x00\x0F') # Version [MS-NLMP]: 2.2.2.10 + return s + + +def generateAuthenticateMessage(challenge_flags, nt_response, lm_response, session_key, user, domain = 'WORKGROUP', workstation = 'LOCALHOST'): + """ + References: + =========== + - [MS-NLMP]: 2.2.1.3 + """ + FORMAT = '<8sIHHIHHIHHIHHIHHIHHII' + FORMAT_SIZE = struct.calcsize(FORMAT) + + lm_response_length = len(lm_response) + lm_response_offset = FORMAT_SIZE + nt_response_length = len(nt_response) + nt_response_offset = lm_response_offset + lm_response_length + domain_unicode = domain.encode('UTF-16LE') + domain_length = len(domain_unicode) + domain_offset = nt_response_offset + nt_response_length + + padding = '' + if domain_offset % 2 != 0: + padding = '\0' + domain_offset += 1 + + user_unicode = user.encode('UTF-16LE') + user_length = len(user_unicode) + user_offset = domain_offset + domain_length + workstation_unicode = workstation.encode('UTF-16LE') + workstation_length = len(workstation_unicode) + workstation_offset = user_offset + user_length + session_key_length = len(session_key) + session_key_offset = workstation_offset + workstation_length + + auth_flags = challenge_flags + auth_flags &= ~NTLM_NegotiateVersion + + s = struct.pack(FORMAT, + 'NTLMSSP\0', 0x03, + lm_response_length, lm_response_length, lm_response_offset, + nt_response_length, nt_response_length, nt_response_offset, + domain_length, domain_length, domain_offset, + user_length, user_length, user_offset, + workstation_length, workstation_length, workstation_offset, + session_key_length, session_key_length, session_key_offset, + auth_flags) + + return s + lm_response + nt_response + padding + domain_unicode + user_unicode + workstation_unicode + session_key + + +def decodeChallengeMessage(ntlm_data): + """ + References: + =========== + - [MS-NLMP]: 2.2.1.2 + - [MS-NLMP]: 2.2.2.1 (AV_PAIR) + """ + FORMAT = '<8sIHHII8s8sHHI' + FORMAT_SIZE = struct.calcsize(FORMAT) + + signature, message_type, \ + 
targetname_len, targetname_maxlen, targetname_offset, \ + flags, challenge, _, \ + targetinfo_len, targetinfo_maxlen, targetinfo_offset, \ + = struct.unpack(FORMAT, ntlm_data[:FORMAT_SIZE]) + + assert signature == 'NTLMSSP\0' + assert message_type == 0x02 + + return challenge, flags, ntlm_data[targetinfo_offset:targetinfo_offset+targetinfo_len] + + +def generateChallengeResponseV2(password, user, server_challenge, server_info, domain = '', client_challenge = None): + client_timestamp = '\0' * 8 + + if not client_challenge: + client_challenge = '' + for i in range(0, 8): + client_challenge += chr(random.getrandbits(8)) + assert len(client_challenge) == 8 + + d = MD4() + d.update(password.encode('UTF-16LE')) + ntlm_hash = d.digest() # The NT password hash + response_key = hmac.new(ntlm_hash, (user.upper() + domain).encode('UTF-16LE')).digest() # The NTLMv2 password hash. In [MS-NLMP], this is the result of NTOWFv2 and LMOWFv2 functions + temp = '\x01\x01' + '\0'*6 + client_timestamp + client_challenge + '\0'*4 + server_info + ntproofstr = hmac.new(response_key, server_challenge + temp).digest() + + nt_challenge_response = ntproofstr + temp + lm_challenge_response = hmac.new(response_key, server_challenge + client_challenge).digest() + client_challenge + session_key = hmac.new(response_key, ntproofstr).digest() + + return nt_challenge_response, lm_challenge_response, session_key + + +################ +# NTLMv1 Methods +################ + +def expandDesKey(key): + """Expand the key from a 7-byte password key into a 8-byte DES key""" + s = chr(((ord(key[0]) >> 1) & 0x7f) << 1) + s = s + chr(((ord(key[0]) & 0x01) << 6 | ((ord(key[1]) >> 2) & 0x3f)) << 1) + s = s + chr(((ord(key[1]) & 0x03) << 5 | ((ord(key[2]) >> 3) & 0x1f)) << 1) + s = s + chr(((ord(key[2]) & 0x07) << 4 | ((ord(key[3]) >> 4) & 0x0f)) << 1) + s = s + chr(((ord(key[3]) & 0x0f) << 3 | ((ord(key[4]) >> 5) & 0x07)) << 1) + s = s + chr(((ord(key[4]) & 0x1f) << 2 | ((ord(key[5]) >> 6) & 0x03)) << 1) + s = s + 
chr(((ord(key[5]) & 0x3f) << 1 | ((ord(key[6]) >> 7) & 0x01)) << 1) + s = s + chr((ord(key[6]) & 0x7f) << 1) + return s + + +def DESL(K, D): + """ + References: + =========== + - http://ubiqx.org/cifs/SMB.html (2.8.3.4) + - [MS-NLMP]: Section 6 + """ + d1 = des(expandDesKey(K[0:7])) + d2 = des(expandDesKey(K[7:14])) + d3 = des(expandDesKey(K[14:16] + '\0' * 5)) + return d1.encrypt(D) + d2.encrypt(D) + d3.encrypt(D) + + +def generateChallengeResponseV1(password, server_challenge, has_extended_security = False, client_challenge = None): + """ + Generate a NTLMv1 response + + @param password: User password string + @param server_challange: A 8-byte challenge string sent from the server + @param has_extended_security: A boolean value indicating whether NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY flag is enabled in the NTLM negFlag + @param client_challenge: A 8-byte string representing client challenge. If None, it will be generated randomly if needed by the response generation + @return: a tuple of ( NT challenge response string, LM challenge response string ) + + References: + =========== + - http://ubiqx.org/cifs/SMB.html (2.8.3.3 and 2.8.3.4) + - [MS-NLMP]: 3.3.1 + """ + _password = (password.upper() + '\0' * 14)[:14] + d1 = des(expandDesKey(_password[:7])) + d2 = des(expandDesKey(_password[7:])) + lm_response_key = d1.encrypt("KGS!@#$%") + d2.encrypt("KGS!@#$%") # LM password hash. 
In [MS-NLMP], this is the result of LMOWFv1 function + + d = MD4() + d.update(password.encode('UTF-16LE')) + nt_response_key = d.digest() # In [MS-NLMP], this is the result of NTOWFv1 function + + if has_extended_security: + if not client_challenge: + client_challenge = '' + for i in range(0, 8): + client_challenge += chr(random.getrandbits(8)) + + assert len(client_challenge) == 8 + + lm_challenge_response = client_challenge + '\0'*16 + nt_challenge_response = DESL(nt_response_key, MD5(server_challenge + client_challenge).digest()[0:8]) + else: + nt_challenge_response = DESL(nt_response_key, server_challenge) # The result after DESL is the NT response + lm_challenge_response = DESL(lm_response_key, server_challenge) # The result after DESL is the LM response + + d = MD4() + d.update(nt_response_key) + session_key = d.digest() + + return nt_challenge_response, lm_challenge_response, session_key diff --git a/plugin.video.alfa/lib/sambatools/smb/security_descriptors.py b/plugin.video.alfa/lib/sambatools/smb/security_descriptors.py new file mode 100644 index 00000000..9e6ebe14 --- /dev/null +++ b/plugin.video.alfa/lib/sambatools/smb/security_descriptors.py @@ -0,0 +1,367 @@ +""" +This module implements security descriptors, and the partial structures +used in them, as specified in [MS-DTYP]. 
+""" + +import struct + + +# Security descriptor control flags +# [MS-DTYP]: 2.4.6 +SECURITY_DESCRIPTOR_OWNER_DEFAULTED = 0x0001 +SECURITY_DESCRIPTOR_GROUP_DEFAULTED = 0x0002 +SECURITY_DESCRIPTOR_DACL_PRESENT = 0x0004 +SECURITY_DESCRIPTOR_DACL_DEFAULTED = 0x0008 +SECURITY_DESCRIPTOR_SACL_PRESENT = 0x0010 +SECURITY_DESCRIPTOR_SACL_DEFAULTED = 0x0020 +SECURITY_DESCRIPTOR_SERVER_SECURITY = 0x0040 +SECURITY_DESCRIPTOR_DACL_TRUSTED = 0x0080 +SECURITY_DESCRIPTOR_DACL_COMPUTED_INHERITANCE_REQUIRED = 0x0100 +SECURITY_DESCRIPTOR_SACL_COMPUTED_INHERITANCE_REQUIRED = 0x0200 +SECURITY_DESCRIPTOR_DACL_AUTO_INHERITED = 0x0400 +SECURITY_DESCRIPTOR_SACL_AUTO_INHERITED = 0x0800 +SECURITY_DESCRIPTOR_DACL_PROTECTED = 0x1000 +SECURITY_DESCRIPTOR_SACL_PROTECTED = 0x2000 +SECURITY_DESCRIPTOR_RM_CONTROL_VALID = 0x4000 +SECURITY_DESCRIPTOR_SELF_RELATIVE = 0x8000 + +# ACE types +# [MS-DTYP]: 2.4.4.1 +ACE_TYPE_ACCESS_ALLOWED = 0x00 +ACE_TYPE_ACCESS_DENIED = 0x01 +ACE_TYPE_SYSTEM_AUDIT = 0x02 +ACE_TYPE_SYSTEM_ALARM = 0x03 +ACE_TYPE_ACCESS_ALLOWED_COMPOUND = 0x04 +ACE_TYPE_ACCESS_ALLOWED_OBJECT = 0x05 +ACE_TYPE_ACCESS_DENIED_OBJECT = 0x06 +ACE_TYPE_SYSTEM_AUDIT_OBJECT = 0x07 +ACE_TYPE_SYSTEM_ALARM_OBJECT = 0x08 +ACE_TYPE_ACCESS_ALLOWED_CALLBACK = 0x09 +ACE_TYPE_ACCESS_DENIED_CALLBACK = 0x0A +ACE_TYPE_ACCESS_ALLOWED_CALLBACK_OBJECT = 0x0B +ACE_TYPE_ACCESS_DENIED_CALLBACK_OBJECT = 0x0C +ACE_TYPE_SYSTEM_AUDIT_CALLBACK = 0x0D +ACE_TYPE_SYSTEM_ALARM_CALLBACK = 0x0E +ACE_TYPE_SYSTEM_AUDIT_CALLBACK_OBJECT = 0x0F +ACE_TYPE_SYSTEM_ALARM_CALLBACK_OBJECT = 0x10 +ACE_TYPE_SYSTEM_MANDATORY_LABEL = 0x11 +ACE_TYPE_SYSTEM_RESOURCE_ATTRIBUTE = 0x12 +ACE_TYPE_SYSTEM_SCOPED_POLICY_ID = 0x13 + +# ACE flags +# [MS-DTYP]: 2.4.4.1 +ACE_FLAG_OBJECT_INHERIT = 0x01 +ACE_FLAG_CONTAINER_INHERIT = 0x02 +ACE_FLAG_NO_PROPAGATE_INHERIT = 0x04 +ACE_FLAG_INHERIT_ONLY = 0x08 +ACE_FLAG_INHERITED = 0x10 +ACE_FLAG_SUCCESSFUL_ACCESS = 0x40 +ACE_FLAG_FAILED_ACCESS = 0x80 + +# Pre-defined well-known SIDs +# [MS-DTYP]: 2.4.2.4 
+SID_NULL = "S-1-0-0" +SID_EVERYONE = "S-1-1-0" +SID_LOCAL = "S-1-2-0" +SID_CONSOLE_LOGON = "S-1-2-1" +SID_CREATOR_OWNER = "S-1-3-0" +SID_CREATOR_GROUP = "S-1-3-1" +SID_OWNER_SERVER = "S-1-3-2" +SID_GROUP_SERVER = "S-1-3-3" +SID_OWNER_RIGHTS = "S-1-3-4" +SID_NT_AUTHORITY = "S-1-5" +SID_DIALUP = "S-1-5-1" +SID_NETWORK = "S-1-5-2" +SID_BATCH = "S-1-5-3" +SID_INTERACTIVE = "S-1-5-4" +SID_SERVICE = "S-1-5-6" +SID_ANONYMOUS = "S-1-5-7" +SID_PROXY = "S-1-5-8" +SID_ENTERPRISE_DOMAIN_CONTROLLERS = "S-1-5-9" +SID_PRINCIPAL_SELF = "S-1-5-10" +SID_AUTHENTICATED_USERS = "S-1-5-11" +SID_RESTRICTED_CODE = "S-1-5-12" +SID_TERMINAL_SERVER_USER = "S-1-5-13" +SID_REMOTE_INTERACTIVE_LOGON = "S-1-5-14" +SID_THIS_ORGANIZATION = "S-1-5-15" +SID_IUSR = "S-1-5-17" +SID_LOCAL_SYSTEM = "S-1-5-18" +SID_LOCAL_SERVICE = "S-1-5-19" +SID_NETWORK_SERVICE = "S-1-5-20" +SID_COMPOUNDED_AUTHENTICATION = "S-1-5-21-0-0-0-496" +SID_CLAIMS_VALID = "S-1-5-21-0-0-0-497" +SID_BUILTIN_ADMINISTRATORS = "S-1-5-32-544" +SID_BUILTIN_USERS = "S-1-5-32-545" +SID_BUILTIN_GUESTS = "S-1-5-32-546" +SID_POWER_USERS = "S-1-5-32-547" +SID_ACCOUNT_OPERATORS = "S-1-5-32-548" +SID_SERVER_OPERATORS = "S-1-5-32-549" +SID_PRINTER_OPERATORS = "S-1-5-32-550" +SID_BACKUP_OPERATORS = "S-1-5-32-551" +SID_REPLICATOR = "S-1-5-32-552" +SID_ALIAS_PREW2KCOMPACC = "S-1-5-32-554" +SID_REMOTE_DESKTOP = "S-1-5-32-555" +SID_NETWORK_CONFIGURATION_OPS = "S-1-5-32-556" +SID_INCOMING_FOREST_TRUST_BUILDERS = "S-1-5-32-557" +SID_PERFMON_USERS = "S-1-5-32-558" +SID_PERFLOG_USERS = "S-1-5-32-559" +SID_WINDOWS_AUTHORIZATION_ACCESS_GROUP = "S-1-5-32-560" +SID_TERMINAL_SERVER_LICENSE_SERVERS = "S-1-5-32-561" +SID_DISTRIBUTED_COM_USERS = "S-1-5-32-562" +SID_IIS_IUSRS = "S-1-5-32-568" +SID_CRYPTOGRAPHIC_OPERATORS = "S-1-5-32-569" +SID_EVENT_LOG_READERS = "S-1-5-32-573" +SID_CERTIFICATE_SERVICE_DCOM_ACCESS = "S-1-5-32-574" +SID_RDS_REMOTE_ACCESS_SERVERS = "S-1-5-32-575" +SID_RDS_ENDPOINT_SERVERS = "S-1-5-32-576" +SID_RDS_MANAGEMENT_SERVERS = 
"S-1-5-32-577" +SID_HYPER_V_ADMINS = "S-1-5-32-578" +SID_ACCESS_CONTROL_ASSISTANCE_OPS = "S-1-5-32-579" +SID_REMOTE_MANAGEMENT_USERS = "S-1-5-32-580" +SID_WRITE_RESTRICTED_CODE = "S-1-5-33" +SID_NTLM_AUTHENTICATION = "S-1-5-64-10" +SID_SCHANNEL_AUTHENTICATION = "S-1-5-64-14" +SID_DIGEST_AUTHENTICATION = "S-1-5-64-21" +SID_THIS_ORGANIZATION_CERTIFICATE = "S-1-5-65-1" +SID_NT_SERVICE = "S-1-5-80" +SID_USER_MODE_DRIVERS = "S-1-5-84-0-0-0-0-0" +SID_LOCAL_ACCOUNT = "S-1-5-113" +SID_LOCAL_ACCOUNT_AND_MEMBER_OF_ADMINISTRATORS_GROUP = "S-1-5-114" +SID_OTHER_ORGANIZATION = "S-1-5-1000" +SID_ALL_APP_PACKAGES = "S-1-15-2-1" +SID_ML_UNTRUSTED = "S-1-16-0" +SID_ML_LOW = "S-1-16-4096" +SID_ML_MEDIUM = "S-1-16-8192" +SID_ML_MEDIUM_PLUS = "S-1-16-8448" +SID_ML_HIGH = "S-1-16-12288" +SID_ML_SYSTEM = "S-1-16-16384" +SID_ML_PROTECTED_PROCESS = "S-1-16-20480" +SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY = "S-1-18-1" +SID_SERVICE_ASSERTED_IDENTITY = "S-1-18-2" +SID_FRESH_PUBLIC_KEY_IDENTITY = "S-1-18-3" +SID_KEY_TRUST_IDENTITY = "S-1-18-4" +SID_KEY_PROPERTY_MFA = "S-1-18-5" +SID_KEY_PROPERTY_ATTESTATION = "S-1-18-6" + + +class SID(object): + """ + A Windows security identifier. Represents a single principal, such a + user or a group, as a sequence of numbers consisting of the revision, + identifier authority, and a variable-length list of subauthorities. + + See [MS-DTYP]: 2.4.2 + """ + def __init__(self, revision, identifier_authority, subauthorities): + #: Revision, should always be 1. + self.revision = revision + #: An integer representing the identifier authority. + self.identifier_authority = identifier_authority + #: A list of integers representing all subauthorities. 
+ self.subauthorities = subauthorities + + def __str__(self): + """ + String representation, as specified in [MS-DTYP]: 2.4.2.1 + """ + if self.identifier_authority >= 2**32: + id_auth = '%#x' % (self.identifier_authority,) + else: + id_auth = self.identifier_authority + auths = [self.revision, id_auth] + self.subauthorities + return 'S-' + '-'.join(str(subauth) for subauth in auths) + + def __repr__(self): + return 'SID(%r)' % (str(self),) + + @classmethod + def from_bytes(cls, data, return_tail=False): + revision, subauth_count = struct.unpack('<BB', data[:2]) + identifier_authority = struct.unpack('>Q', '\x00\x00' + data[2:8])[0] + subauth_data = data[8:] + subauthorities = [struct.unpack('<L', subauth_data[4 * i : 4 * (i+1)])[0] + for i in range(subauth_count)] + sid = cls(revision, identifier_authority, subauthorities) + if return_tail: + return sid, subauth_data[4 * subauth_count :] + return sid + + +class ACE(object): + """ + Represents a single access control entry. + + See [MS-DTYP]: 2.4.4 + """ + HEADER_FORMAT = '<BBH' + + def __init__(self, type_, flags, mask, sid, additional_data): + #: An integer representing the type of the ACE. One of the + #: ``ACE_TYPE_*`` constants. Corresponds to the ``AceType`` field + #: from [MS-DTYP] 2.4.4.1. + self.type = type_ + #: An integer bitmask with ACE flags, corresponds to the + #: ``AceFlags`` field. + self.flags = flags + #: An integer representing the ``ACCESS_MASK`` as specified in + #: [MS-DTYP] 2.4.3. + self.mask = mask + #: The :class:`SID` of a trustee. + self.sid = sid + #: A dictionary of additional fields present in the ACE, depending + #: on the type. 
The following fields can be present: + #: + #: * ``flags`` + #: * ``object_type`` + #: * ``inherited_object_type`` + #: * ``application_data`` + #: * ``attribute_data`` + self.additional_data = additional_data + + def __repr__(self): + return "ACE(type=%#04x, flags=%#04x, mask=%#010x, sid=%s)" % ( + self.type, self.flags, self.mask, self.sid, + ) + + @property + def isInheritOnly(self): + """Convenience property which indicates if this ACE is inherit + only, meaning that it doesn't apply to the object itself.""" + return bool(self.flags & ACE_FLAG_INHERIT_ONLY) + + @classmethod + def from_bytes(cls, data): + header_size = struct.calcsize(cls.HEADER_FORMAT) + header = data[:header_size] + type_, flags, size = struct.unpack(cls.HEADER_FORMAT, header) + + assert len(data) >= size + + body = data[header_size:size] + additional_data = {} + + # In all ACE types, the mask immediately follows the header. + mask = struct.unpack('<I', body[:4])[0] + body = body[4:] + + # All OBJECT-type ACEs contain additional flags, and two GUIDs as + # the following fields. + if type_ in (ACE_TYPE_ACCESS_ALLOWED_OBJECT, + ACE_TYPE_ACCESS_DENIED_OBJECT, + ACE_TYPE_ACCESS_ALLOWED_CALLBACK_OBJECT, + ACE_TYPE_ACCESS_DENIED_CALLBACK_OBJECT, + ACE_TYPE_SYSTEM_AUDIT_OBJECT, + ACE_TYPE_SYSTEM_AUDIT_CALLBACK_OBJECT): + additional_data['flags'] = struct.unpack('<I', body[:4])[0] + additional_data['object_type'] = body[4:20] + additional_data['inherited_object_type'] = body[20:36] + body = body[36:] + + # Then the SID in all types. + sid, body = SID.from_bytes(body, return_tail=True) + + # CALLBACK-type ACEs (and for some obscure reason, + # SYSTEM_AUDIT_OBJECT) have a final tail of application data. 
+ if type_ in (ACE_TYPE_ACCESS_ALLOWED_CALLBACK, + ACE_TYPE_ACCESS_DENIED_CALLBACK, + ACE_TYPE_ACCESS_ALLOWED_CALLBACK_OBJECT, + ACE_TYPE_ACCESS_DENIED_CALLBACK_OBJECT, + ACE_TYPE_SYSTEM_AUDIT_OBJECT, + ACE_TYPE_SYSTEM_AUDIT_CALLBACK, + ACE_TYPE_SYSTEM_AUDIT_CALLBACK_OBJECT): + additional_data['application_data'] = body + + # SYSTEM_RESOURCE_ATTRIBUTE ACEs have a tail of attribute data. + if type_ == ACE_TYPE_SYSTEM_RESOURCE_ATTRIBUTE: + additional_data['attribute_data'] = body + + return cls(type_, flags, mask, sid, additional_data) + + +class ACL(object): + """ + Access control list, encapsulating a sequence of access control + entries. + + See [MS-DTYP]: 2.4.5 + """ + HEADER_FORMAT = '<BBHHH' + + def __init__(self, revision, aces): + #: Integer value of the revision. + self.revision = revision + #: List of :class:`ACE` instances. + self.aces = aces + + def __repr__(self): + return "ACL(%r)" % (self.aces,) + + @classmethod + def from_bytes(cls, data): + revision = None + aces = [] + + header_size = struct.calcsize(cls.HEADER_FORMAT) + header, remaining = data[:header_size], data[header_size:] + revision, sbz1, size, count, sbz2 = struct.unpack(cls.HEADER_FORMAT, header) + + assert len(data) >= size + + for i in range(count): + ace_size = struct.unpack('<H', remaining[2:4])[0] + ace_data, remaining = remaining[:ace_size], remaining[ace_size:] + aces.append(ACE.from_bytes(ace_data)) + + return cls(revision, aces) + + +class SecurityDescriptor(object): + """ + Represents a security descriptor. + + See [MS-DTYP]: 2.4.6 + """ + + HEADER_FORMAT = '<BBHIIII' + + def __init__(self, flags, owner, group, dacl, sacl): + #: Integer bitmask of control flags. Corresponds to the + #: ``Control`` field in [MS-DTYP] 2.4.6. + self.flags = flags + #: Instance of :class:`SID` representing the owner user. + self.owner = owner + #: Instance of :class:`SID` representing the owner group. 
+ self.group = group + #: Instance of :class:`ACL` representing the discretionary access + #: control list, which specifies access restrictions of an object. + self.dacl = dacl + #: Instance of :class:`ACL` representing the system access control + #: list, which specifies audit logging of an object. + self.sacl = sacl + + @classmethod + def from_bytes(cls, data): + owner = None + group = None + dacl = None + sacl = None + + header = data[:struct.calcsize(cls.HEADER_FORMAT)] + (revision, sbz1, flags, owner_offset, group_offset, sacl_offset, + dacl_offset) = struct.unpack(cls.HEADER_FORMAT, header) + + assert revision == 1 + assert flags & SECURITY_DESCRIPTOR_SELF_RELATIVE + for offset in (owner_offset, group_offset, sacl_offset, dacl_offset): + assert 0 <= offset < len(data) + + if owner_offset: + owner = SID.from_bytes(data[owner_offset:]) + if group_offset: + group = SID.from_bytes(data[group_offset:]) + if dacl_offset: + dacl = ACL.from_bytes(data[dacl_offset:]) + if sacl_offset: + sacl = ACL.from_bytes(data[sacl_offset:]) + + return cls(flags, owner, group, dacl, sacl) diff --git a/plugin.video.alfa/lib/sambatools/smb/securityblob.py b/plugin.video.alfa/lib/sambatools/smb/securityblob.py index b71cbd00..38af11ec 100755 --- a/plugin.video.alfa/lib/sambatools/smb/securityblob.py +++ b/plugin.video.alfa/lib/sambatools/smb/securityblob.py @@ -1,136 +1,136 @@ - -from pyasn1.codec.der import encoder, decoder -from pyasn1.type import tag, univ, namedtype, namedval, constraint - -__all__ = [ 'generateNegotiateSecurityBlob', 'generateAuthSecurityBlob', 'decodeChallengeSecurityBlob', 'decodeAuthResponseSecurityBlob' ] - - -class UnsupportedSecurityProvider(Exception): pass -class BadSecurityBlobError(Exception): pass - - -def generateNegotiateSecurityBlob(ntlm_data): - mech_token = univ.OctetString(ntlm_data).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)) - mech_types = MechTypeList().subtype(explicitTag=tag.Tag(tag.tagClassContext, 
tag.tagFormatConstructed, 0)) - mech_types.setComponentByPosition(0, univ.ObjectIdentifier('1.3.6.1.4.1.311.2.2.10')) - - n = NegTokenInit().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)) - n.setComponentByName('mechTypes', mech_types) - n.setComponentByName('mechToken', mech_token) - - nt = NegotiationToken() - nt.setComponentByName('negTokenInit', n) - - ct = ContextToken() - ct.setComponentByName('thisMech', univ.ObjectIdentifier('1.3.6.1.5.5.2')) - ct.setComponentByName('innerContextToken', nt) - - return encoder.encode(ct) - - -def generateAuthSecurityBlob(ntlm_data): - response_token = univ.OctetString(ntlm_data).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)) - - n = NegTokenTarg().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)) - n.setComponentByName('responseToken', response_token) - - nt = NegotiationToken() - nt.setComponentByName('negTokenTarg', n) - - return encoder.encode(nt) - - -def decodeChallengeSecurityBlob(data): - try: - d, _ = decoder.decode(data, asn1Spec = NegotiationToken()) - nt = d.getComponentByName('negTokenTarg') - - token = nt.getComponentByName('responseToken') - if not token: - raise BadSecurityBlobError('NTLMSSP_CHALLENGE security blob does not contain responseToken field') - - provider_oid = nt.getComponentByName('supportedMech') - if provider_oid and str(provider_oid) != '1.3.6.1.4.1.311.2.2.10': # This OID is defined in [MS-NLMP]: 1.9 - raise UnsupportedSecurityProvider('Security provider "%s" is not supported by pysmb' % str(provider_oid)) - - result = nt.getComponentByName('negResult') - return int(result), str(token) - except Exception, ex: - raise BadSecurityBlobError(str(ex)) - - -def decodeAuthResponseSecurityBlob(data): - try: - d, _ = decoder.decode(data, asn1Spec = NegotiationToken()) - nt = d.getComponentByName('negTokenTarg') - - result = nt.getComponentByName('negResult') - return int(result) - except Exception, ex: - raise 
BadSecurityBlobError(str(ex)) - - -# -# GSS-API ASN.1 (RFC2478 section 3.2.1) -# - -RESULT_ACCEPT_COMPLETED = 0 -RESULT_ACCEPT_INCOMPLETE = 1 -RESULT_REJECT = 2 - -class NegResultEnumerated(univ.Enumerated): - namedValues = namedval.NamedValues( - ( 'accept_completed', 0 ), - ( 'accept_incomplete', 1 ), - ( 'reject', 2 ) - ) - subtypeSpec = univ.Enumerated.subtypeSpec + constraint.SingleValueConstraint(0, 1, 2) - - -class MechTypeList(univ.SequenceOf): - componentType = univ.ObjectIdentifier() - - -class ContextFlags(univ.BitString): - namedValues = namedval.NamedValues( - ( 'delegFlag', 0 ), - ( 'mutualFlag', 1 ), - ( 'replayFlag', 2 ), - ( 'sequenceFlag', 3 ), - ( 'anonFlag', 4 ), - ( 'confFlag', 5 ), - ( 'integFlag', 6 ) - ) - - -class NegTokenInit(univ.Sequence): - componentType = namedtype.NamedTypes( - namedtype.OptionalNamedType('mechTypes', MechTypeList().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), - namedtype.OptionalNamedType('reqFlags', ContextFlags().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), - namedtype.OptionalNamedType('mechToken', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))), - namedtype.OptionalNamedType('mechListMIC', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))) - ) - - -class NegTokenTarg(univ.Sequence): - componentType = namedtype.NamedTypes( - namedtype.OptionalNamedType('negResult', NegResultEnumerated().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), - namedtype.OptionalNamedType('supportedMech', univ.ObjectIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), - namedtype.OptionalNamedType('responseToken', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))), - namedtype.OptionalNamedType('mechListMIC', 
univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))) - ) - - -class NegotiationToken(univ.Choice): - componentType = namedtype.NamedTypes( - namedtype.NamedType('negTokenInit', NegTokenInit().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), - namedtype.NamedType('negTokenTarg', NegTokenTarg().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) - ) - - -class ContextToken(univ.Sequence): - tagSet = univ.Sequence.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 0)) - componentType = namedtype.NamedTypes( - namedtype.NamedType('thisMech', univ.ObjectIdentifier()), - namedtype.NamedType('innerContextToken', NegotiationToken()) - ) + +from pyasn1.type import tag, univ, namedtype, namedval, constraint +from pyasn1.codec.der import encoder, decoder + +__all__ = [ 'generateNegotiateSecurityBlob', 'generateAuthSecurityBlob', 'decodeChallengeSecurityBlob', 'decodeAuthResponseSecurityBlob' ] + + +class UnsupportedSecurityProvider(Exception): pass +class BadSecurityBlobError(Exception): pass + + +def generateNegotiateSecurityBlob(ntlm_data): + mech_token = univ.OctetString(ntlm_data).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)) + mech_types = MechTypeList().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)) + mech_types.setComponentByPosition(0, univ.ObjectIdentifier('1.3.6.1.4.1.311.2.2.10')) + + n = NegTokenInit().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)) + n.setComponentByName('mechTypes', mech_types) + n.setComponentByName('mechToken', mech_token) + + nt = NegotiationToken() + nt.setComponentByName('negTokenInit', n) + + ct = ContextToken() + ct.setComponentByName('thisMech', univ.ObjectIdentifier('1.3.6.1.5.5.2')) + ct.setComponentByName('innerContextToken', nt) + + return encoder.encode(ct) + + +def generateAuthSecurityBlob(ntlm_data): + 
response_token = univ.OctetString(ntlm_data).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)) + + n = NegTokenTarg().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)) + n.setComponentByName('responseToken', response_token) + + nt = NegotiationToken() + nt.setComponentByName('negTokenTarg', n) + + return encoder.encode(nt) + + +def decodeChallengeSecurityBlob(data): + try: + d, _ = decoder.decode(data, asn1Spec = NegotiationToken()) + nt = d.getComponentByName('negTokenTarg') + + token = nt.getComponentByName('responseToken') + if not token: + raise BadSecurityBlobError('NTLMSSP_CHALLENGE security blob does not contain responseToken field') + + provider_oid = nt.getComponentByName('supportedMech') + if provider_oid and str(provider_oid) != '1.3.6.1.4.1.311.2.2.10': # This OID is defined in [MS-NLMP]: 1.9 + raise UnsupportedSecurityProvider('Security provider "%s" is not supported by pysmb' % str(provider_oid)) + + result = nt.getComponentByName('negResult') + return int(result), str(token) + except Exception, ex: + raise BadSecurityBlobError(str(ex)) + + +def decodeAuthResponseSecurityBlob(data): + try: + d, _ = decoder.decode(data, asn1Spec = NegotiationToken()) + nt = d.getComponentByName('negTokenTarg') + + result = nt.getComponentByName('negResult') + return int(result) + except Exception, ex: + raise BadSecurityBlobError(str(ex)) + + +# +# GSS-API ASN.1 (RFC2478 section 3.2.1) +# + +RESULT_ACCEPT_COMPLETED = 0 +RESULT_ACCEPT_INCOMPLETE = 1 +RESULT_REJECT = 2 + +class NegResultEnumerated(univ.Enumerated): + namedValues = namedval.NamedValues( + ( 'accept_completed', 0 ), + ( 'accept_incomplete', 1 ), + ( 'reject', 2 ) + ) + subtypeSpec = univ.Enumerated.subtypeSpec + constraint.SingleValueConstraint(0, 1, 2) + + +class MechTypeList(univ.SequenceOf): + componentType = univ.ObjectIdentifier() + + +class ContextFlags(univ.BitString): + namedValues = namedval.NamedValues( + ( 'delegFlag', 0 ), + ( 'mutualFlag', 1 
), + ( 'replayFlag', 2 ), + ( 'sequenceFlag', 3 ), + ( 'anonFlag', 4 ), + ( 'confFlag', 5 ), + ( 'integFlag', 6 ) + ) + + +class NegTokenInit(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('mechTypes', MechTypeList().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('reqFlags', ContextFlags().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.OptionalNamedType('mechToken', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.OptionalNamedType('mechListMIC', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))) + ) + + +class NegTokenTarg(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('negResult', NegResultEnumerated().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('supportedMech', univ.ObjectIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.OptionalNamedType('responseToken', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.OptionalNamedType('mechListMIC', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))) + ) + + +class NegotiationToken(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('negTokenInit', NegTokenInit().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('negTokenTarg', NegTokenTarg().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) + ) + + +class ContextToken(univ.Sequence): + tagSet = univ.Sequence.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 0)) + componentType = namedtype.NamedTypes( + 
namedtype.NamedType('thisMech', univ.ObjectIdentifier()), + namedtype.NamedType('innerContextToken', NegotiationToken()) + ) diff --git a/plugin.video.alfa/lib/sambatools/smb/smb2_constants.py b/plugin.video.alfa/lib/sambatools/smb/smb2_constants.py index 871da70d..024ed2c4 100755 --- a/plugin.video.alfa/lib/sambatools/smb/smb2_constants.py +++ b/plugin.video.alfa/lib/sambatools/smb/smb2_constants.py @@ -1,101 +1,115 @@ - -# Bitmask for Flags field in SMB2 message header -SMB2_FLAGS_SERVER_TO_REDIR = 0x01 -SMB2_FLAGS_ASYNC_COMMAND = 0x02 -SMB2_FLAGS_RELATED_OPERATIONS = 0x04 -SMB2_FLAGS_SIGNED = 0x08 -SMB2_FLAGS_DFS_OPERATIONS = 0x10000000 - -# Values for Command field in SMB2 message header -SMB2_COM_NEGOTIATE = 0x0000 -SMB2_COM_SESSION_SETUP = 0x0001 -SMB2_COM_LOGOFF = 0x0002 -SMB2_COM_TREE_CONNECT = 0x0003 -SMB2_COM_TREE_DISCONNECT = 0x0004 -SMB2_COM_CREATE = 0x0005 -SMB2_COM_CLOSE = 0x0006 -SMB2_COM_FLUSH = 0x0007 -SMB2_COM_READ = 0x0008 -SMB2_COM_WRITE = 0x0009 -SMB2_COM_LOCK = 0x000A -SMB2_COM_IOCTL = 0x000B -SMB2_COM_CANCEL = 0x000C -SMB2_COM_ECHO = 0x000D -SMB2_COM_QUERY_DIRECTORY = 0x000E -SMB2_COM_CHANGE_NOTIFY = 0x000F -SMB2_COM_QUERY_INFO = 0x0010 -SMB2_COM_SET_INFO = 0x0011 -SMB2_COM_OPLOCK_BREAK = 0x0012 - -SMB2_COMMAND_NAMES = { - 0x0000: 'SMB2_COM_NEGOTIATE', - 0x0001: 'SMB2_COM_SESSION_SETUP', - 0x0002: 'SMB2_COM_LOGOFF', - 0x0003: 'SMB2_COM_TREE_CONNECT', - 0x0004: 'SMB2_COM_TREE_DISCONNECT', - 0x0005: 'SMB2_COM_CREATE', - 0x0006: 'SMB2_COM_CLOSE', - 0x0007: 'SMB2_COM_FLUSH', - 0x0008: 'SMB2_COM_READ', - 0x0009: 'SMB2_COM_WRITE', - 0x000A: 'SMB2_COM_LOCK', - 0x000B: 'SMB2_COM_IOCTL', - 0x000C: 'SMB2_COM_CANCEL', - 0x000D: 'SMB2_COM_ECHO', - 0x000E: 'SMB2_COM_QUERY_DIRECTORY', - 0x000F: 'SMB2_COM_CHANGE_NOTIFY', - 0x0010: 'SMB2_COM_QUERY_INFO', - 0x0011: 'SMB2_COM_SET_INFO', - 0x0012: 'SMB2_COM_OPLOCK_BREAK', -} - -# Values for dialect_revision field in SMB2NegotiateResponse class -SMB2_DIALECT_2 = 0x0202 -SMB2_DIALECT_21 = 0x0210 
-SMB2_DIALECT_2ALL = 0x02FF - -# Bit mask for SecurityMode field in SMB2NegotiateResponse class -SMB2_NEGOTIATE_SIGNING_ENABLED = 0x0001 -SMB2_NEGOTIATE_SIGNING_REQUIRED = 0x0002 - -# Values for ShareType field in SMB2TreeConnectResponse class -SMB2_SHARE_TYPE_DISK = 0x01 -SMB2_SHARE_TYPE_PIPE = 0x02 -SMB2_SHARE_TYPE_PRINTER = 0x03 - -# Bitmask for Capabilities in SMB2TreeConnectResponse class -SMB2_SHARE_CAP_DFS = 0x0008 - -# Values for OpLockLevel field in SMB2CreateRequest class -SMB2_OPLOCK_LEVEL_NONE = 0x00 -SMB2_OPLOCK_LEVEL_II = 0x01 -SMB2_OPLOCK_LEVEL_EXCLUSIVE = 0x08 -SMB2_OPLOCK_LEVEL_BATCH = 0x09 -SMB2_OPLOCK_LEVEL_LEASE = 0xFF - -# Values for FileAttributes field in SMB2CreateRequest class -# The values are defined in [MS-FSCC] 2.6 -SMB2_FILE_ATTRIBUTE_ARCHIVE = 0x0020 -SMB2_FILE_ATTRIBUTE_COMPRESSED = 0x0800 -SMB2_FILE_ATTRIBUTE_DIRECTORY = 0x0010 -SMB2_FILE_ATTRIBUTE_ENCRYPTED = 0x4000 -SMB2_FILE_ATTRIBUTE_HIDDEN = 0x0002 -SMB2_FILE_ATTRIBUTE_NORMAL = 0x0080 -SMB2_FILE_ATTRIBUTE_NOTINDEXED = 0x2000 -SMB2_FILE_ATTRIBUTE_OFFLINE = 0x1000 -SMB2_FILE_ATTRIBUTE_READONLY = 0x0001 -SMB2_FILE_ATTRIBUTE_SPARSE = 0x0200 -SMB2_FILE_ATTRIBUTE_SYSTEM = 0x0004 -SMB2_FILE_ATTRIBUTE_TEMPORARY = 0x0100 - -# Values for CreateAction field in SMB2CreateResponse class -SMB2_FILE_SUPERCEDED = 0x00 -SMB2_FILE_OPENED = 0x01 -SMB2_FILE_CREATED = 0x02 -SMB2_FILE_OVERWRITTEN = 0x03 - -# Values for InfoType field in SMB2QueryInfoRequest class -SMB2_INFO_FILE = 0x01 -SMB2_INFO_FILESYSTEM = 0x02 -SMB2_INFO_SECURITY = 0x03 -SMB2_INFO_QUOTA = 0x04 + +# Bitmask for Flags field in SMB2 message header +SMB2_FLAGS_SERVER_TO_REDIR = 0x01 +SMB2_FLAGS_ASYNC_COMMAND = 0x02 +SMB2_FLAGS_RELATED_OPERATIONS = 0x04 +SMB2_FLAGS_SIGNED = 0x08 +SMB2_FLAGS_DFS_OPERATIONS = 0x10000000 + +# Values for Command field in SMB2 message header +SMB2_COM_NEGOTIATE = 0x0000 +SMB2_COM_SESSION_SETUP = 0x0001 +SMB2_COM_LOGOFF = 0x0002 +SMB2_COM_TREE_CONNECT = 0x0003 +SMB2_COM_TREE_DISCONNECT = 0x0004 
+SMB2_COM_CREATE = 0x0005 +SMB2_COM_CLOSE = 0x0006 +SMB2_COM_FLUSH = 0x0007 +SMB2_COM_READ = 0x0008 +SMB2_COM_WRITE = 0x0009 +SMB2_COM_LOCK = 0x000A +SMB2_COM_IOCTL = 0x000B +SMB2_COM_CANCEL = 0x000C +SMB2_COM_ECHO = 0x000D +SMB2_COM_QUERY_DIRECTORY = 0x000E +SMB2_COM_CHANGE_NOTIFY = 0x000F +SMB2_COM_QUERY_INFO = 0x0010 +SMB2_COM_SET_INFO = 0x0011 +SMB2_COM_OPLOCK_BREAK = 0x0012 + +SMB2_COMMAND_NAMES = { + 0x0000: 'SMB2_COM_NEGOTIATE', + 0x0001: 'SMB2_COM_SESSION_SETUP', + 0x0002: 'SMB2_COM_LOGOFF', + 0x0003: 'SMB2_COM_TREE_CONNECT', + 0x0004: 'SMB2_COM_TREE_DISCONNECT', + 0x0005: 'SMB2_COM_CREATE', + 0x0006: 'SMB2_COM_CLOSE', + 0x0007: 'SMB2_COM_FLUSH', + 0x0008: 'SMB2_COM_READ', + 0x0009: 'SMB2_COM_WRITE', + 0x000A: 'SMB2_COM_LOCK', + 0x000B: 'SMB2_COM_IOCTL', + 0x000C: 'SMB2_COM_CANCEL', + 0x000D: 'SMB2_COM_ECHO', + 0x000E: 'SMB2_COM_QUERY_DIRECTORY', + 0x000F: 'SMB2_COM_CHANGE_NOTIFY', + 0x0010: 'SMB2_COM_QUERY_INFO', + 0x0011: 'SMB2_COM_SET_INFO', + 0x0012: 'SMB2_COM_OPLOCK_BREAK', +} + +# Values for dialect_revision field in SMB2NegotiateResponse class +SMB2_DIALECT_2 = 0x0202 # 2.0.2 - First SMB2 version +SMB2_DIALECT_21 = 0x0210 # 2.1 - Windows 7 +SMB2_DIALET_30 = 0x0300 # 3.0 - Windows 8 +SMB2_DIALECT_302 = 0x0302 # 3.0.2 - Windows 8.1 +SMB2_DIALECT_311 = 0x0311 # 3.1.1 - Windows 10 +SMB2_DIALECT_2ALL = 0x02FF # Wildcard (for negotiation only) + +# Bit mask for SecurityMode field in SMB2NegotiateResponse class +SMB2_NEGOTIATE_SIGNING_ENABLED = 0x0001 +SMB2_NEGOTIATE_SIGNING_REQUIRED = 0x0002 + +# Values for ShareType field in SMB2TreeConnectResponse class +SMB2_SHARE_TYPE_DISK = 0x01 +SMB2_SHARE_TYPE_PIPE = 0x02 +SMB2_SHARE_TYPE_PRINTER = 0x03 + +# Bitmask for Capabilities in SMB2TreeConnectResponse class +SMB2_SHARE_CAP_DFS = 0x0008 + + +# SMB 2.1 / 3 Capabilities flags +SMB2_GLOBAL_CAP_DFS = 0x01 +SMB2_GLOBAL_CAP_LEASING = 0x02 +SMB2_GLOBAL_CAP_LARGE_MTU = 0x04 +SMB2_GLOBAL_CAP_MULTI_CHANNEL = 0x08 +SMB2_GLOBAL_CAP_PERSISTENT_HANDLES = 0x10 
+SMB2_GLOBAL_CAP_DIRECTORY_LEASING = 0x20 +SMB2_GLOBAL_CAP_ENCRYPTION = 0x40 + + +# Values for OpLockLevel field in SMB2CreateRequest class +SMB2_OPLOCK_LEVEL_NONE = 0x00 +SMB2_OPLOCK_LEVEL_II = 0x01 +SMB2_OPLOCK_LEVEL_EXCLUSIVE = 0x08 +SMB2_OPLOCK_LEVEL_BATCH = 0x09 +SMB2_OPLOCK_LEVEL_LEASE = 0xFF + +# Values for FileAttributes field in SMB2CreateRequest class +# The values are defined in [MS-FSCC] 2.6 +SMB2_FILE_ATTRIBUTE_ARCHIVE = 0x0020 +SMB2_FILE_ATTRIBUTE_COMPRESSED = 0x0800 +SMB2_FILE_ATTRIBUTE_DIRECTORY = 0x0010 +SMB2_FILE_ATTRIBUTE_ENCRYPTED = 0x4000 +SMB2_FILE_ATTRIBUTE_HIDDEN = 0x0002 +SMB2_FILE_ATTRIBUTE_NORMAL = 0x0080 +SMB2_FILE_ATTRIBUTE_NOTINDEXED = 0x2000 +SMB2_FILE_ATTRIBUTE_OFFLINE = 0x1000 +SMB2_FILE_ATTRIBUTE_READONLY = 0x0001 +SMB2_FILE_ATTRIBUTE_SPARSE = 0x0200 +SMB2_FILE_ATTRIBUTE_SYSTEM = 0x0004 +SMB2_FILE_ATTRIBUTE_TEMPORARY = 0x0100 + +# Values for CreateAction field in SMB2CreateResponse class +SMB2_FILE_SUPERCEDED = 0x00 +SMB2_FILE_OPENED = 0x01 +SMB2_FILE_CREATED = 0x02 +SMB2_FILE_OVERWRITTEN = 0x03 + +# Values for InfoType field in SMB2QueryInfoRequest class +SMB2_INFO_FILE = 0x01 +SMB2_INFO_FILESYSTEM = 0x02 +SMB2_INFO_SECURITY = 0x03 +SMB2_INFO_QUOTA = 0x04 diff --git a/plugin.video.alfa/lib/sambatools/smb/smb2_structs.py b/plugin.video.alfa/lib/sambatools/smb/smb2_structs.py index 6cd84c50..2e13b491 100755 --- a/plugin.video.alfa/lib/sambatools/smb/smb2_structs.py +++ b/plugin.video.alfa/lib/sambatools/smb/smb2_structs.py @@ -1,851 +1,1001 @@ -import binascii -import logging -import os -import struct -from StringIO import StringIO - -from smb2_constants import * -from smb_constants import * -from smb_structs import ProtocolError -from utils import convertFILETIMEtoEpoch - - -class SMB2Message: - - HEADER_STRUCT_FORMAT = "<4sHHIHHI" # This refers to the common header part that is shared by both sync and async SMB2 header - HEADER_STRUCT_SIZE = struct.calcsize(HEADER_STRUCT_FORMAT) - - ASYNC_HEADER_STRUCT_FORMAT = "<IQQQ16s" - 
ASYNC_HEADER_STRUCT_SIZE = struct.calcsize(ASYNC_HEADER_STRUCT_FORMAT) - - SYNC_HEADER_STRUCT_FORMAT = "<IQIIQ16s" - SYNC_HEADER_STRUCT_SIZE = struct.calcsize(SYNC_HEADER_STRUCT_FORMAT) - - HEADER_SIZE = 64 - - log = logging.getLogger('SMB.SMB2Message') - protocol = 2 - - def __init__(self, payload = None): - self.reset() - if payload: - self.payload = payload - self.payload.initMessage(self) - - def __str__(self): - b = StringIO() - b.write('Command: 0x%02X (%s) %s' % ( self.command, SMB2_COMMAND_NAMES.get(self.command, '<unknown>'), os.linesep )) - b.write('Status: 0x%08X %s' % ( self.status, os.linesep )) - b.write('Flags: 0x%02X %s' % ( self.flags, os.linesep )) - b.write('PID: %d %s' % ( self.pid, os.linesep )) - b.write('MID: %d %s' % ( self.mid, os.linesep )) - b.write('TID: %d %s' % ( self.tid, os.linesep )) - b.write('Data: %d bytes %s%s %s' % ( len(self.data), os.linesep, binascii.hexlify(self.data), os.linesep )) - return b.getvalue() - - def reset(self): - self.raw_data = '' - self.command = 0 - self.status = 0 - self.flags = 0 - - self.next_command_offset = 0 - self.mid = 0 - self.session_id = 0 - self.signature = '\0'*16 - self.payload = None - self.data = '' - - # For async SMB2 message - self.async_id = 0 - - # For sync SMB2 message - self.pid = 0 - self.tid = 0 - - # Not used in this class. Maintained for compatibility with SMBMessage class - self.flags2 = 0 - self.uid = 0 - self.security = 0L - self.parameters_data = '' - - def encode(self): - """ - Encode this SMB2 message into a series of bytes suitable to be embedded with a NetBIOS session message. 
- AssertionError will be raised if this SMB message has not been initialized with a Payload instance - - @return: a string containing the encoded SMB2 message - """ - assert self.payload - - self.pid = os.getpid() - self.payload.prepare(self) - - headers_data = struct.pack(self.HEADER_STRUCT_FORMAT, - '\xFESMB', self.HEADER_SIZE, 0, self.status, self.command, 0, self.flags) + \ - struct.pack(self.SYNC_HEADER_STRUCT_FORMAT, self.next_command_offset, self.mid, self.pid, self.tid, self.session_id, self.signature) - return headers_data + self.data - - def decode(self, buf): - """ - Decodes the SMB message in buf. - All fields of the SMB2Message object will be reset to default values before decoding. - On errors, do not assume that the fields will be reinstated back to what they are before - this method is invoked. - - References - ========== - - [MS-SMB2]: 2.2.1 - - @param buf: data containing one complete SMB2 message - @type buf: string - @return: a positive integer indicating the number of bytes used in buf to decode this SMB message - @raise ProtocolError: raised when decoding fails - """ - buf_len = len(buf) - if buf_len < 64: # All SMB2 headers must be at least 64 bytes. 
[MS-SMB2]: 2.2.1.1, 2.2.1.2 - raise ProtocolError('Not enough data to decode SMB2 header', buf) - - self.reset() - - protocol, struct_size, self.credit_charge, self.status, \ - self.command, self.credit_re, self.flags = struct.unpack(self.HEADER_STRUCT_FORMAT, buf[:self.HEADER_STRUCT_SIZE]) - - if protocol != '\xFESMB': - raise ProtocolError('Invalid 4-byte SMB2 protocol field', buf) - - if struct_size != self.HEADER_SIZE: - raise ProtocolError('Invalid SMB2 header structure size') - - if self.isAsync: - if buf_len < self.HEADER_STRUCT_SIZE+self.ASYNC_HEADER_STRUCT_SIZE: - raise ProtocolError('Not enough data to decode SMB2 header', buf) - - self.next_command_offset, self.mid, self.async_id, self.session_id, \ - self.signature = struct.unpack(self.ASYNC_HEADER_STRUCT_FORMAT, - buf[self.HEADER_STRUCT_SIZE:self.HEADER_STRUCT_SIZE+self.ASYNC_HEADER_STRUCT_SIZE]) - else: - if buf_len < self.HEADER_STRUCT_SIZE+self.SYNC_HEADER_STRUCT_SIZE: - raise ProtocolError('Not enough data to decode SMB2 header', buf) - - self.next_command_offset, self.mid, self.pid, self.tid, self.session_id, \ - self.signature = struct.unpack(self.SYNC_HEADER_STRUCT_FORMAT, - buf[self.HEADER_STRUCT_SIZE:self.HEADER_STRUCT_SIZE+self.SYNC_HEADER_STRUCT_SIZE]) - - if self.next_command_offset > 0: - self.raw_data = buf[:self.next_command_offset] - self.data = buf[self.HEADER_SIZE:self.next_command_offset] - else: - self.raw_data = buf - self.data = buf[self.HEADER_SIZE:] - - self._decodeCommand() - if self.payload: - self.payload.decode(self) - - return len(self.raw_data) - - def _decodeCommand(self): - if self.command == SMB2_COM_READ: - self.payload = SMB2ReadResponse() - elif self.command == SMB2_COM_WRITE: - self.payload = SMB2WriteResponse() - elif self.command == SMB2_COM_QUERY_DIRECTORY: - self.payload = SMB2QueryDirectoryResponse() - elif self.command == SMB2_COM_CREATE: - self.payload = SMB2CreateResponse() - elif self.command == SMB2_COM_CLOSE: - self.payload = SMB2CloseResponse() - elif 
self.command == SMB2_COM_QUERY_INFO: - self.payload = SMB2QueryInfoResponse() - elif self.command == SMB2_COM_SET_INFO: - self.payload = SMB2SetInfoResponse() - elif self.command == SMB2_COM_IOCTL: - self.payload = SMB2IoctlResponse() - elif self.command == SMB2_COM_TREE_CONNECT: - self.payload = SMB2TreeConnectResponse() - elif self.command == SMB2_COM_SESSION_SETUP: - self.payload = SMB2SessionSetupResponse() - elif self.command == SMB2_COM_NEGOTIATE: - self.payload = SMB2NegotiateResponse() - elif self.command == SMB2_COM_ECHO: - self.payload = SMB2EchoResponse() - - @property - def isAsync(self): - return bool(self.flags & SMB2_FLAGS_ASYNC_COMMAND) - - @property - def isReply(self): - return bool(self.flags & SMB2_FLAGS_SERVER_TO_REDIR) - - -class Structure: - - def initMessage(self, message): - pass - - def prepare(self, message): - raise NotImplementedError - - def decode(self, message): - raise NotImplementedError - - -class SMB2NegotiateResponse(Structure): - """ - Contains information on the SMB2_NEGOTIATE response from server - - After calling the decode method, each instance will contain the following attributes, - - security_mode (integer) - - dialect_revision (integer) - - server_guid (string) - - max_transact_size (integer) - - max_read_size (integer) - - max_write_size (integer) - - system_time (long) - - server_start_time (long) - - security_blob (string) - - References: - =========== - - [MS-SMB2]: 2.2.4 - """ - - STRUCTURE_FORMAT = "<HHHH16sIIIIQQHHI" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def decode(self, message): - assert message.command == SMB2_COM_NEGOTIATE - - if message.status == 0: - struct_size, self.security_mode, self.dialect_revision, _, self.server_guid, self.capabilities, \ - self.max_transact_size, self.max_read_size, self.max_write_size, self.system_time, self.server_start_time, \ - security_buf_offset, security_buf_len, _ = struct.unpack(self.STRUCTURE_FORMAT, 
message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) - - self.server_start_time = convertFILETIMEtoEpoch(self.server_start_time) - self.system_time = convertFILETIMEtoEpoch(self.system_time) - self.security_blob = message.raw_data[security_buf_offset:security_buf_offset+security_buf_len] - - -class SMB2SessionSetupRequest(Structure): - """ - References: - =========== - - [MS-SMB2]: 2.2.5 - """ - - STRUCTURE_FORMAT = "<HBBIIHHQ" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def __init__(self, security_blob): - self.security_blob = security_blob - - def initMessage(self, message): - Structure.initMessage(self, message) - message.command = SMB2_COM_SESSION_SETUP - - def prepare(self, message): - message.data = struct.pack(self.STRUCTURE_FORMAT, - 25, # Structure size. Must be 25 as mandated by [MS-SMB2] 2.2.5 - 0, # VcNumber - 0x01, # Security mode - 0x00, # Capabilities - 0, # Channel - SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, - len(self.security_blob), - 0) + self.security_blob - - -class SMB2SessionSetupResponse(Structure): - """ - Contains information about the SMB2_COM_SESSION_SETUP response from the server. 
- - If the message has no errors, each instance contains the following attributes: - - session_flags (integer) - - security_blob (string) - - References: - =========== - - [MS-SMB2]: 2.2.6 - """ - - STRUCTURE_FORMAT = "<HHHH" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def decode(self, message): - assert message.command == SMB2_COM_SESSION_SETUP - - struct_size, self.session_flags, security_blob_offset, security_blob_len \ - = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) - - self.security_blob = message.raw_data[security_blob_offset:security_blob_offset+security_blob_len] - - -class SMB2TreeConnectRequest(Structure): - """ - References: - =========== - - [MS-SMB2]: 2.2.9 - """ - - STRUCTURE_FORMAT = "<HHHH" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def __init__(self, path): - self.path = path - - def initMessage(self, message): - Structure.initMessage(self, message) - message.command = SMB2_COM_TREE_CONNECT - - def prepare(self, message): - message.data = struct.pack(self.STRUCTURE_FORMAT, - 9, # Structure size. Must be 9 as mandated by [MS-SMB2] 2.2.9 - 0, # Reserved - SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, - len(self.path)*2) + self.path.encode('UTF-16LE') - - -class SMB2TreeConnectResponse(Structure): - """ - Contains information about the SMB2_COM_TREE_CONNECT response from the server. 
- - If the message has no errors, each instance contains the following attributes: - - share_type (integer): one of the SMB2_SHARE_TYPE_xxx constants - - share_flags (integer) - - capabilities (integer): bitmask of SMB2_SHARE_CAP_xxx - - maximal_access (integer) - - References: - =========== - - [MS-SMB2]: 2.2.10 - """ - - STRUCTURE_FORMAT = "<HBBIII" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def decode(self, message): - assert message.command == SMB2_COM_TREE_CONNECT - - if message.status == 0: - struct_size, self.share_type, _, \ - self.share_flags, self.capabilities, self.maximal_access \ - = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) - - -class SMB2CreateRequest(Structure): - """ - References: - =========== - - [MS-SMB2]: 2.2.13 - """ - - STRUCTURE_FORMAT = "<HBBIQQIIIIIHHII" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def __init__(self, filename, file_attributes = 0, - access_mask = 0, share_access = 0, create_disp = 0, create_options = 0, - impersonation = SEC_ANONYMOUS, - oplock = SMB2_OPLOCK_LEVEL_NONE, - create_context_data = ''): - self.filename = filename - self.file_attributes = file_attributes - self.access_mask = access_mask - self.share_access = share_access - self.create_disp = create_disp - self.create_options = create_options - self.oplock = oplock - self.impersonation = impersonation - self.create_context_data = create_context_data or '' - - def initMessage(self, message): - Structure.initMessage(self, message) - message.command = SMB2_COM_CREATE - - def prepare(self, message): - buf = self.filename.encode('UTF-16LE') - if self.create_context_data: - n = SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE + len(buf) - if n % 8 != 0: - buf += '\0'*(8-n%8) - create_context_offset = SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE + len(buf) - else: - create_context_offset = n - buf += self.create_context_data - else: - create_context_offset = 0 - if 
not buf: - buf = '\0' - - assert create_context_offset % 8 == 0 - message.data = struct.pack(self.STRUCTURE_FORMAT, - 57, # Structure size. Must be 57 as mandated by [MS-SMB2] 2.2.13 - 0, # SecurityFlag. Must be 0 - self.oplock, - self.impersonation, - 0, # SmbCreateFlags. Must be 0 - 0, # Reserved. Must be 0 - self.access_mask, # DesiredAccess. [MS-SMB2] 2.2.13.1 - self.file_attributes, - self.share_access, - self.create_disp, - self.create_options, - SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, # NameOffset - len(self.filename)*2, # NameLength in bytes - create_context_offset, # CreateContextOffset - len(self.create_context_data) # CreateContextLength - ) + buf - -class SMB2CreateResponse(Structure): - """ - Contains information about the SMB2_COM_CREATE response from the server. - - If the message has no errors, each instance contains the following attributes: - - oplock (integer): one of SMB2_OPLOCK_LEVEL_xxx constants - - create_action (integer): one of SMB2_FILE_xxx constants - - allocation_size (long) - - file_size (long) - - file_attributes (integer) - - fid (16-bytes string) - - create_time, lastaccess_time, lastwrite_time, change_time (float) - - References: - =========== - - [MS-SMB2]: 2.2.14 - """ - - STRUCTURE_FORMAT = "<HBBIQQQQQQII16sII" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def decode(self, message): - assert message.command == SMB2_COM_CREATE - - if message.status == 0: - struct_size, self.oplock, _, self.create_action, \ - create_time, lastaccess_time, lastwrite_time, change_time, \ - self.allocation_size, self.file_size, self.file_attributes, \ - _, self.fid, _, _ = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) - - self.create_time = convertFILETIMEtoEpoch(create_time) - self.lastaccess_time = convertFILETIMEtoEpoch(lastaccess_time) - self.lastwrite_time = convertFILETIMEtoEpoch(lastwrite_time) - self.change_time = 
convertFILETIMEtoEpoch(change_time) - - -class SMB2WriteRequest(Structure): - """ - References: - =========== - - [MS-SMB2]: 2.2.21 - """ - - STRUCTURE_FORMAT = "<HHIQ16sIIHHI" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def __init__(self, fid, data, offset, remaining_len = 0, flags = 0): - assert len(fid) == 16 - self.fid = fid - self.data = data - self.offset = offset - self.remaining_len = remaining_len - self.flags = flags - - def initMessage(self, message): - Structure.initMessage(self, message) - message.command = SMB2_COM_WRITE - - def prepare(self, message): - message.data = struct.pack(self.STRUCTURE_FORMAT, - 49, # Structure size. Must be 49 as mandated by [MS-SMB2] 2.2.21 - SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, # DataOffset - len(self.data), - self.offset, - self.fid, - 0, # Channel. Must be 0 - self.remaining_len, # RemainingBytes - 0, # WriteChannelInfoOffset, - 0, # WriteChannelInfoLength - self.flags) + self.data - - -class SMB2WriteResponse(Structure): - """ - Contains information about the SMB2_WRITE response from the server. 
- - If the message has no errors, each instance contains the following attributes: - - count (integer) - - References: - =========== - - [MS-SMB2]: 2.2.22 - """ - - STRUCTURE_FORMAT = "<HHIIHH" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def decode(self, message): - assert message.command == SMB2_COM_WRITE - if message.status == 0: - struct_size, _, self.count, _, _, _ = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) - - - -class SMB2ReadRequest(Structure): - """ - References: - =========== - - [MS-SMB2]: 2.2.19 - """ - - STRUCTURE_FORMAT = "<HBBIQ16sIIIHH" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def __init__(self, fid, read_offset, read_len, min_read_len = 0): - self.fid = fid - self.read_offset = read_offset - self.read_len = read_len - self.min_read_len = min_read_len - - def initMessage(self, message): - Structure.initMessage(self, message) - message.command = SMB2_COM_READ - - def prepare(self, message): - message.data = struct.pack(self.STRUCTURE_FORMAT, - 49, # Structure size. 
Must be 49 as mandated by [MS-SMB2] 2.2.19 - 0, # Padding - 0, # Reserved - self.read_len, - self.read_offset, - self.fid, - self.min_read_len, - 0, # Channel - 0, # RemainingBytes - 0, # ReadChannelInfoOffset - 0 # ReadChannelInfoLength - ) + '\0' - - -class SMB2ReadResponse(Structure): - """ - References: - =========== - - [MS-SMB2]: 2.2.20 - """ - - STRUCTURE_FORMAT = "<HBBIII" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def decode(self, message): - assert message.command == SMB2_COM_READ - - if message.status == 0: - struct_size, data_offset, _, self.data_length, _, _ = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) - self.data = message.raw_data[data_offset:data_offset+self.data_length] - - -class SMB2IoctlRequest(Structure): - """ - References: - =========== - - [MS-SMB2]: 2.2.31 - """ - - STRUCTURE_FORMAT = "<HHI16sIIIIIIII" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def __init__(self, fid, ctlcode, flags, in_data, max_out_size = 65536): - self.ctlcode = ctlcode - self.fid = fid - self.flags = flags - self.in_data = in_data - self.max_out_size = max_out_size - - def initMessage(self, message): - Structure.initMessage(self, message) - message.command = SMB2_COM_IOCTL - - def prepare(self, message): - message.data = struct.pack(self.STRUCTURE_FORMAT, - 57, # Structure size. Must be 57 as mandated by [MS-SMB2] 2.2.31 - 0, # Reserved - self.ctlcode, # CtlCode - self.fid, - SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, # InputOffset - len(self.in_data), # InputCount - 0, # MaxInputResponse - 0, # OutputOffset - 0, # OutputCount - self.max_out_size, # MaxOutputResponse - self.flags, # Flags - 0 # Reserved - ) + self.in_data - - -class SMB2IoctlResponse(Structure): - """ - Contains information about the SMB2_IOCTL response from the server. 
- - If the message has no errors, each instance contains the following attributes: - - ctlcode (integer) - - fid (16-bytes string) - - flags (integer) - - in_data (string) - - out_data (string) - - References: - =========== - - [MS-SMB2]: 2.2.32 - """ - - STRUCTURE_FORMAT = "<HHI16sIIIIII" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def decode(self, message): - assert message.command == SMB2_COM_IOCTL - - if message.status == 0: - struct_size, _, self.ctlcode, self.fid, \ - input_offset, input_len, output_offset, output_len, \ - self.flags, _ = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) - - if input_len > 0: - self.in_data = message.raw_data[input_offset:input_offset+input_len] - else: - self.in_data = '' - - if output_len > 0: - self.out_data = message.raw_data[output_offset:output_offset+output_len] - else: - self.out_data = '' - - -class SMB2CloseRequest(Structure): - """ - References: - =========== - - [MS-SMB2]: 2.2.15 - """ - - STRUCTURE_FORMAT = "<HHI16s" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def __init__(self, fid, flags = 0): - self.fid = fid - self.flags = flags - - def initMessage(self, message): - Structure.initMessage(self, message) - message.command = SMB2_COM_CLOSE - - def prepare(self, message): - message.data = struct.pack(self.STRUCTURE_FORMAT, - 24, # Structure size. Must be 24 as mandated by [MS-SMB2]: 2.2.15 - self.flags, - 0, # Reserved. 
Must be 0 - self.fid) - - -class SMB2CloseResponse(Structure): - """ - References: - =========== - - [MS-SMB2]: 2.2.16 - """ - - def decode(self, message): - assert message.command == SMB2_COM_CLOSE - - -class SMB2QueryDirectoryRequest(Structure): - """ - References: - =========== - - [MS-SMB2]: 2.2.33 - """ - - STRUCTURE_FORMAT = "<HBBI16sHHI" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def __init__(self, fid, filename, info_class, flags, output_buf_len): - self.fid = fid - self.filename = filename - self.info_class = info_class - self.flags = flags - self.output_buf_len = output_buf_len - - def initMessage(self, message): - Structure.initMessage(self, message) - message.command = SMB2_COM_QUERY_DIRECTORY - - def prepare(self, message): - message.data = struct.pack(self.STRUCTURE_FORMAT, - 33, # Structure size. Must be 33 as mandated by [MS-SMB2] 2.2.33 - self.info_class, # FileInformationClass - self.flags, # Flags - 0, # FileIndex - self.fid, # FileID - SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, # FileNameOffset - len(self.filename)*2, - self.output_buf_len) + self.filename.encode('UTF-16LE') - - -class SMB2QueryDirectoryResponse(Structure): - """ - Contains information about the SMB2_COM_QUERY_DIRECTORY response from the server. 
- - If the message has no errors, each instance contains the following attributes: - - data_length (integer) - - data (string) - - References: - =========== - - [MS-SMB2]: 2.2.34 - """ - - STRUCTURE_FORMAT = "<HHI" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def decode(self, message): - assert message.command == SMB2_COM_QUERY_DIRECTORY - - if message.status == 0: - struct_size, offset, self.data_length = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) - self.data = message.raw_data[offset:offset+self.data_length] - - -class SMB2QueryInfoRequest(Structure): - """ - References: - =========== - - [MS-SMB2]: 2.2.37 - """ - - STRUCTURE_FORMAT = "<HBBIHHIII16s" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def __init__(self, fid, flags, additional_info, info_type, file_info_class, input_buf, output_buf_len): - self.fid = fid - self.flags = flags - self.additional_info = additional_info - self.info_type = info_type - self.file_info_class = file_info_class - self.output_buf_len = output_buf_len - self.input_buf = input_buf or '' - - def initMessage(self, message): - Structure.initMessage(self, message) - message.command = SMB2_COM_QUERY_INFO - - def prepare(self, message): - message.data = struct.pack(self.STRUCTURE_FORMAT, - 41, # Structure size. Must be 41 as mandated by [MS-SMB2] 2.2.37 - self.info_type, # InfoType - self.file_info_class, # FileInfoClass - self.output_buf_len, # OutputBufferLength - SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, # InputBufferOffset - 0, # Reserved - len(self.input_buf), # InputBufferLength - self.additional_info, # AdditionalInformation - self.flags, # Flags - self.fid # FileId - ) + self.input_buf - - -class SMB2QueryInfoResponse(Structure): - """ - Contains information about the SMB2_COM_QUERY_INFO response from the server. 
- - If the message has no errors, each instance contains the following attributes: - - data_length (integer) - - data (string) - - References: - =========== - - [MS-SMB2]: 2.2.38 - """ - - STRUCTURE_FORMAT = "<HHI" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def decode(self, message): - assert message.command == SMB2_COM_QUERY_INFO - - if message.status == 0: - struct_size, buf_offset, self.data_length = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) - self.data = message.raw_data[buf_offset:buf_offset+self.data_length] - - -class SMB2SetInfoRequest(Structure): - """ - References: - =========== - - [MS-SMB2]: 2.2.39 - """ - - STRUCTURE_FORMAT = "<HBBIHHI16s" - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def __init__(self, fid, additional_info, info_type, file_info_class, data): - self.fid = fid - self.additional_info = additional_info - self.info_type = info_type - self.file_info_class = file_info_class - self.data = data or '' - - def initMessage(self, message): - Structure.initMessage(self, message) - message.command = SMB2_COM_SET_INFO - - def prepare(self, message): - message.data = struct.pack(self.STRUCTURE_FORMAT, - 33, # StructureSize. 
Must be 33 as mandated by [MS-SMB2] 2.2.39 - self.info_type, # InfoType - self.file_info_class, # FileInfoClass - len(self.data), # BufferLength - SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, # BufferOffset - 0, # Reserved - self.additional_info, # AdditionalInformation - self.fid # FileId - ) + self.data - -class SMB2SetInfoResponse(Structure): - """ - References: - =========== - - [MS-SMB2]: 2.2.40 - """ - - def decode(self, message): - pass - - -class SMB2EchoRequest(Structure): - """ - References: - =========== - - [MS-SMB2]: 2.2.28 - """ - - STRUCTURE_FORMAT = '<HH' - STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) - - def initMessage(self, message): - Structure.initMessage(self, message) - message.command = SMB2_COM_ECHO - - def prepare(self, message): - message.data = struct.pack(self.STRUCTURE_FORMAT, - 4, # StructureSize. Must be 4 as mandated by [MS-SMB2] 2.2.29 - 0) # Reserved - -class SMB2EchoResponse(Structure): - """ - References: - =========== - - [MS-SMB2]: 2.2.29 - """ - - def decode(self, message): - pass + +import os, sys, struct, types, logging, binascii, time, uuid +from StringIO import StringIO +from smb_structs import ProtocolError +from smb_constants import * +from smb2_constants import * +from utils import convertFILETIMEtoEpoch + + +class SMB2Message: + + HEADER_STRUCT_FORMAT = "<4sHHIHHI" # This refers to the common header part that is shared by both sync and async SMB2 header + HEADER_STRUCT_SIZE = struct.calcsize(HEADER_STRUCT_FORMAT) + + ASYNC_HEADER_STRUCT_FORMAT = "<IQQQ16s" + ASYNC_HEADER_STRUCT_SIZE = struct.calcsize(ASYNC_HEADER_STRUCT_FORMAT) + + SYNC_HEADER_STRUCT_FORMAT = "<IQIIQ16s" + SYNC_HEADER_STRUCT_SIZE = struct.calcsize(SYNC_HEADER_STRUCT_FORMAT) + + HEADER_SIZE = 64 + + log = logging.getLogger('SMB.SMB2Message') + protocol = 2 + + + def __init__(self, conn = None, payload = None): + """ + Initialise a new SMB2 Message. 
+ conn - reference to the connection, the SMB class + payload - the message payload, if any + """ + self.reset() + self.conn = conn + if payload: + self.payload = payload + self.payload.initMessage(self) + + def __str__(self): + b = StringIO() + b.write('Command: 0x%02X (%s) %s' % ( self.command, SMB2_COMMAND_NAMES.get(self.command, '<unknown>'), os.linesep )) + b.write('Status: 0x%08X %s' % ( self.status, os.linesep )) + b.write('Flags: 0x%02X %s' % ( self.flags, os.linesep )) + b.write('PID: %d %s' % ( self.pid, os.linesep )) + b.write('MID: %d %s' % ( self.mid, os.linesep )) + b.write('TID: %d %s' % ( self.tid, os.linesep )) + b.write('Data: %d bytes %s%s %s' % ( len(self.data), os.linesep, binascii.hexlify(self.data), os.linesep )) + return b.getvalue() + + def reset(self): + self.raw_data = '' + self.command = 0 + self.status = 0 + self.flags = 0 + + self.next_command_offset = 0 + self.mid = 0 + self.session_id = 0 + self.signature = '\0'*16 + self.payload = None + self.data = '' + + # For async SMB2 message + self.async_id = 0 + + # For sync SMB2 message + self.pid = 0 + self.tid = 0 + + # credit related + self.credit_charge = 0 + self.credit_request = 1 + + # Not used in this class. Maintained for compatibility with SMBMessage class + self.flags2 = 0 + self.uid = 0 + self.security = 0L + self.parameters_data = '' + + def encode(self): + """ + Encode this SMB2 message into a series of bytes suitable to be embedded with a NetBIOS session message. 
+ AssertionError will be raised if this SMB message has not been initialized with an SMB instance + AssertionError will be raised if this SMB message has not been initialized with a Payload instance + + The header format is: + - Protocol ID + - Structure Size + - Credit Charge + - Status / Channel Sequence + - Command + - Credit Request / Credit Response + - Flags + - Next Compound + - MessageId + - Reserved + - TreeId + - Session ID + - Signature + + @return: a string containing the encoded SMB2 message + """ + assert self.payload + assert self.conn + + self.pid = os.getpid() + self.payload.prepare(self) + + # If Connection.Dialect is not "2.0.2" and if Connection.SupportsMultiCredit is TRUE, the + # CreditCharge field in the SMB2 header MUST be set to ( 1 + (OutputBufferLength - 1) / 65536 ) + # This only applies to SMB2ReadRequest, SMB2WriteRequest, SMB2IoctlRequest and SMB2QueryDirectory + # See: MS-SMB2 3.2.4.1.5: For all other requests, the client MUST set CreditCharge to 1, even if the + # payload size of a request or the anticipated response is greater than 65536. 
+ if self.conn.smb2_dialect != SMB2_DIALECT_2: + if self.conn.cap_multi_credit: + # self.credit_charge will be set by some commands if necessary (Read/Write/Ioctl/QueryDirectory) + # If not set, but dialect is SMB 2.1 or above, we must set it to 1 + if self.credit_charge is 0: + self.credit_charge = 1 + else: + # If >= SMB 2.1, but server does not support multi credit operations we must set to 1 + self.credit_charge = 1 + + if self.mid > 3: + self.credit_request = 127 + + headers_data = struct.pack(self.HEADER_STRUCT_FORMAT, + '\xFESMB', # Protocol ID + self.HEADER_SIZE, # Structure Size + self.credit_charge, # Credit Charge + self.status, # Status / Channel Sequence + self.command, # Command + self.credit_request, # Credit Request / Credit Response + self.flags, # Flags + ) + \ + struct.pack(self.SYNC_HEADER_STRUCT_FORMAT, + self.next_command_offset, # Next Compound + self.mid, # Message ID + self.pid, # Process ID + self.tid, # Tree ID + self.session_id, # Session ID + self.signature) # Signature + return headers_data + self.data + + def decode(self, buf): + """ + Decodes the SMB message in buf. + All fields of the SMB2Message object will be reset to default values before decoding. + On errors, do not assume that the fields will be reinstated back to what they are before + this method is invoked. + + References + ========== + - [MS-SMB2]: 2.2.1 + + @param buf: data containing one complete SMB2 message + @type buf: string + @return: a positive integer indicating the number of bytes used in buf to decode this SMB message + @raise ProtocolError: raised when decoding fails + """ + buf_len = len(buf) + if buf_len < 64: # All SMB2 headers must be at least 64 bytes. 
[MS-SMB2]: 2.2.1.1, 2.2.1.2 + raise ProtocolError('Not enough data to decode SMB2 header', buf) + + self.reset() + + protocol, struct_size, self.credit_charge, self.status, \ + self.command, self.credit_response, \ + self.flags = struct.unpack(self.HEADER_STRUCT_FORMAT, buf[:self.HEADER_STRUCT_SIZE]) + + if protocol != '\xFESMB': + raise ProtocolError('Invalid 4-byte SMB2 protocol field', buf) + + if struct_size != self.HEADER_SIZE: + raise ProtocolError('Invalid SMB2 header structure size') + + if self.isAsync: + if buf_len < self.HEADER_STRUCT_SIZE+self.ASYNC_HEADER_STRUCT_SIZE: + raise ProtocolError('Not enough data to decode SMB2 header', buf) + + self.next_command_offset, self.mid, self.async_id, self.session_id, \ + self.signature = struct.unpack(self.ASYNC_HEADER_STRUCT_FORMAT, + buf[self.HEADER_STRUCT_SIZE:self.HEADER_STRUCT_SIZE+self.ASYNC_HEADER_STRUCT_SIZE]) + else: + if buf_len < self.HEADER_STRUCT_SIZE+self.SYNC_HEADER_STRUCT_SIZE: + raise ProtocolError('Not enough data to decode SMB2 header', buf) + + self.next_command_offset, self.mid, self.pid, self.tid, self.session_id, \ + self.signature = struct.unpack(self.SYNC_HEADER_STRUCT_FORMAT, + buf[self.HEADER_STRUCT_SIZE:self.HEADER_STRUCT_SIZE+self.SYNC_HEADER_STRUCT_SIZE]) + + if self.next_command_offset > 0: + self.raw_data = buf[:self.next_command_offset] + self.data = buf[self.HEADER_SIZE:self.next_command_offset] + else: + self.raw_data = buf + self.data = buf[self.HEADER_SIZE:] + + self._decodeCommand() + if self.payload: + self.payload.decode(self) + + return len(self.raw_data) + + def _decodeCommand(self): + if self.command == SMB2_COM_READ: + self.payload = SMB2ReadResponse() + elif self.command == SMB2_COM_WRITE: + self.payload = SMB2WriteResponse() + elif self.command == SMB2_COM_QUERY_DIRECTORY: + self.payload = SMB2QueryDirectoryResponse() + elif self.command == SMB2_COM_CREATE: + self.payload = SMB2CreateResponse() + elif self.command == SMB2_COM_CLOSE: + self.payload = SMB2CloseResponse() 
+ elif self.command == SMB2_COM_QUERY_INFO: + self.payload = SMB2QueryInfoResponse() + elif self.command == SMB2_COM_SET_INFO: + self.payload = SMB2SetInfoResponse() + elif self.command == SMB2_COM_IOCTL: + self.payload = SMB2IoctlResponse() + elif self.command == SMB2_COM_TREE_CONNECT: + self.payload = SMB2TreeConnectResponse() + elif self.command == SMB2_COM_SESSION_SETUP: + self.payload = SMB2SessionSetupResponse() + elif self.command == SMB2_COM_NEGOTIATE: + self.payload = SMB2NegotiateResponse() + elif self.command == SMB2_COM_ECHO: + self.payload = SMB2EchoResponse() + + @property + def isAsync(self): + return bool(self.flags & SMB2_FLAGS_ASYNC_COMMAND) + + @property + def isReply(self): + return bool(self.flags & SMB2_FLAGS_SERVER_TO_REDIR) + + +class Structure: + + def initMessage(self, message): + pass + + def prepare(self, message): + raise NotImplementedError + + def decode(self, message): + raise NotImplementedError + + +class SMB2NegotiateRequest(Structure): + """ + 2.2.3 SMB2 NEGOTIATE Request + The SMB2 NEGOTIATE Request packet is used by the client to notify the server what dialects of the SMB 2 Protocol + the client understands. This request is composed of an SMB2 header, as specified in section 2.2.1, + followed by this request structure: + + SMB2 Negotiate Request Packet structure: + StructureSize (2 bytes) + DialectCount (2 bytes) + SecurityMode (2 bytes) + Reserved (2 bytes) + Capabilities (4 bytes) + ClientGuid (16 bytes) + ClientStartTime (8 bytes): + ClientStartTime (8 bytes): + Dialects (variable): An array of one or more 16-bit integers + + References: + =========== + - [MS-SMB2]: 2.2.3 + + """ + + + STRUCTURE_FORMAT = "<HHHHI16sQHH" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def initMessage(self, message): + Structure.initMessage(self, message) + message.command = SMB2_COM_NEGOTIATE + + def prepare(self, message): + # TODO! Do we need to save the GUID and present it later in other requests? 
+ # The SMB docs don't exactly explain what the guid is for + message.data = struct.pack(self.STRUCTURE_FORMAT, + 36, # Structure size. Must be 36 as mandated by [MS-SMB2] 2.2.3 + 2, # DialectCount + 0x01, # Security mode + 0, # Reserved + 0x00, # Capabilities + uuid.uuid4().bytes, # Client GUID + 0, # Client start time + SMB2_DIALECT_2, + SMB2_DIALECT_21) + + +class SMB2NegotiateResponse(Structure): + """ + Contains information on the SMB2_NEGOTIATE response from server + + After calling the decode method, each instance will contain the following attributes, + - security_mode (integer) + - dialect_revision (integer) + - server_guid (string) + - max_transact_size (integer) + - max_read_size (integer) + - max_write_size (integer) + - system_time (long) + - server_start_time (long) + - security_blob (string) + + References: + =========== + - [MS-SMB2]: 2.2.4 + """ + + STRUCTURE_FORMAT = "<HHHH16sIIIIQQHHI" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def decode(self, message): + assert message.command == SMB2_COM_NEGOTIATE + + if message.status == 0: + struct_size, self.security_mode, self.dialect_revision, _, self.server_guid, self.capabilities, \ + self.max_transact_size, self.max_read_size, self.max_write_size, self.system_time, self.server_start_time, \ + security_buf_offset, security_buf_len, _ = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) + + self.server_start_time = convertFILETIMEtoEpoch(self.server_start_time) + self.system_time = convertFILETIMEtoEpoch(self.system_time) + self.security_blob = message.raw_data[security_buf_offset:security_buf_offset+security_buf_len] + message.conn.smb2_dialect = self.dialect_revision + + +class SMB2SessionSetupRequest(Structure): + """ + References: + =========== + - [MS-SMB2]: 2.2.5 + """ + + STRUCTURE_FORMAT = "<HBBIIHHQ" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def __init__(self, security_blob): + 
self.security_blob = security_blob + + def initMessage(self, message): + Structure.initMessage(self, message) + message.command = SMB2_COM_SESSION_SETUP + + def prepare(self, message): + message.data = struct.pack(self.STRUCTURE_FORMAT, + 25, # Structure size. Must be 25 as mandated by [MS-SMB2] 2.2.5 + 0, # VcNumber + 0x01, # Security mode + 0x00, # Capabilities + 0, # Channel + SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, + len(self.security_blob), + 0) + self.security_blob + + +class SMB2SessionSetupResponse(Structure): + """ + Contains information about the SMB2_COM_SESSION_SETUP response from the server. + + If the message has no errors, each instance contains the following attributes: + - session_flags (integer) + - security_blob (string) + + References: + =========== + - [MS-SMB2]: 2.2.6 + """ + + STRUCTURE_FORMAT = "<HHHH" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + @property + def isGuestSession(self): + return (self.session_flags & 0x0001) > 0 # SMB2_SESSION_FLAG_IS_GUEST + + @property + def isAnonymousSession(self): + return (self.session_flags & 0x0002) > 0 # SMB2_SESSION_FLAG_IS_NULL + + def decode(self, message): + assert message.command == SMB2_COM_SESSION_SETUP + + struct_size, self.session_flags, security_blob_offset, security_blob_len \ + = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) + + self.security_blob = message.raw_data[security_blob_offset:security_blob_offset+security_blob_len] + + +class SMB2TreeConnectRequest(Structure): + """ + References: + =========== + - [MS-SMB2]: 2.2.9 + """ + + STRUCTURE_FORMAT = "<HHHH" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def __init__(self, path): + self.path = path + + def initMessage(self, message): + Structure.initMessage(self, message) + message.command = SMB2_COM_TREE_CONNECT + + def prepare(self, message): + message.data = struct.pack(self.STRUCTURE_FORMAT, + 9, # Structure size. 
Must be 9 as mandated by [MS-SMB2] 2.2.9 + 0, # Reserved + SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, + len(self.path)*2) + self.path.encode('UTF-16LE') + + +class SMB2TreeConnectResponse(Structure): + """ + Contains information about the SMB2_COM_TREE_CONNECT response from the server. + + If the message has no errors, each instance contains the following attributes: + - share_type (integer): one of the SMB2_SHARE_TYPE_xxx constants + - share_flags (integer) + - capabilities (integer): bitmask of SMB2_SHARE_CAP_xxx + - maximal_access (integer) + + References: + =========== + - [MS-SMB2]: 2.2.10 + """ + + STRUCTURE_FORMAT = "<HBBIII" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def decode(self, message): + assert message.command == SMB2_COM_TREE_CONNECT + + if message.status == 0: + struct_size, self.share_type, _, \ + self.share_flags, self.capabilities, self.maximal_access \ + = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) + + +class SMB2CreateRequest(Structure): + """ + References: + =========== + - [MS-SMB2]: 2.2.13 + """ + + STRUCTURE_FORMAT = "<HBBIQQIIIIIHHII" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def __init__(self, filename, file_attributes = 0, + access_mask = 0, share_access = 0, create_disp = 0, create_options = 0, + impersonation = SEC_ANONYMOUS, + oplock = SMB2_OPLOCK_LEVEL_NONE, + create_context_data = ''): + self.filename = filename + self.file_attributes = file_attributes + self.access_mask = access_mask + self.share_access = share_access + self.create_disp = create_disp + self.create_options = create_options + self.oplock = oplock + self.impersonation = impersonation + self.create_context_data = create_context_data or '' + + def initMessage(self, message): + Structure.initMessage(self, message) + message.command = SMB2_COM_CREATE + + def prepare(self, message): + buf = self.filename.encode('UTF-16LE') + if self.create_context_data: + n = 
SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE + len(buf) + if n % 8 != 0: + buf += '\0'*(8-n%8) + create_context_offset = SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE + len(buf) + else: + create_context_offset = n + buf += self.create_context_data + else: + create_context_offset = 0 + if not buf: + buf = '\0' + + assert create_context_offset % 8 == 0 + message.data = struct.pack(self.STRUCTURE_FORMAT, + 57, # Structure size. Must be 57 as mandated by [MS-SMB2] 2.2.13 + 0, # SecurityFlag. Must be 0 + self.oplock, + self.impersonation, + 0, # SmbCreateFlags. Must be 0 + 0, # Reserved. Must be 0 + self.access_mask, # DesiredAccess. [MS-SMB2] 2.2.13.1 + self.file_attributes, + self.share_access, + self.create_disp, + self.create_options, + SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, # NameOffset + len(self.filename)*2, # NameLength in bytes + create_context_offset, # CreateContextOffset + len(self.create_context_data) # CreateContextLength + ) + buf + +class SMB2CreateResponse(Structure): + """ + Contains information about the SMB2_COM_CREATE response from the server. 
+ + If the message has no errors, each instance contains the following attributes: + - oplock (integer): one of SMB2_OPLOCK_LEVEL_xxx constants + - create_action (integer): one of SMB2_FILE_xxx constants + - allocation_size (long) + - file_size (long) + - file_attributes (integer) + - fid (16-bytes string) + - create_time, lastaccess_time, lastwrite_time, change_time (float) + + References: + =========== + - [MS-SMB2]: 2.2.14 + """ + + STRUCTURE_FORMAT = "<HBBIQQQQQQII16sII" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def decode(self, message): + assert message.command == SMB2_COM_CREATE + + if message.status == 0: + struct_size, self.oplock, _, self.create_action, \ + create_time, lastaccess_time, lastwrite_time, change_time, \ + self.allocation_size, self.file_size, self.file_attributes, \ + _, self.fid, _, _ = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) + + self.create_time = convertFILETIMEtoEpoch(create_time) + self.lastaccess_time = convertFILETIMEtoEpoch(lastaccess_time) + self.lastwrite_time = convertFILETIMEtoEpoch(lastwrite_time) + self.change_time = convertFILETIMEtoEpoch(change_time) + + +class SMB2WriteRequest(Structure): + """ + References: + =========== + - [MS-SMB2]: 2.2.21 + """ + + STRUCTURE_FORMAT = "<HHIQ16sIIHHI" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def __init__(self, fid, data, offset, remaining_len = 0, flags = 0): + assert len(fid) == 16 + self.fid = fid + self.data = data + self.offset = offset + self.remaining_len = remaining_len + self.flags = flags + + def initMessage(self, message): + Structure.initMessage(self, message) + message.command = SMB2_COM_WRITE + + def prepare(self, message): + message.data = struct.pack(self.STRUCTURE_FORMAT, + 49, # Structure size. Must be 49 as mandated by [MS-SMB2] 2.2.21 + SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, # DataOffset + len(self.data), + self.offset, + self.fid, + 0, # Channel. 
Must be 0 + self.remaining_len, # RemainingBytes + 0, # WriteChannelInfoOffset, + 0, # WriteChannelInfoLength + self.flags) + self.data + + # MS-SMB2 3.2.4.7 + # If a client requests writing to a file, Connection.Dialect is not "2.0.2", and if + # Connection.SupportsMultiCredit is TRUE, the CreditCharge field in the SMB2 header MUST be set + # to ( 1 + (Length - 1) / 65536 ) + if message.conn.smb2_dialect != SMB2_DIALECT_2 and message.conn.cap_multi_credit: + message.credit_charge = int(1 + (len(self.data) -1) / 65536) + + +class SMB2WriteResponse(Structure): + """ + Contains information about the SMB2_WRITE response from the server. + + If the message has no errors, each instance contains the following attributes: + - count (integer) + + References: + =========== + - [MS-SMB2]: 2.2.22 + """ + + STRUCTURE_FORMAT = "<HHIIHH" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def decode(self, message): + assert message.command == SMB2_COM_WRITE + if message.status == 0: + struct_size, _, self.count, _, _, _ = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) + + + +class SMB2ReadRequest(Structure): + """ + References: + =========== + - [MS-SMB2]: 2.2.19 + """ + + STRUCTURE_FORMAT = "<HBBIQ16sIIIHH" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def __init__(self, fid, read_offset, read_len, min_read_len = 0): + self.fid = fid + self.read_offset = read_offset + self.read_len = read_len + self.min_read_len = min_read_len + + def initMessage(self, message): + Structure.initMessage(self, message) + message.command = SMB2_COM_READ + + def prepare(self, message): + message.data = struct.pack(self.STRUCTURE_FORMAT, + 49, # Structure size. 
Must be 49 as mandated by [MS-SMB2] 2.2.19 + 0, # Padding + 0, # Reserved + self.read_len, + self.read_offset, + self.fid, + self.min_read_len, + 0, # Channel + 0, # RemainingBytes + 0, # ReadChannelInfoOffset + 0 # ReadChannelInfoLength + ) + '\0' + + # MS-SMB2 3.2.4.6 + # If a client requests reading from a file, Connection.Dialect is not "2.0.2", and if + # Connection.SupportsMultiCredit is TRUE, the CreditCharge field in the SMB2 header MUST be set + # to ( 1 + (Length - 1) / 65536 ) + if message.conn.smb2_dialect != SMB2_DIALECT_2 and message.conn.cap_multi_credit: + message.credit_charge = int(1 + (self.read_len -1) / 65536) + + +class SMB2ReadResponse(Structure): + """ + References: + =========== + - [MS-SMB2]: 2.2.20 + """ + + STRUCTURE_FORMAT = "<HBBIII" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def decode(self, message): + assert message.command == SMB2_COM_READ + + if message.status == 0: + struct_size, data_offset, _, self.data_length, _, _ = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) + self.data = message.raw_data[data_offset:data_offset+self.data_length] + + +class SMB2IoctlRequest(Structure): + """ + References: + =========== + - [MS-SMB2]: 2.2.31 + """ + + STRUCTURE_FORMAT = "<HHI16sIIIIIIII" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def __init__(self, fid, ctlcode, flags, in_data, max_out_size = 65536): + self.ctlcode = ctlcode + self.fid = fid + self.flags = flags + self.in_data = in_data + self.max_out_size = max_out_size + + def initMessage(self, message): + Structure.initMessage(self, message) + message.command = SMB2_COM_IOCTL + + def prepare(self, message): + message.data = struct.pack(self.STRUCTURE_FORMAT, + 57, # Structure size. 
Must be 57 as mandated by [MS-SMB2] 2.2.31 + 0, # Reserved + self.ctlcode, # CtlCode + self.fid, + SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, # InputOffset + len(self.in_data), # InputCount + 0, # MaxInputResponse + 0, # OutputOffset + 0, # OutputCount + self.max_out_size, # MaxOutputResponse + self.flags, # Flags + 0 # Reserved + ) + self.in_data + + # If Connection.SupportsMultiCredit is TRUE, the CreditCharge field in the SMB2 header + # SHOULD be set to (max(InputCount, MaxOutputResponse) - 1) / 65536 + 1 + if message.conn.smb2_dialect != SMB2_DIALECT_2 and message.conn.cap_multi_credit: + message.credit_charge = int((max(len(self.in_data), self.max_out_size) - 1) / 65536 + 1) + + +class SMB2IoctlResponse(Structure): + """ + Contains information about the SMB2_IOCTL response from the server. + + If the message has no errors, each instance contains the following attributes: + - ctlcode (integer) + - fid (16-bytes string) + - flags (integer) + - in_data (string) + - out_data (string) + + References: + =========== + - [MS-SMB2]: 2.2.32 + """ + + STRUCTURE_FORMAT = "<HHI16sIIIIII" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def decode(self, message): + assert message.command == SMB2_COM_IOCTL + + if message.status == 0: + struct_size, _, self.ctlcode, self.fid, \ + input_offset, input_len, output_offset, output_len, \ + self.flags, _ = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) + + if input_len > 0: + self.in_data = message.raw_data[input_offset:input_offset+input_len] + else: + self.in_data = '' + + if output_len > 0: + self.out_data = message.raw_data[output_offset:output_offset+output_len] + else: + self.out_data = '' + + +class SMB2CloseRequest(Structure): + """ + References: + =========== + - [MS-SMB2]: 2.2.15 + """ + + STRUCTURE_FORMAT = "<HHI16s" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def __init__(self, fid, flags = 0): + self.fid = fid + 
self.flags = flags + + def initMessage(self, message): + Structure.initMessage(self, message) + message.command = SMB2_COM_CLOSE + + def prepare(self, message): + message.data = struct.pack(self.STRUCTURE_FORMAT, + 24, # Structure size. Must be 24 as mandated by [MS-SMB2]: 2.2.15 + self.flags, + 0, # Reserved. Must be 0 + self.fid) + + +class SMB2CloseResponse(Structure): + """ + References: + =========== + - [MS-SMB2]: 2.2.16 + """ + + def decode(self, message): + assert message.command == SMB2_COM_CLOSE + + +class SMB2QueryDirectoryRequest(Structure): + """ + References: + =========== + - [MS-SMB2]: 2.2.33 + """ + + STRUCTURE_FORMAT = "<HBBI16sHHI" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def __init__(self, fid, filename, info_class, flags, output_buf_len): + self.fid = fid + self.filename = filename + self.info_class = info_class + self.flags = flags + self.output_buf_len = output_buf_len + + def initMessage(self, message): + Structure.initMessage(self, message) + message.command = SMB2_COM_QUERY_DIRECTORY + + def prepare(self, message): + message.data = struct.pack(self.STRUCTURE_FORMAT, + 33, # Structure size. Must be 33 as mandated by [MS-SMB2] 2.2.33 + self.info_class, # FileInformationClass + self.flags, # Flags + 0, # FileIndex + self.fid, # FileID + SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, # FileNameOffset + len(self.filename)*2, + self.output_buf_len) + self.filename.encode('UTF-16LE') + + # MS-SMB2 3.2.4.17 + # If Connection.Dialect is not "2.0.2" and if Connection.SupportsMultiCredit is TRUE, the + # CreditCharge field in the SMB2 header MUST be set to ( 1 + (OutputBufferLength - 1) / 65536 ) + if message.conn.smb2_dialect != SMB2_DIALECT_2 and message.conn.cap_multi_credit: + message.credit_charge = int(1 + (self.output_buf_len -1) / 65536) + + +class SMB2QueryDirectoryResponse(Structure): + """ + Contains information about the SMB2_COM_QUERY_DIRECTORY response from the server. 
+ + If the message has no errors, each instance contains the following attributes: + - data_length (integer) + - data (string) + + References: + =========== + - [MS-SMB2]: 2.2.34 + """ + + STRUCTURE_FORMAT = "<HHI" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def decode(self, message): + assert message.command == SMB2_COM_QUERY_DIRECTORY + + if message.status == 0: + struct_size, offset, self.data_length = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) + self.data = message.raw_data[offset:offset+self.data_length] + + +class SMB2QueryInfoRequest(Structure): + """ + References: + =========== + - [MS-SMB2]: 2.2.37 + """ + + STRUCTURE_FORMAT = "<HBBIHHIII16s" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def __init__(self, fid, flags, additional_info, info_type, file_info_class, input_buf, output_buf_len): + self.fid = fid + self.flags = flags + self.additional_info = additional_info + self.info_type = info_type + self.file_info_class = file_info_class + self.output_buf_len = output_buf_len + self.input_buf = input_buf or '' + + def initMessage(self, message): + Structure.initMessage(self, message) + message.command = SMB2_COM_QUERY_INFO + + def prepare(self, message): + message.data = struct.pack(self.STRUCTURE_FORMAT, + 41, # Structure size. 
Must be 41 as mandated by [MS-SMB2] 2.2.37 + self.info_type, # InfoType + self.file_info_class, # FileInfoClass + self.output_buf_len, # OutputBufferLength + SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, # InputBufferOffset + 0, # Reserved + len(self.input_buf), # InputBufferLength + self.additional_info, # AdditionalInformation + self.flags, # Flags + self.fid # FileId + ) + self.input_buf + + # MS-SMB2 3.2.4.17 + # If Connection.Dialect is not "2.0.2" and if Connection.SupportsMultiCredit is TRUE, the + # CreditCharge field in the SMB2 header MUST be set to ( 1 + (OutputBufferLength - 1) / 65536 ) + if message.conn.smb2_dialect != SMB2_DIALECT_2 and message.conn.cap_multi_credit: + message.credit_charge = int(1 + ((self.output_buf_len + len(self.input_buf)) -1) / 65536) + + +class SMB2QueryInfoResponse(Structure): + """ + Contains information about the SMB2_COM_QUERY_INFO response from the server. + + If the message has no errors, each instance contains the following attributes: + - data_length (integer) + - data (string) + + References: + =========== + - [MS-SMB2]: 2.2.38 + """ + + STRUCTURE_FORMAT = "<HHI" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def decode(self, message): + assert message.command == SMB2_COM_QUERY_INFO + + if message.status == 0: + struct_size, buf_offset, self.data_length = struct.unpack(self.STRUCTURE_FORMAT, message.raw_data[SMB2Message.HEADER_SIZE:SMB2Message.HEADER_SIZE+self.STRUCTURE_SIZE]) + self.data = message.raw_data[buf_offset:buf_offset+self.data_length] + + +class SMB2SetInfoRequest(Structure): + """ + References: + =========== + - [MS-SMB2]: 2.2.39 + """ + + STRUCTURE_FORMAT = "<HBBIHHI16s" + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def __init__(self, fid, additional_info, info_type, file_info_class, data): + self.fid = fid + self.additional_info = additional_info + self.info_type = info_type + self.file_info_class = file_info_class + self.data = data or '' + + def initMessage(self, message): + 
Structure.initMessage(self, message) + message.command = SMB2_COM_SET_INFO + + def prepare(self, message): + message.data = struct.pack(self.STRUCTURE_FORMAT, + 33, # StructureSize. Must be 33 as mandated by [MS-SMB2] 2.2.39 + self.info_type, # InfoType + self.file_info_class, # FileInfoClass + len(self.data), # BufferLength + SMB2Message.HEADER_SIZE + self.STRUCTURE_SIZE, # BufferOffset + 0, # Reserved + self.additional_info, # AdditionalInformation + self.fid # FileId + ) + self.data + + # MS-SMB2 3.2.4.17 + # If Connection.Dialect is not "2.0.2" and if Connection.SupportsMultiCredit is TRUE, the + # CreditCharge field in the SMB2 header MUST be set to ( 1 + (OutputBufferLength - 1) / 65536 ) + if message.conn.smb2_dialect != SMB2_DIALECT_2 and message.conn.cap_multi_credit: + message.credit_charge = int(1 + (len(self.data) -1) / 65536) + +class SMB2SetInfoResponse(Structure): + """ + References: + =========== + - [MS-SMB2]: 2.2.40 + """ + + def decode(self, message): + pass + + +class SMB2EchoRequest(Structure): + """ + References: + =========== + - [MS-SMB2]: 2.2.28 + """ + + STRUCTURE_FORMAT = '<HH' + STRUCTURE_SIZE = struct.calcsize(STRUCTURE_FORMAT) + + def initMessage(self, message): + Structure.initMessage(self, message) + message.command = SMB2_COM_ECHO + + def prepare(self, message): + message.data = struct.pack(self.STRUCTURE_FORMAT, + 4, # StructureSize. 
Must be 4 as mandated by [MS-SMB2] 2.2.29 + 0) # Reserved + +class SMB2EchoResponse(Structure): + """ + References: + =========== + - [MS-SMB2]: 2.2.29 + """ + + def decode(self, message): + pass diff --git a/plugin.video.alfa/lib/sambatools/smb/smb_constants.py b/plugin.video.alfa/lib/sambatools/smb/smb_constants.py index 5cbf6e6a..99476802 100755 --- a/plugin.video.alfa/lib/sambatools/smb/smb_constants.py +++ b/plugin.video.alfa/lib/sambatools/smb/smb_constants.py @@ -1,239 +1,257 @@ - -# Values for Command field in SMB message header -SMB_COM_CREATE_DIRECTORY = 0x00 -SMB_COM_DELETE_DIRECTORY = 0x01 -SMB_COM_CLOSE = 0x04 -SMB_COM_DELETE = 0x06 -SMB_COM_RENAME = 0x07 -SMB_COM_TRANSACTION = 0x25 -SMB_COM_ECHO = 0x2B -SMB_COM_OPEN_ANDX = 0x2D -SMB_COM_READ_ANDX = 0x2E -SMB_COM_WRITE_ANDX = 0x2F -SMB_COM_TRANSACTION2 = 0x32 -SMB_COM_NEGOTIATE = 0x72 -SMB_COM_SESSION_SETUP_ANDX = 0x73 -SMB_COM_TREE_CONNECT_ANDX = 0x75 -SMB_COM_NT_TRANSACT = 0xA0 -SMB_COM_NT_CREATE_ANDX = 0xA2 - -SMB_COMMAND_NAMES = { - 0x00: 'SMB_COM_CREATE_DIRECTORY', - 0x01: 'SMB_COM_DELETE_DIRECTORY', - 0x04: 'SMB_COM_CLOSE', - 0x06: 'SMB_COM_DELETE', - 0x25: 'SMB_COM_TRANSACTION', - 0x2B: 'SMB_COM_ECHO', - 0x2D: 'SMB_COM_OPEN_ANDX', - 0x2E: 'SMB_COM_READ_ANDX', - 0x2F: 'SMB_COM_WRITE_ANDX', - 0x32: 'SMB_COM_TRANSACTION2', - 0x72: 'SMB_COM_NEGOTIATE', - 0x73: 'SMB_COM_SESSION_SETUP_ANDX', - 0x75: 'SMB_COM_TREE_CONNECT_ANDX', - 0xA0: 'SMB_COM_NT_TRANSACT', - 0xA2: 'SMB_COM_NT_CREATE_ANDX', -} - -# Bitmask for Flags field in SMB message header -SMB_FLAGS_LOCK_AND_READ_OK = 0x01 # LANMAN1.0 -SMB_FLAGS_BUF_AVAIL = 0x02 # LANMAN1.0, Obsolete -SMB_FLAGS_CASE_INSENSITIVE = 0x08 # LANMAN1.0, Obsolete -SMB_FLAGS_CANONICALIZED_PATHS = 0x10 # LANMAN1.0, Obsolete -SMB_FLAGS_OPLOCK = 0x20 # LANMAN1.0, Obsolete -SMB_FLAGS_OPBATCH = 0x40 # LANMAN1.0, Obsolete -SMB_FLAGS_REPLY = 0x80 # LANMAN1.0 - -# Bitmask for Flags2 field in SMB message header -SMB_FLAGS2_LONG_NAMES = 0x0001 # LANMAN2.0 -SMB_FLAGS2_EAS = 0x0002 
# LANMAN1.2 -SMB_FLAGS2_SMB_SECURITY_SIGNATURE = 0x0004 # NT LANMAN -SMB_FLAGS2_IS_LONG_NAME = 0x0040 # NT LANMAN -SMB_FLAGS2_DFS = 0x1000 # NT LANMAN -SMB_FLAGS2_REPARSE_PATH = 0x0400 # -SMB_FLAGS2_EXTENDED_SECURITY = 0x0800 # -SMB_FLAGS2_PAGING_IO = 0x2000 # NT LANMAN -SMB_FLAGS2_NT_STATUS = 0x4000 # NT LANMAN -SMB_FLAGS2_UNICODE = 0x8000 # NT LANMAN - -# Bitmask for Capabilities field in SMB_COM_SESSION_SETUP_ANDX response -# [MS-SMB]: 2.2.4.5.2.1 (Capabilities field) -CAP_RAW_MODE = 0x01 -CAP_MPX_MODE = 0x02 -CAP_UNICODE = 0x04 -CAP_LARGE_FILES = 0x08 -CAP_NT_SMBS = 0x10 -CAP_RPC_REMOTE_APIS = 0x20 -CAP_STATUS32 = 0x40 -CAP_LEVEL_II_OPLOCKS = 0x80 -CAP_LOCK_AND_READ = 0x0100 -CAP_NT_FIND = 0x0200 -CAP_DFS = 0x1000 -CAP_INFOLEVEL_PASSTHRU = 0x2000 -CAP_LARGE_READX = 0x4000 -CAP_LARGE_WRITEX = 0x8000 -CAP_LWIO = 0x010000 -CAP_UNIX = 0x800000 -CAP_COMPRESSED = 0x02000000 -CAP_DYNAMIC_REAUTH = 0x20000000 -CAP_PERSISTENT_HANDLES = 0x40000000 -CAP_EXTENDED_SECURITY = 0x80000000 - -# Value for Action field in SMB_COM_SESSION_SETUP_ANDX response -SMB_SETUP_GUEST = 0x0001 -SMB_SETUP_USE_LANMAN_KEY = 0X0002 - -# Bitmask for SecurityMode field in SMB_COM_NEGOTIATE response -NEGOTIATE_USER_SECURITY = 0x01 -NEGOTIATE_ENCRYPT_PASSWORDS = 0x02 -NEGOTIATE_SECURITY_SIGNATURES_ENABLE = 0x04 -NEGOTIATE_SECURITY_SIGNATURES_REQUIRE = 0x08 - -# Available constants for Service field in SMB_COM_TREE_CONNECT_ANDX request -# [MS-CIFS]: 2.2.4.55.1 (Service field) -SERVICE_PRINTER = 'LPT1:' -SERVICE_NAMED_PIPE = 'IPC' -SERVICE_COMM = 'COMM' -SERVICE_ANY = '?????' 
- -# Bitmask for Flags field in SMB_COM_NT_CREATE_ANDX request -# [MS-CIFS]: 2.2.4.64.1 -# [MS-SMB]: 2.2.4.9.1 -NT_CREATE_REQUEST_OPLOCK = 0x02 -NT_CREATE_REQUEST_OPBATCH = 0x04 -NT_CREATE_OPEN_TARGET_DIR = 0x08 -NT_CREATE_REQUEST_EXTENDED_RESPONSE = 0x10 # Defined in [MS-SMB]: 2.2.4.9.1 - -# Bitmask for DesiredAccess field in SMB_COM_NT_CREATE_ANDX request -# and SMB2CreateRequest class -# Also used for MaximalAccess field in SMB2TreeConnectResponse class -# [MS-CIFS]: 2.2.4.64.1 -# [MS-SMB2]: 2.2.13.1.1 -FILE_READ_DATA = 0x01 -FILE_WRITE_DATA = 0X02 -FILE_APPEND_DATA = 0x04 -FILE_READ_EA = 0x08 -FILE_WRITE_EA = 0x10 -FILE_EXECUTE = 0x20 -FILE_READ_ATTRIBUTES = 0x80 -FILE_WRITE_ATTRIBUTES = 0x0100 -DELETE = 0x010000 -READ_CONTROL = 0x020000 -WRITE_DAC = 0x040000 -WRITE_OWNER = 0x080000 -SYNCHRONIZE = 0x100000 -ACCESS_SYSTEM_SECURITY = 0x01000000 -MAXIMUM_ALLOWED = 0x02000000 -GENERIC_ALL = 0x10000000 -GENERIC_EXECUTE = 0x20000000 -GENERIC_WRITE = 0x40000000 -GENERIC_READ = 0x80000000L - -# SMB_EXT_FILE_ATTR bitmask ([MS-CIFS]: 2.2.1.2.3) -# Includes extensions defined in [MS-SMB] 2.2.1.2.1 -# Bitmask for FileAttributes field in SMB_COM_NT_CREATE_ANDX request ([MS-CIFS]: 2.2.4.64.1) -# Also used for FileAttributes field in SMB2CreateRequest class ([MS-SMB2]: 2.2.13) -ATTR_READONLY = 0x01 -ATTR_HIDDEN = 0x02 -ATTR_SYSTEM = 0x04 -ATTR_DIRECTORY = 0x10 -ATTR_ARCHIVE = 0x20 -ATTR_NORMAL = 0x80 -ATTR_TEMPORARY = 0x0100 -ATTR_SPARSE = 0x0200 -ATTR_REPARSE_POINT = 0x0400 -ATTR_COMPRESSED = 0x0800 -ATTR_OFFLINE = 0x1000 -ATTR_NOT_CONTENT_INDEXED = 0x2000 -ATTR_ENCRYPTED = 0x4000 -POSIX_SEMANTICS = 0x01000000 -BACKUP_SEMANTICS = 0x02000000 -DELETE_ON_CLOSE = 0x04000000 -SEQUENTIAL_SCAN = 0x08000000 -RANDOM_ACCESS = 0x10000000 -NO_BUFFERING = 0x20000000 -WRITE_THROUGH = 0x80000000 - -# Bitmask for ShareAccess field in SMB_COM_NT_CREATE_ANDX request -# and SMB2CreateRequest class -# [MS-CIFS]: 2.2.4.64.1 -# [MS-SMB2]: 2.2.13 -FILE_SHARE_NONE = 0x00 -FILE_SHARE_READ = 0x01 
-FILE_SHARE_WRITE = 0x02 -FILE_SHARE_DELETE = 0x04 - -# Values for CreateDisposition field in SMB_COM_NT_CREATE_ANDX request -# and SMB2CreateRequest class -# [MS-CIFS]: 2.2.4.64.1 -# [MS-SMB2]: 2.2.13 -FILE_SUPERSEDE = 0x00 -FILE_OPEN = 0x01 -FILE_CREATE = 0x02 -FILE_OPEN_IF = 0x03 -FILE_OVERWRITE = 0x04 -FILE_OVERWRITE_IF = 0x05 - -# Bitmask for CreateOptions field in SMB_COM_NT_CREATE_ANDX request -# and SMB2CreateRequest class -# [MS-CIFS]: 2.2.4.64.1 -# [MS-SMB2]: 2.2.13 -FILE_DIRECTORY_FILE = 0x01 -FILE_WRITE_THROUGH = 0x02 -FILE_SEQUENTIAL_ONLY = 0x04 -FILE_NO_INTERMEDIATE_BUFFERING = 0x08 -FILE_SYNCHRONOUS_IO_ALERT = 0x10 -FILE_SYNCHRONOUS_IO_NONALERT = 0x20 -FILE_NON_DIRECTORY_FILE = 0x40 -FILE_CREATE_TREE_CONNECTION = 0x80 -FILE_COMPLETE_IF_OPLOCKED = 0x0100 -FILE_NO_EA_KNOWLEDGE = 0x0200 -FILE_OPEN_FOR_RECOVERY = 0x0400 -FILE_RANDOM_ACCESS = 0x0800 -FILE_DELETE_ON_CLOSE = 0x1000 -FILE_OPEN_BY_FILE_ID = 0x2000 -FILE_OPEN_FOR_BACKUP_INTENT = 0x4000 -FILE_NO_COMPRESSION = 0x8000 -FILE_RESERVE_OPFILTER = 0x100000 -FILE_OPEN_NO_RECALL = 0x400000 -FILE_OPEN_FOR_FREE_SPACE_QUERY = 0x800000 - -# Values for ImpersonationLevel field in SMB_COM_NT_CREATE_ANDX request -# and SMB2CreateRequest class -# For interpretations about these values, refer to [MS-WSO] and [MSDN-IMPERS] -# [MS-CIFS]: 2.2.4.64.1 -# [MS-SMB]: 2.2.4.9.1 -# [MS-SMB2]: 2.2.13 -SEC_ANONYMOUS = 0x00 -SEC_IDENTIFY = 0x01 -SEC_IMPERSONATE = 0x02 -SEC_DELEGATION = 0x03 # Defined in [MS-SMB]: 2.2.4.9.1 - -# Values for SecurityFlags field in SMB_COM_NT_CREATE_ANDX request -# [MS-CIFS]: 2.2.4.64.1 -SMB_SECURITY_CONTEXT_TRACKING = 0x01 -SMB_SECURITY_EFFECTIVE_ONLY = 0x02 - -# Bitmask for Flags field in SMB_COM_TRANSACTION2 request -# [MS-CIFS]: 2.2.4.46.1 -DISCONNECT_TID = 0x01 -NO_RESPONSE = 0x02 - -# Bitmask for basic file attributes -# [MS-CIFS]: 2.2.1.2.4 -SMB_FILE_ATTRIBUTE_NORMAL = 0x00 -SMB_FILE_ATTRIBUTE_READONLY = 0x01 -SMB_FILE_ATTRIBUTE_HIDDEN = 0x02 -SMB_FILE_ATTRIBUTE_SYSTEM = 0x04 
-SMB_FILE_ATTRIBUTE_VOLUME = 0x08 -SMB_FILE_ATTRIBUTE_DIRECTORY = 0x10 -SMB_FILE_ATTRIBUTE_ARCHIVE = 0x20 -SMB_SEARCH_ATTRIBUTE_READONLY = 0x0100 -SMB_SEARCH_ATTRIBUTE_HIDDEN = 0x0200 -SMB_SEARCH_ATTRIBUTE_SYSTEM = 0x0400 -SMB_SEARCH_ATTRIBUTE_DIRECTORY = 0x1000 -SMB_SEARCH_ATTRIBUTE_ARCHIVE = 0x2000 - -# Bitmask for OptionalSupport field in SMB_COM_TREE_CONNECT_ANDX response -SMB_TREE_CONNECTX_SUPPORT_SEARCH = 0x0001 -SMB_TREE_CONNECTX_SUPPORT_DFS = 0x0002 + +# Values for Command field in SMB message header +SMB_COM_CREATE_DIRECTORY = 0x00 +SMB_COM_DELETE_DIRECTORY = 0x01 +SMB_COM_CLOSE = 0x04 +SMB_COM_DELETE = 0x06 +SMB_COM_RENAME = 0x07 +SMB_COM_TRANSACTION = 0x25 +SMB_COM_ECHO = 0x2B +SMB_COM_OPEN_ANDX = 0x2D +SMB_COM_READ_ANDX = 0x2E +SMB_COM_WRITE_ANDX = 0x2F +SMB_COM_TRANSACTION2 = 0x32 +SMB_COM_NEGOTIATE = 0x72 +SMB_COM_SESSION_SETUP_ANDX = 0x73 +SMB_COM_TREE_CONNECT_ANDX = 0x75 +SMB_COM_NT_TRANSACT = 0xA0 +SMB_COM_NT_CREATE_ANDX = 0xA2 + +SMB_COMMAND_NAMES = { + 0x00: 'SMB_COM_CREATE_DIRECTORY', + 0x01: 'SMB_COM_DELETE_DIRECTORY', + 0x04: 'SMB_COM_CLOSE', + 0x06: 'SMB_COM_DELETE', + 0x25: 'SMB_COM_TRANSACTION', + 0x2B: 'SMB_COM_ECHO', + 0x2D: 'SMB_COM_OPEN_ANDX', + 0x2E: 'SMB_COM_READ_ANDX', + 0x2F: 'SMB_COM_WRITE_ANDX', + 0x32: 'SMB_COM_TRANSACTION2', + 0x72: 'SMB_COM_NEGOTIATE', + 0x73: 'SMB_COM_SESSION_SETUP_ANDX', + 0x75: 'SMB_COM_TREE_CONNECT_ANDX', + 0xA0: 'SMB_COM_NT_TRANSACT', + 0xA2: 'SMB_COM_NT_CREATE_ANDX', +} + +# Bitmask for Flags field in SMB message header +SMB_FLAGS_LOCK_AND_READ_OK = 0x01 # LANMAN1.0 +SMB_FLAGS_BUF_AVAIL = 0x02 # LANMAN1.0, Obsolete +SMB_FLAGS_CASE_INSENSITIVE = 0x08 # LANMAN1.0, Obsolete +SMB_FLAGS_CANONICALIZED_PATHS = 0x10 # LANMAN1.0, Obsolete +SMB_FLAGS_OPLOCK = 0x20 # LANMAN1.0, Obsolete +SMB_FLAGS_OPBATCH = 0x40 # LANMAN1.0, Obsolete +SMB_FLAGS_REPLY = 0x80 # LANMAN1.0 + +# Bitmask for Flags2 field in SMB message header +SMB_FLAGS2_LONG_NAMES = 0x0001 # LANMAN2.0 +SMB_FLAGS2_EAS = 0x0002 # LANMAN1.2 
+SMB_FLAGS2_SMB_SECURITY_SIGNATURE = 0x0004 # NT LANMAN +SMB_FLAGS2_IS_LONG_NAME = 0x0040 # NT LANMAN +SMB_FLAGS2_DFS = 0x1000 # NT LANMAN +SMB_FLAGS2_REPARSE_PATH = 0x0400 # +SMB_FLAGS2_EXTENDED_SECURITY = 0x0800 # +SMB_FLAGS2_PAGING_IO = 0x2000 # NT LANMAN +SMB_FLAGS2_NT_STATUS = 0x4000 # NT LANMAN +SMB_FLAGS2_UNICODE = 0x8000 # NT LANMAN + +# Bitmask for Capabilities field in SMB_COM_SESSION_SETUP_ANDX response +# [MS-SMB]: 2.2.4.5.2.1 (Capabilities field) +CAP_RAW_MODE = 0x01 +CAP_MPX_MODE = 0x02 +CAP_UNICODE = 0x04 +CAP_LARGE_FILES = 0x08 +CAP_NT_SMBS = 0x10 +CAP_RPC_REMOTE_APIS = 0x20 +CAP_STATUS32 = 0x40 +CAP_LEVEL_II_OPLOCKS = 0x80 +CAP_LOCK_AND_READ = 0x0100 +CAP_NT_FIND = 0x0200 +CAP_DFS = 0x1000 +CAP_INFOLEVEL_PASSTHRU = 0x2000 +CAP_LARGE_READX = 0x4000 +CAP_LARGE_WRITEX = 0x8000 +CAP_LWIO = 0x010000 +CAP_UNIX = 0x800000 +CAP_COMPRESSED = 0x02000000 +CAP_DYNAMIC_REAUTH = 0x20000000 +CAP_PERSISTENT_HANDLES = 0x40000000 +CAP_EXTENDED_SECURITY = 0x80000000 + +# Value for Action field in SMB_COM_SESSION_SETUP_ANDX response +SMB_SETUP_GUEST = 0x0001 +SMB_SETUP_USE_LANMAN_KEY = 0X0002 + +# Bitmask for SecurityMode field in SMB_COM_NEGOTIATE response +NEGOTIATE_USER_SECURITY = 0x01 +NEGOTIATE_ENCRYPT_PASSWORDS = 0x02 +NEGOTIATE_SECURITY_SIGNATURES_ENABLE = 0x04 +NEGOTIATE_SECURITY_SIGNATURES_REQUIRE = 0x08 + +# Available constants for Service field in SMB_COM_TREE_CONNECT_ANDX request +# [MS-CIFS]: 2.2.4.55.1 (Service field) +SERVICE_PRINTER = 'LPT1:' +SERVICE_NAMED_PIPE = 'IPC' +SERVICE_COMM = 'COMM' +SERVICE_ANY = '?????' 
+ +# Bitmask for Flags field in SMB_COM_NT_CREATE_ANDX request +# [MS-CIFS]: 2.2.4.64.1 +# [MS-SMB]: 2.2.4.9.1 +NT_CREATE_REQUEST_OPLOCK = 0x02 +NT_CREATE_REQUEST_OPBATCH = 0x04 +NT_CREATE_OPEN_TARGET_DIR = 0x08 +NT_CREATE_REQUEST_EXTENDED_RESPONSE = 0x10 # Defined in [MS-SMB]: 2.2.4.9.1 + +# Bitmask for DesiredAccess field in SMB_COM_NT_CREATE_ANDX request +# and SMB2CreateRequest class +# Also used for MaximalAccess field in SMB2TreeConnectResponse class +# [MS-CIFS]: 2.2.4.64.1 +# [MS-SMB2]: 2.2.13.1.1 +FILE_READ_DATA = 0x01 +FILE_WRITE_DATA = 0X02 +FILE_APPEND_DATA = 0x04 +FILE_READ_EA = 0x08 +FILE_WRITE_EA = 0x10 +FILE_EXECUTE = 0x20 +FILE_DELETE_CHILD = 0x40 +FILE_READ_ATTRIBUTES = 0x80 +FILE_WRITE_ATTRIBUTES = 0x0100 +DELETE = 0x010000 +READ_CONTROL = 0x020000 +WRITE_DAC = 0x040000 +WRITE_OWNER = 0x080000 +SYNCHRONIZE = 0x100000 +ACCESS_SYSTEM_SECURITY = 0x01000000 +MAXIMUM_ALLOWED = 0x02000000 +GENERIC_ALL = 0x10000000 +GENERIC_EXECUTE = 0x20000000 +GENERIC_WRITE = 0x40000000 +GENERIC_READ = 0x80000000L + +# SMB_EXT_FILE_ATTR bitmask ([MS-CIFS]: 2.2.1.2.3) +# Includes extensions defined in [MS-SMB] 2.2.1.2.1 +# Bitmask for FileAttributes field in SMB_COM_NT_CREATE_ANDX request ([MS-CIFS]: 2.2.4.64.1) +# Also used for FileAttributes field in SMB2CreateRequest class ([MS-SMB2]: 2.2.13) +ATTR_READONLY = 0x01 +ATTR_HIDDEN = 0x02 +ATTR_SYSTEM = 0x04 +ATTR_DIRECTORY = 0x10 +ATTR_ARCHIVE = 0x20 +ATTR_NORMAL = 0x80 +ATTR_TEMPORARY = 0x0100 +ATTR_SPARSE = 0x0200 +ATTR_REPARSE_POINT = 0x0400 +ATTR_COMPRESSED = 0x0800 +ATTR_OFFLINE = 0x1000 +ATTR_NOT_CONTENT_INDEXED = 0x2000 +ATTR_ENCRYPTED = 0x4000 +POSIX_SEMANTICS = 0x01000000 +BACKUP_SEMANTICS = 0x02000000 +DELETE_ON_CLOSE = 0x04000000 +SEQUENTIAL_SCAN = 0x08000000 +RANDOM_ACCESS = 0x10000000 +NO_BUFFERING = 0x20000000 +WRITE_THROUGH = 0x80000000 + +# Bitmask for ShareAccess field in SMB_COM_NT_CREATE_ANDX request +# and SMB2CreateRequest class +# [MS-CIFS]: 2.2.4.64.1 +# [MS-SMB2]: 2.2.13 +FILE_SHARE_NONE = 0x00 
+FILE_SHARE_READ = 0x01 +FILE_SHARE_WRITE = 0x02 +FILE_SHARE_DELETE = 0x04 + +# Values for CreateDisposition field in SMB_COM_NT_CREATE_ANDX request +# and SMB2CreateRequest class +# [MS-CIFS]: 2.2.4.64.1 +# [MS-SMB2]: 2.2.13 +FILE_SUPERSEDE = 0x00 +FILE_OPEN = 0x01 +FILE_CREATE = 0x02 +FILE_OPEN_IF = 0x03 +FILE_OVERWRITE = 0x04 +FILE_OVERWRITE_IF = 0x05 + +# Bitmask for CreateOptions field in SMB_COM_NT_CREATE_ANDX request +# and SMB2CreateRequest class +# [MS-CIFS]: 2.2.4.64.1 +# [MS-SMB2]: 2.2.13 +FILE_DIRECTORY_FILE = 0x01 +FILE_WRITE_THROUGH = 0x02 +FILE_SEQUENTIAL_ONLY = 0x04 +FILE_NO_INTERMEDIATE_BUFFERING = 0x08 +FILE_SYNCHRONOUS_IO_ALERT = 0x10 +FILE_SYNCHRONOUS_IO_NONALERT = 0x20 +FILE_NON_DIRECTORY_FILE = 0x40 +FILE_CREATE_TREE_CONNECTION = 0x80 +FILE_COMPLETE_IF_OPLOCKED = 0x0100 +FILE_NO_EA_KNOWLEDGE = 0x0200 +FILE_OPEN_FOR_RECOVERY = 0x0400 +FILE_RANDOM_ACCESS = 0x0800 +FILE_DELETE_ON_CLOSE = 0x1000 +FILE_OPEN_BY_FILE_ID = 0x2000 +FILE_OPEN_FOR_BACKUP_INTENT = 0x4000 +FILE_NO_COMPRESSION = 0x8000 +FILE_RESERVE_OPFILTER = 0x100000 +FILE_OPEN_NO_RECALL = 0x400000 +FILE_OPEN_FOR_FREE_SPACE_QUERY = 0x800000 + +# Values for ImpersonationLevel field in SMB_COM_NT_CREATE_ANDX request +# and SMB2CreateRequest class +# For interpretations about these values, refer to [MS-WSO] and [MSDN-IMPERS] +# [MS-CIFS]: 2.2.4.64.1 +# [MS-SMB]: 2.2.4.9.1 +# [MS-SMB2]: 2.2.13 +SEC_ANONYMOUS = 0x00 +SEC_IDENTIFY = 0x01 +SEC_IMPERSONATE = 0x02 +SEC_DELEGATION = 0x03 # Defined in [MS-SMB]: 2.2.4.9.1 + +# Values for SecurityFlags field in SMB_COM_NT_CREATE_ANDX request +# [MS-CIFS]: 2.2.4.64.1 +SMB_SECURITY_CONTEXT_TRACKING = 0x01 +SMB_SECURITY_EFFECTIVE_ONLY = 0x02 + +# Bitmask for Flags field in SMB_COM_TRANSACTION2 request +# [MS-CIFS]: 2.2.4.46.1 +DISCONNECT_TID = 0x01 +NO_RESPONSE = 0x02 + +# Bitmask for basic file attributes +# [MS-CIFS]: 2.2.1.2.4 +SMB_FILE_ATTRIBUTE_NORMAL = 0x00 +SMB_FILE_ATTRIBUTE_READONLY = 0x01 +SMB_FILE_ATTRIBUTE_HIDDEN = 0x02 
+SMB_FILE_ATTRIBUTE_SYSTEM = 0x04 +SMB_FILE_ATTRIBUTE_VOLUME = 0x08 # Unsupported for listPath() operations +SMB_FILE_ATTRIBUTE_DIRECTORY = 0x10 +SMB_FILE_ATTRIBUTE_ARCHIVE = 0x20 +# SMB_FILE_ATTRIBUTE_INCL_NORMAL is a special placeholder to include normal files for +# with other search attributes for listPath() operations. It is not defined in the MS-CIFS specs. +SMB_FILE_ATTRIBUTE_INCL_NORMAL = 0x10000 +# Do not use the following values for listPath() operations as they are not supported for SMB2 +SMB_SEARCH_ATTRIBUTE_READONLY = 0x0100 +SMB_SEARCH_ATTRIBUTE_HIDDEN = 0x0200 +SMB_SEARCH_ATTRIBUTE_SYSTEM = 0x0400 +SMB_SEARCH_ATTRIBUTE_DIRECTORY = 0x1000 +SMB_SEARCH_ATTRIBUTE_ARCHIVE = 0x2000 + +# Bitmask for OptionalSupport field in SMB_COM_TREE_CONNECT_ANDX response +SMB_TREE_CONNECTX_SUPPORT_SEARCH = 0x0001 +SMB_TREE_CONNECTX_SUPPORT_DFS = 0x0002 + +# Bitmask for security information fields, specified as +# AdditionalInformation in SMB2 +# [MS-SMB]: 2.2.7.4 +# [MS-SMB2]: 2.2.37 +OWNER_SECURITY_INFORMATION = 0x00000001 +GROUP_SECURITY_INFORMATION = 0x00000002 +DACL_SECURITY_INFORMATION = 0x00000004 +SACL_SECURITY_INFORMATION = 0x00000008 +LABEL_SECURITY_INFORMATION = 0x00000010 +ATTRIBUTE_SECURITY_INFORMATION = 0x00000020 +SCOPE_SECURITY_INFORMATION = 0x00000040 +BACKUP_SECURITY_INFORMATION = 0x00010000 diff --git a/plugin.video.alfa/lib/sambatools/smb/smb_structs.py b/plugin.video.alfa/lib/sambatools/smb/smb_structs.py index ad4f942f..aa81ac9c 100755 --- a/plugin.video.alfa/lib/sambatools/smb/smb_structs.py +++ b/plugin.video.alfa/lib/sambatools/smb/smb_structs.py @@ -1,1423 +1,1421 @@ -import binascii -import logging -import os -import struct -import time -from StringIO import StringIO - -from smb_constants import * - -# Set to True if you want to enable support for extended security. Required for Windows Vista and later -SUPPORT_EXTENDED_SECURITY = True - -# Set to True if you want to enable SMB2 protocol. 
-SUPPORT_SMB2 = True - -# Supported dialects -DIALECTS = [ ] -for i, ( name, dialect ) in enumerate([ ( 'NT_LAN_MANAGER_DIALECT', 'NT LM 0.12' ), ]): - DIALECTS.append(dialect) - globals()[name] = i - -DIALECTS2 = [ ] -for i, ( name, dialect ) in enumerate([ ( 'SMB2_DIALECT', 'SMB 2.002' ) ]): - DIALECTS2.append(dialect) - globals()[name] = i + len(DIALECTS) - - -class UnsupportedFeature(Exception): - """ - Raised when an supported feature is present/required in the protocol but is not - currently supported by pysmb - """ - pass - - -class ProtocolError(Exception): - - def __init__(self, message, data_buf = None, smb_message = None): - self.message = message - self.data_buf = data_buf - self.smb_message = smb_message - - def __str__(self): - b = StringIO() - b.write(self.message + os.linesep) - if self.smb_message: - b.write('=' * 20 + ' SMB Message ' + '=' * 20 + os.linesep) - b.write(str(self.smb_message)) - - if self.data_buf: - b.write('=' * 20 + ' SMB Data Packet (hex) ' + '=' * 20 + os.linesep) - b.write(binascii.hexlify(self.data_buf)) - b.write(os.linesep) - - return b.getvalue() - -class SMB2ProtocolHeaderError(ProtocolError): - - def __init__(self): - ProtocolError.__init__(self, "Packet header belongs to SMB2") - -class OperationFailure(Exception): - - def __init__(self, message, smb_messages): - self.args = [ message ] - self.message = message - self.smb_messages = smb_messages - - def __str__(self): - b = StringIO() - b.write(self.message + os.linesep) - - for idx, m in enumerate(self.smb_messages): - b.write('=' * 20 + ' SMB Message %d ' % idx + '=' * 20 + os.linesep) - b.write('SMB Header:' + os.linesep) - b.write('-----------' + os.linesep) - b.write(str(m)) - b.write('SMB Data Packet (hex):' + os.linesep) - b.write('----------------------' + os.linesep) - b.write(binascii.hexlify(m.raw_data)) - b.write(os.linesep) - - return b.getvalue() - - -class SMBError: - - def __init__(self): - self.reset() - - def reset(self): - self.internal_value = 0L - 
self.is_ntstatus = True - - def __str__(self): - if self.is_ntstatus: - return 'NTSTATUS=0x%08X' % self.internal_value - else: - return 'ErrorClass=0x%02X ErrorCode=0x%04X' % ( self.internal_value >> 24, self.internal_value & 0xFFFF ) - - @property - def hasError(self): - return self.internal_value != 0 - - -class SMBMessage: - - HEADER_STRUCT_FORMAT = "<4sBIBHHQxxHHHHB" - HEADER_STRUCT_SIZE = struct.calcsize(HEADER_STRUCT_FORMAT) - - log = logging.getLogger('SMB.SMBMessage') - protocol = 1 - - def __init__(self, payload = None): - self.reset() - if payload: - self.payload = payload - self.payload.initMessage(self) - - def __str__(self): - b = StringIO() - b.write('Command: 0x%02X (%s) %s' % ( self.command, SMB_COMMAND_NAMES.get(self.command, '<unknown>'), os.linesep )) - b.write('Status: %s %s' % ( str(self.status), os.linesep )) - b.write('Flags: 0x%02X %s' % ( self.flags, os.linesep )) - b.write('Flags2: 0x%04X %s' % ( self.flags2, os.linesep )) - b.write('PID: %d %s' % ( self.pid, os.linesep )) - b.write('UID: %d %s' % ( self.uid, os.linesep )) - b.write('MID: %d %s' % ( self.mid, os.linesep )) - b.write('TID: %d %s' % ( self.tid, os.linesep )) - b.write('Security: 0x%016X %s' % ( self.security, os.linesep )) - b.write('Parameters: %d bytes %s%s %s' % ( len(self.parameters_data), os.linesep, binascii.hexlify(self.parameters_data), os.linesep )) - b.write('Data: %d bytes %s%s %s' % ( len(self.data), os.linesep, binascii.hexlify(self.data), os.linesep )) - return b.getvalue() - - def reset(self): - self.raw_data = '' - self.command = 0 - self.status = SMBError() - self.flags = 0 - self.flags2 = 0 - self.pid = 0 - self.tid = 0 - self.uid = 0 - self.mid = 0 - self.security = 0L - self.parameters_data = '' - self.data = '' - self.payload = None - - @property - def isReply(self): - return bool(self.flags & SMB_FLAGS_REPLY) - - @property - def hasExtendedSecurity(self): - return bool(self.flags2 & SMB_FLAGS2_EXTENDED_SECURITY) - - def encode(self): - """ - Encode this 
SMB message into a series of bytes suitable to be embedded with a NetBIOS session message. - AssertionError will be raised if this SMB message has not been initialized with a Payload instance - - @return: a string containing the encoded SMB message - """ - assert self.payload - - self.pid = os.getpid() - self.payload.prepare(self) - - parameters_len = len(self.parameters_data) - assert parameters_len % 2 == 0 - - headers_data = struct.pack(self.HEADER_STRUCT_FORMAT, - '\xFFSMB', self.command, self.status.internal_value, self.flags, - self.flags2, (self.pid >> 16) & 0xFFFF, self.security, self.tid, - self.pid & 0xFFFF, self.uid, self.mid, int(parameters_len / 2)) - return headers_data + self.parameters_data + struct.pack('<H', len(self.data)) + self.data - - def decode(self, buf): - """ - Decodes the SMB message in buf. - All fields of the SMBMessage object will be reset to default values before decoding. - On errors, do not assume that the fields will be reinstated back to what they are before - this method is invoked. 
- - @param buf: data containing one complete SMB message - @type buf: string - @return: a positive integer indicating the number of bytes used in buf to decode this SMB message - @raise ProtocolError: raised when decoding fails - """ - buf_len = len(buf) - if buf_len < self.HEADER_STRUCT_SIZE: - # We need at least 32 bytes (header) + 1 byte (parameter count) - raise ProtocolError('Not enough data to decode SMB header', buf) - - self.reset() - - protocol, self.command, status, self.flags, \ - self.flags2, pid_high, self.security, self.tid, \ - pid_low, self.uid, self.mid, params_count = struct.unpack(self.HEADER_STRUCT_FORMAT, buf[:self.HEADER_STRUCT_SIZE]) - - if protocol == '\xFESMB': - raise SMB2ProtocolHeaderError() - if protocol != '\xFFSMB': - raise ProtocolError('Invalid 4-byte protocol field', buf) - - self.pid = (pid_high << 16) | pid_low - self.status.internal_value = status - self.status.is_ntstatus = bool(self.flags2 & SMB_FLAGS2_NT_STATUS) - - offset = self.HEADER_STRUCT_SIZE - if buf_len < params_count * 2 + 2: - # Not enough data in buf to decode up to body length - raise ProtocolError('Not enough data. Parameters list decoding failed', buf) - - datalen_offset = offset + params_count*2 - body_len = struct.unpack('<H', buf[datalen_offset:datalen_offset+2])[0] - if body_len > 0 and buf_len < (datalen_offset + 2 + body_len): - # Not enough data in buf to decode body - raise ProtocolError('Not enough data. 
Body decoding failed', buf) - - self.parameters_data = buf[offset:datalen_offset] - - if body_len > 0: - self.data = buf[datalen_offset+2:datalen_offset+2+body_len] - - self.raw_data = buf - self._decodePayload() - - return self.HEADER_STRUCT_SIZE + params_count * 2 + 2 + body_len - - def _decodePayload(self): - if self.command == SMB_COM_READ_ANDX: - self.payload = ComReadAndxResponse() - elif self.command == SMB_COM_WRITE_ANDX: - self.payload = ComWriteAndxResponse() - elif self.command == SMB_COM_TRANSACTION: - self.payload = ComTransactionResponse() - elif self.command == SMB_COM_TRANSACTION2: - self.payload = ComTransaction2Response() - elif self.command == SMB_COM_OPEN_ANDX: - self.payload = ComOpenAndxResponse() - elif self.command == SMB_COM_NT_CREATE_ANDX: - self.payload = ComNTCreateAndxResponse() - elif self.command == SMB_COM_TREE_CONNECT_ANDX: - self.payload = ComTreeConnectAndxResponse() - elif self.command == SMB_COM_ECHO: - self.payload = ComEchoResponse() - elif self.command == SMB_COM_SESSION_SETUP_ANDX: - self.payload = ComSessionSetupAndxResponse() - elif self.command == SMB_COM_NEGOTIATE: - self.payload = ComNegotiateResponse() - - if self.payload: - self.payload.decode(self) - - -class Payload: - - DEFAULT_ANDX_PARAM_HEADER = '\xFF\x00\x00\x00' - DEFAULT_ANDX_PARAM_SIZE = 4 - - def initMessage(self, message): - # SMB_FLAGS2_UNICODE must always be enabled. Without this, almost all the Payload subclasses will need to be - # rewritten to check for OEM/Unicode strings which will be tedious. Fortunately, almost all tested CIFS services - # support SMB_FLAGS2_UNICODE by default. 
- assert message.payload == self - message.flags = SMB_FLAGS_CASE_INSENSITIVE | SMB_FLAGS_CANONICALIZED_PATHS - message.flags2 = SMB_FLAGS2_UNICODE | SMB_FLAGS2_NT_STATUS | SMB_FLAGS2_LONG_NAMES | SMB_FLAGS2_EAS - - if SUPPORT_EXTENDED_SECURITY: - message.flags2 |= SMB_FLAGS2_EXTENDED_SECURITY | SMB_FLAGS2_SMB_SECURITY_SIGNATURE - - def prepare(self, message): - raise NotImplementedError - - def decode(self, message): - raise NotImplementedError - - -class ComNegotiateRequest(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.52.1 - - [MS-SMB]: 2.2.4.5.1 - """ - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_NEGOTIATE - - def prepare(self, message): - assert message.payload == self - message.parameters_data = '' - if SUPPORT_SMB2: - message.data = ''.join(map(lambda s: '\x02'+s+'\x00', DIALECTS + DIALECTS2)) - else: - message.data = ''.join(map(lambda s: '\x02'+s+'\x00', DIALECTS)) - - -class ComNegotiateResponse(Payload): - """ - Contains information on the SMB_COM_NEGOTIATE response from server - - After calling the decode method, each instance will contain the following attributes, - - security_mode (integer) - - max_mpx_count (integer) - - max_number_vcs (integer) - - max_buffer_size (long) - - max_raw_size (long) - - session_key (long) - - capabilities (long) - - system_time (long) - - server_time_zone (integer) - - challenge_length (integer) - - If the underlying SMB message's flag2 does not have SMB_FLAGS2_EXTENDED_SECURITY bit enabled, - then the instance will have the following additional attributes, - - challenge (string) - - domain (unicode) - - If the underlying SMB message's flags2 has SMB_FLAGS2_EXTENDED_SECURITY bit enabled, - then the instance will have the following additional attributes, - - server_guid (string) - - security_blob (string) - - References: - =========== - - [MS-SMB]: 2.2.4.5.2.1 - - [MS-CIFS]: 2.2.4.52.2 - """ - - PAYLOAD_STRUCT_FORMAT = '<HBHHIIIIQHB' - 
PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def decode(self, message): - assert message.command == SMB_COM_NEGOTIATE - - if not message.isReply: - raise ProtocolError('Not a SMB_COM_NEGOTIATE reply', message.raw_data, message) - - self.security_mode, self.max_mpx_count, self.max_number_vcs, self.max_buffer_size, \ - self.max_raw_size, self.session_key, self.capabilities, self.system_time, self.server_time_zone, \ - self.challenge_length = ( 0, ) * 10 - - data_len = len(message.parameters_data) - if data_len < 2: - raise ProtocolError('Not enough data to decode SMB_COM_NEGOTIATE dialect_index field', message.raw_data, message) - - self.dialect_index = struct.unpack('<H', message.parameters_data[:2])[0] - if self.dialect_index == NT_LAN_MANAGER_DIALECT: - if data_len != (0x11 * 2): - raise ProtocolError('NT LAN Manager dialect selected in SMB_COM_NEGOTIATE but parameters bytes count (%d) does not meet specs' % data_len, - message.raw_data, message) - else: - _, self.security_mode, self.max_mpx_count, self.max_number_vcs, self.max_buffer_size, \ - self.max_raw_size, self.session_key, self.capabilities, self.system_time, self.server_time_zone, \ - self.challenge_length = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) - elif self.dialect_index == 0xFFFF: - raise ProtocolError('Server does not support any of the pysmb dialects. 
Please email pysmb to add in support for your OS', - message.raw_data, message) - else: - raise ProtocolError('Unknown dialect index (0x%04X)' % self.dialect_index, message.raw_data, message) - - data_len = len(message.data) - if not message.hasExtendedSecurity: - self.challenge, self.domain = '', '' - if self.challenge_length > 0: - if data_len >= self.challenge_length: - self.challenge = message.data[:self.challenge_length] - - s = '' - offset = self.challenge_length - while offset < data_len: - _s = message.data[offset:offset+2] - if _s == '\0\0': - self.domain = s.decode('UTF-16LE') - break - else: - s += _s - offset += 2 - else: - raise ProtocolError('Not enough data to decode SMB_COM_NEGOTIATE (without security extensions) Challenge field', message.raw_data, message) - else: - if data_len < 16: - raise ProtocolError('Not enough data to decode SMB_COM_NEGOTIATE (with security extensions) ServerGUID field', message.raw_data, message) - - self.server_guid = message.data[:16] - self.security_blob = message.data[16:] - - @property - def supportsExtendedSecurity(self): - return bool(self.capabilities & CAP_EXTENDED_SECURITY) - - -class ComSessionSetupAndxRequest__WithSecurityExtension(Payload): - """ - References: - =========== - - [MS-SMB]: 2.2.4.6.1 - """ - - PAYLOAD_STRUCT_FORMAT = '<HHHIHII' - - def __init__(self, session_key, security_blob): - self.session_key = session_key - self.security_blob = security_blob - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_SESSION_SETUP_ANDX - - def prepare(self, message): - assert message.hasExtendedSecurity - - message.flags2 |= SMB_FLAGS2_UNICODE - - cap = CAP_UNICODE | CAP_STATUS32 | CAP_EXTENDED_SECURITY | CAP_NT_SMBS - - message.parameters_data = \ - self.DEFAULT_ANDX_PARAM_HEADER + \ - struct.pack(self.PAYLOAD_STRUCT_FORMAT, - 16644, 10, 1, self.session_key, len(self.security_blob), 0, cap) - - message.data = self.security_blob - if (SMBMessage.HEADER_STRUCT_SIZE + 
len(message.parameters_data) + len(message.data)) % 2 != 0: - message.data = message.data + '\0' - message.data = message.data + '\0' * 4 - - -class ComSessionSetupAndxRequest__NoSecurityExtension(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.53.1 - """ - - PAYLOAD_STRUCT_FORMAT = '<HHHIHHII' - - def __init__(self, session_key, username, password, is_unicode, domain): - self.username = username - self.session_key = session_key - self.password = password - self.is_unicode = is_unicode - self.domain = domain - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_SESSION_SETUP_ANDX - - def prepare(self, message): - if self.is_unicode: - message.flags2 |= SMB_FLAGS2_UNICODE - else: - message.flags2 &= (~SMB_FLAGS2_UNICODE & 0xFFFF) - - password_len = len(self.password) - message.parameters_data = \ - self.DEFAULT_ANDX_PARAM_HEADER + \ - struct.pack(self.PAYLOAD_STRUCT_FORMAT, - 16644, 10, 0, self.session_key, - (not self.is_unicode and password_len) or 0, - (self.is_unicode and password_len) or 0, - 0, - CAP_UNICODE | CAP_LARGE_FILES | CAP_STATUS32) - - est_offset = SMBMessage.HEADER_STRUCT_SIZE + len(message.parameters_data) # To check if data until SMB paramaters are aligned to a 16-bit boundary - - message.data = self.password - if (est_offset + len(message.data)) % 2 != 0 and message.flags2 & SMB_FLAGS2_UNICODE: - message.data = message.data + '\0' - - if message.flags2 & SMB_FLAGS2_UNICODE: - message.data = message.data + self.username.encode('UTF-16LE') + '\0' - else: - message.data = message.data + str(self.username) + '\0' - - if (est_offset + len(message.data)) % 2 != 0 and message.flags2 & SMB_FLAGS2_UNICODE: - message.data = message.data + '\0' - - if message.flags2 & SMB_FLAGS2_UNICODE: - message.data = message.data + self.domain.encode('UTF-16LE') + '\0\0' + 'pysmb'.encode('UTF-16LE') + '\0\0' - else: - message.data = message.data + self.domain + '\0pysmb\0' - - -class 
ComSessionSetupAndxResponse(Payload): - """ - Contains information on the SMB_COM_SESSION_SETUP_ANDX response from server - - If the underlying SMB message's flags2 does not have SMB_FLAGS2_EXTENDED_SECURITY bit enabled, - then the instance will have the following attributes, - - action - - If the underlying SMB message's flags2 has SMB_FLAGS2_EXTENDED_SECURITY bit enabled - and the message status is STATUS_MORE_PROCESSING_REQUIRED or equals to 0x00 (no error), - then the instance will have the following attributes, - - action - - securityblob - - If the underlying SMB message's flags2 has SMB_FLAGS2_EXTENDED_SECURITY bit enabled but - the message status is not STATUS_MORE_PROCESSING_REQUIRED - - References: - =========== - - [MS-SMB]: 2.2.4.6.2 - - [MS-CIFS]: 2.2.4.53.2 - """ - - NOSECURE_PARAMETER_STRUCT_FORMAT = '<BBHH' - NOSECURE_PARAMETER_STRUCT_SIZE = struct.calcsize(NOSECURE_PARAMETER_STRUCT_FORMAT) - - SECURE_PARAMETER_STRUCT_FORMAT = '<BBHHH' - SECURE_PARAMETER_STRUCT_SIZE = struct.calcsize(SECURE_PARAMETER_STRUCT_FORMAT) - - def decode(self, message): - assert message.command == SMB_COM_SESSION_SETUP_ANDX - if not message.hasExtendedSecurity: - if not message.status.hasError: - if len(message.parameters_data) < self.NOSECURE_PARAMETER_STRUCT_SIZE: - raise ProtocolError('Not enough data to decode SMB_COM_SESSION_SETUP_ANDX (no security extensions) parameters', message.raw_data, message) - - _, _, _, self.action = struct.unpack(self.NOSECURE_PARAMETER_STRUCT_FORMAT, message.parameters_data[:self.NOSECURE_PARAMETER_STRUCT_SIZE]) - else: - if not message.status.hasError or message.status.internal_value == 0xc0000016: # STATUS_MORE_PROCESSING_REQUIRED - if len(message.parameters_data) < self.SECURE_PARAMETER_STRUCT_SIZE: - raise ProtocolError('Not enough data to decode SMB_COM_SESSION_SETUP_ANDX (with security extensions) parameters', message.raw_data, message) - - _, _, _, self.action, blob_length = struct.unpack(self.SECURE_PARAMETER_STRUCT_FORMAT, 
message.parameters_data[:self.SECURE_PARAMETER_STRUCT_SIZE]) - if len(message.data) < blob_length: - raise ProtocolError('Not enough data to decode SMB_COM_SESSION_SETUP_ANDX (with security extensions) security blob', message.raw_data, message) - - self.security_blob = message.data[:blob_length] - - -class ComTreeConnectAndxRequest(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.55.1 - - [MS-SMB]: 2.2.4.7.1 - """ - - PAYLOAD_STRUCT_FORMAT = '<HH' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def __init__(self, path, service, password = ''): - self.path = path - self.service = service - self.password = password + '\0' - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_TREE_CONNECT_ANDX - - def prepare(self, message): - password_len = len(self.password) - message.parameters_data = \ - self.DEFAULT_ANDX_PARAM_HEADER + \ - struct.pack(self.PAYLOAD_STRUCT_FORMAT, - 0x08 | \ - ((message.hasExtendedSecurity and 0x0004) or 0x00) | \ - ((message.tid and message.tid != 0xFFFF and 0x0001) or 0x00), # Disconnect tid, if message.tid must be non-zero - password_len) - - padding = '' - if password_len % 2 == 0: - padding = '\0' - - # Note that service field is never encoded in UTF-16LE. [MS-CIFS]: 2.2.1.1 - message.data = self.password + padding + self.path.encode('UTF-16LE') + '\0\0' + self.service + '\0' - - -class ComTreeConnectAndxResponse(Payload): - """ - Contains information about the SMB_COM_TREE_CONNECT_ANDX response from the server. 
- - If the message has no errors, each instance contains the following attributes: - - optional_support - - References: - =========== - - [MS-CIFS]: 2.2.4.55.2 - """ - - PAYLOAD_STRUCT_FORMAT = '<BBHH' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def decode(self, message): - assert message.command == SMB_COM_TREE_CONNECT_ANDX - - if not message.status.hasError: - if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE: - raise ProtocolError('Not enough data to decode SMB_COM_TREE_CONNECT_ANDX parameters', message.raw_data, message) - - _, _, _, self.optional_support = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) - - -class ComNTCreateAndxRequest(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.64.1 - - [MS-SMB]: 2.2.4.9.1 - """ - - PAYLOAD_STRUCT_FORMAT = '<BHIIIQIIIIIB' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def __init__(self, filename, flags = 0, root_fid = 0, access_mask = 0, allocation_size = 0L, ext_attr = 0, - share_access = 0, create_disp = 0, create_options = 0, impersonation = 0, security_flags = 0): - self.filename = (filename + '\0').encode('UTF-16LE') - self.flags = flags - self.root_fid = root_fid - self.access_mask = access_mask - self.allocation_size = allocation_size - self.ext_attr = ext_attr - self.share_access = share_access - self.create_disp = create_disp - self.create_options = create_options - self.impersonation = impersonation - self.security_flags = security_flags - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_NT_CREATE_ANDX - - def prepare(self, message): - filename_len = len(self.filename) - - message.parameters_data = \ - self.DEFAULT_ANDX_PARAM_HEADER + \ - struct.pack(self.PAYLOAD_STRUCT_FORMAT, - 0x00, # reserved - filename_len, # NameLength - self.flags, # Flags - self.root_fid, # RootDirectoryFID - self.access_mask, # DesiredAccess - self.allocation_size, # 
AllocationSize - self.ext_attr, # ExtFileAttributes - self.share_access, # ShareAccess - self.create_disp, # CreateDisposition - self.create_options, # CreateOptions - self.impersonation, # ImpersonationLevel - self.security_flags) # SecurityFlags - - padding = '' - if (message.HEADER_STRUCT_SIZE + len(message.parameters_data)) % 2 != 0: - padding = '\0' - - message.data = padding + self.filename - - -class ComNTCreateAndxResponse(Payload): - """ - Contains (partial) information about the SMB_COM_NT_CREATE_ANDX response from the server. - - Each instance contains the following attributes after decoding: - - oplock_level - - fid - - References: - =========== - - [MS-CIFS]: 2.2.4.64.2 - """ - PAYLOAD_STRUCT_FORMAT = '<BBHBH' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def decode(self, message): - assert message.command == SMB_COM_NT_CREATE_ANDX - - if not message.status.hasError: - if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE: - raise ProtocolError('Not enough data to decode SMB_COM_NT_CREATE_ANDX parameters', message.raw_data, message) - - _, _, _, self.oplock_level, self.fid = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) - - -class ComTransactionRequest(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.33.1 - """ - - PAYLOAD_STRUCT_FORMAT = '<HHHHBBHIHHHHHH' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def __init__(self, max_params_count, max_data_count, max_setup_count, - total_params_count = 0, total_data_count = 0, - params_bytes = '', data_bytes = '', setup_bytes = '', - flags = 0, timeout = 0, name = "\\PIPE\\"): - self.total_params_count = total_params_count or len(params_bytes) - self.total_data_count = total_data_count or len(data_bytes) - self.max_params_count = max_params_count - self.max_data_count = max_data_count - self.max_setup_count = max_setup_count - self.flags = flags - self.timeout = timeout - self.params_bytes = 
params_bytes - self.data_bytes = data_bytes - self.setup_bytes = setup_bytes - self.name = name - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_TRANSACTION - - def prepare(self, message): - name = (self.name + '\0').encode('UTF-16LE') - name_len = len(name) - setup_bytes_len = len(self.setup_bytes) - params_bytes_len = len(self.params_bytes) - data_bytes_len = len(self.data_bytes) - - padding0 = '' - offset = message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_bytes_len + 2 # constant 2 is for the ByteCount field in the SMB header (i.e. field which indicates number of data bytes after the SMB parameters) - if offset % 2 != 0: - padding0 = '\0' - offset += 1 - - offset += name_len # For the name field - padding1 = '' - if offset % 4 != 0: - padding1 = '\0'*(4-offset%4) - offset += (4-offset%4) - - if params_bytes_len > 0: - params_bytes_offset = offset - offset += params_bytes_len - else: - params_bytes_offset = 0 - - padding2 = '' - if offset % 4 != 0: - padding2 = '\0'*(4-offset%4) - offset += (4-offset%4) - - if data_bytes_len > 0: - data_bytes_offset = offset - else: - data_bytes_offset = 0 - - message.parameters_data = \ - struct.pack(self.PAYLOAD_STRUCT_FORMAT, - self.total_params_count, - self.total_data_count, - self.max_params_count, - self.max_data_count, - self.max_setup_count, - 0x00, # Reserved1. Must be 0x00 - self.flags, - self.timeout, - 0x0000, # Reserved2. 
Must be 0x0000 - params_bytes_len, - params_bytes_offset, - data_bytes_len, - data_bytes_offset, - int(setup_bytes_len / 2)) + \ - self.setup_bytes - - message.data = padding0 + name + padding1 + self.params_bytes + padding2 + self.data_bytes - - -class ComTransactionResponse(Payload): - """ - Contains information about a SMB_COM_TRANSACTION response from the server - - After decoding, each instance contains the following attributes: - - total_params_count (integer) - - total_data_count (integer) - - setup_bytes (string) - - data_bytes (string) - - params_bytes (string) - - References: - =========== - - [MS-CIFS]: 2.2.4.33.2 - """ - - PAYLOAD_STRUCT_FORMAT = '<HHHHHHHHHH' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def decode(self, message): - assert message.command == SMB_COM_TRANSACTION - - if not message.status.hasError: - if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE: - raise ProtocolError('Not enough data to decode SMB_COM_TRANSACTION parameters', message.raw_data, message) - - self.total_params_count, self.total_data_count, _, \ - params_bytes_len, params_bytes_offset, params_bytes_displ, \ - data_bytes_len, data_bytes_offset, data_bytes_displ, \ - setup_count = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) - - if setup_count > 0: - setup_bytes_len = setup_count * 2 - - if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE + setup_bytes_len: - raise ProtocolError('Not enough data to decode SMB_COM_TRANSACTION parameters', message.raw_data, message) - - self.setup_bytes = message.parameters_data[self.PAYLOAD_STRUCT_SIZE:self.PAYLOAD_STRUCT_SIZE+setup_bytes_len] - else: - self.setup_bytes = '' - - offset = message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_count * 2 + 2 # constant 2 is for the ByteCount field in the SMB header (i.e. 
field which indicates number of data bytes after the SMB parameters) - - if params_bytes_len > 0: - self.params_bytes = message.data[params_bytes_offset-offset:params_bytes_offset-offset+params_bytes_len] - else: - self.params_bytes = '' - - if data_bytes_len > 0: - self.data_bytes = message.data[data_bytes_offset-offset:data_bytes_offset-offset+data_bytes_len] - else: - self.data_bytes = '' - - -class ComTransaction2Request(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.46.1 - """ - - PAYLOAD_STRUCT_FORMAT = 'HHHHBBHIHHHHHH' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def __init__(self, max_params_count, max_data_count, max_setup_count, - total_params_count = 0, total_data_count = 0, - params_bytes = '', data_bytes = '', setup_bytes = '', - flags = 0, timeout = 0): - self.total_params_count = total_params_count or len(params_bytes) - self.total_data_count = total_data_count or len(data_bytes) - self.max_params_count = max_params_count - self.max_data_count = max_data_count - self.max_setup_count = max_setup_count - self.flags = flags - self.timeout = timeout - self.params_bytes = params_bytes - self.data_bytes = data_bytes - self.setup_bytes = setup_bytes - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_TRANSACTION2 - - def prepare(self, message): - setup_bytes_len = len(self.setup_bytes) - params_bytes_len = len(self.params_bytes) - data_bytes_len = len(self.data_bytes) - name = '\0\0' - - padding0 = '' - offset = message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_bytes_len + 2 # constant 2 is for the ByteCount field in the SMB header (i.e. 
field which indicates number of data bytes after the SMB parameters) - if offset % 2 != 0: - padding0 = '\0' - offset += 1 - - offset += 2 # For the name field - padding1 = '' - if offset % 4 != 0: - padding1 = '\0'*(4-offset%4) - - if params_bytes_len > 0: - params_bytes_offset = offset - offset += params_bytes_len - else: - params_bytes_offset = 0 - - padding2 = '' - if offset % 4 != 0: - padding2 = '\0'*(4-offset%4) - - if data_bytes_len > 0: - data_bytes_offset = offset - else: - data_bytes_offset = 0 - - message.parameters_data = \ - struct.pack(self.PAYLOAD_STRUCT_FORMAT, - self.total_params_count, - self.total_data_count, - self.max_params_count, - self.max_data_count, - self.max_setup_count, - 0x00, # Reserved1. Must be 0x00 - self.flags, - self.timeout, - 0x0000, # Reserved2. Must be 0x0000 - params_bytes_len, - params_bytes_offset, - data_bytes_len, - data_bytes_offset, - int(setup_bytes_len / 2)) + \ - self.setup_bytes - - message.data = padding0 + name + padding1 + self.params_bytes + padding2 + self.data_bytes - - -class ComTransaction2Response(Payload): - """ - Contains information about a SMB_COM_TRANSACTION2 response from the server - - After decoding, each instance contains the following attributes: - - total_params_count (integer) - - total_data_count (integer) - - setup_bytes (string) - - data_bytes (string) - - params_bytes (string) - - References: - =========== - - [MS-CIFS]: 2.2.4.46.2 - """ - - PAYLOAD_STRUCT_FORMAT = '<HHHHHHHHHBB' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def decode(self, message): - assert message.command == SMB_COM_TRANSACTION2 - - if not message.status.hasError: - if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE: - raise ProtocolError('Not enough data to decode SMB_COM_TRANSACTION2 parameters', message.raw_data, message) - - self.total_params_count, self.total_data_count, _, \ - params_bytes_len, params_bytes_offset, params_bytes_displ, \ - data_bytes_len, data_bytes_offset, 
data_bytes_displ, \ - setup_count, _ = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) - - if setup_count > 0: - setup_bytes_len = setup_count * 2 - - if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE + setup_bytes_len: - raise ProtocolError('Not enough data to decode SMB_COM_TRANSACTION parameters', message.raw_data, message) - - self.setup_bytes = message.parameters_data[self.PAYLOAD_STRUCT_SIZE:self.PAYLOAD_STRUCT_SIZE+setup_bytes_len] - else: - self.setup_bytes = '' - - offset = message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_count * 2 + 2 # constant 2 is for the ByteCount field in the SMB header (i.e. field which indicates number of data bytes after the SMB parameters) - - if params_bytes_len > 0: - self.params_bytes = message.data[params_bytes_offset-offset:params_bytes_offset-offset+params_bytes_len] - else: - self.params_bytes = '' - - if data_bytes_len > 0: - self.data_bytes = message.data[data_bytes_offset-offset:data_bytes_offset-offset+data_bytes_len] - else: - self.data_bytes = '' - - -class ComCloseRequest(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.5.1 - """ - - PAYLOAD_STRUCT_FORMAT = '<HI' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def __init__(self, fid, last_modified_time = 0xFFFFFFFF): - self.fid = fid - self.last_modified_time = last_modified_time - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_CLOSE - - def prepare(self, message): - message.parameters_data = struct.pack(self.PAYLOAD_STRUCT_FORMAT, self.fid, self.last_modified_time) - message.data = '' - - -class ComOpenAndxRequest(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.41.1 - """ - - PAYLOAD_STRUCT_FORMAT = '<HHHHIHIII' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def __init__(self, filename, access_mode, open_mode, flags = 0x0000, search_attributes = 0, file_attributes = 0, 
create_time = 0, timeout = 0): - """ - @param create_time: Epoch time value to indicate the time of creation for this file. If zero, we will automatically assign the current time - @type create_time: int - @param timeout: Number of milliseconds to wait for blocked open request before failing - @type timeout: int - """ - self.filename = filename - self.access_mode = access_mode - self.open_mode = open_mode - self.flags = flags - self.search_attributes = search_attributes - self.file_attributes = file_attributes - self.create_time = create_time or int(time.time()) - self.timeout = timeout - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_OPEN_ANDX - - def prepare(self, message): - message.parameters_data = \ - self.DEFAULT_ANDX_PARAM_HEADER + \ - struct.pack(self.PAYLOAD_STRUCT_FORMAT, - self.flags, - self.access_mode, - self.search_attributes, - self.file_attributes, - self.create_time, - self.open_mode, - 0, # AllocationSize - 0, # Timeout (in milli-secs) - 0) # Reserved - - message.data = '\0' + self.filename.encode('UTF-16LE') + '\0\0' - - -class ComOpenAndxResponse(Payload): - """ - Contains information about a SMB_COM_OPEN_ANDX response from the server - - After decoding, each instance will contain the following attributes: - - fid (integer) - - file_attributes (integer) - - last_write_time (long) - - access_rights (integer) - - resource_type (integer) - - open_results (integer) - - References: - =========== - - [MS-CIFS]: 2.2.4.41.2 - - [MS-SMB]: 2.2.4.1.2 - """ - - PAYLOAD_STRUCT_FORMAT = '<BBHHHIIHHHHHHH' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def decode(self, message): - assert message.command == SMB_COM_OPEN_ANDX - - if not message.status.hasError: - if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE: - raise ProtocolError('Not enough data to decode SMB_COM_OPEN_ANDX parameters', message.raw_data, message) - - _, _, _, self.fid, self.file_attributes, 
self.last_write_time, _, \ - self.access_rights, self.resource_type, _, self.open_results, _, _, _ = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, - message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) - - -class ComWriteAndxRequest(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.43.1 - - [MS-SMB]: 2.2.4.3.1 - """ - - PAYLOAD_STRUCT_FORMAT = '<HIIHHHHHI' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def __init__(self, fid, data_bytes, offset, write_mode = 0, timeout = 0): - """ - @param timeout: Number of milliseconds to wait for blocked write request before failing. Must be zero for writing to regular file - @type timeout: int - """ - self.fid = fid - self.offset = offset - self.data_bytes = data_bytes - self.timeout = timeout - self.write_mode = write_mode - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_WRITE_ANDX - - def prepare(self, message): - # constant 1 is to account for the pad byte in the message.data - # constant 2 is for the ByteCount field in the SMB header (i.e. 
field which indicates number of data bytes after the SMB parameters) - data_offset = message.HEADER_STRUCT_SIZE + self.DEFAULT_ANDX_PARAM_SIZE + self.PAYLOAD_STRUCT_SIZE + 1 + 2 - data_len = len(self.data_bytes) - - message.parameters_data = \ - self.DEFAULT_ANDX_PARAM_HEADER + \ - struct.pack(self.PAYLOAD_STRUCT_FORMAT, - self.fid, - self.offset & 0xFFFFFFFF, - self.timeout, - self.write_mode, - data_len, # Remaining - 0x0000, # Reserved - len(self.data_bytes), # DataLength - data_offset, # DataOffset - self.offset >> 32) # OffsetHigh field defined in [MS-SMB]: 2.2.4.3.1 - - message.data = '\0' + self.data_bytes - - -class ComWriteAndxResponse(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.43.2 - - [MS-SMB]: 2.2.4.3.2 - """ - - PAYLOAD_STRUCT_FORMAT = '<BBHHHHH' # We follow the SMB_COM_WRITEX_ANDX server extensions in [MS-SMB]: 2.2.4.3.2 - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def decode(self, message): - assert message.command == SMB_COM_WRITE_ANDX - - if not message.status.hasError: - if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE: - raise ProtocolError('Not enough data to decode SMB_COM_WRITE_ANDX parameters', message.raw_data, message) - - _, _, _, count, self.available, high_count, _ = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) - self.count = (count & 0xFFFF) | (high_count << 16) - - -class ComReadAndxRequest(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.42.1 - - [MS-SMB]: 2.2.4.2.1 - """ - - PAYLOAD_STRUCT_FORMAT = '<HIHHIHI' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def __init__(self, fid, offset, max_return_bytes_count, min_return_bytes_count, timeout = 0, remaining = 0): - """ - @param timeout: If reading from a regular file, this parameter must be 0. 
- @type timeout: int - """ - self.fid = fid - self.remaining = remaining - self.max_return_bytes_count = max_return_bytes_count - self.min_return_bytes_count = min_return_bytes_count - self.offset = offset - self.timeout = timeout - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_READ_ANDX - - def prepare(self, message): - message.parameters_data = \ - self.DEFAULT_ANDX_PARAM_HEADER + \ - struct.pack(self.PAYLOAD_STRUCT_FORMAT, - self.fid, - self.offset & 0xFFFFFFFF, - self.max_return_bytes_count, - self.min_return_bytes_count, - self.timeout or (self.max_return_bytes_count >> 32), # Note that in [MS-SMB]: 2.2.4.2.1, this field can also act as MaxCountHigh field - self.remaining, # In [MS-CIFS]: 2.2.4.42.1, this field must be set to 0x0000 - self.offset >> 32) - - message.data = '' - - -class ComReadAndxResponse(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.42.2 - - [MS-SMB]: 2.2.4.2.2 - """ - - PAYLOAD_STRUCT_FORMAT = '<BBHHHHHHHHHHH' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def decode(self, message): - assert message.command == SMB_COM_READ_ANDX - - if not message.status.hasError: - if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE: - raise ProtocolError('Not enough data to decode SMB_COM_READ_ANDX parameters', message.raw_data, message) - - _, _, _, _, _, _, self.data_length, data_offset, _, _, _, _, _ = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, - message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) - - offset = data_offset - message.HEADER_STRUCT_SIZE - self.PAYLOAD_STRUCT_SIZE - 2 # constant 2 is for the ByteCount field in the SMB header (i.e. 
field which indicates number of data bytes after the SMB parameters) - self.data = message.data[offset:offset+self.data_length] - assert len(self.data) == self.data_length - - -class ComDeleteRequest(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.7.1 - """ - - def __init__(self, filename_pattern, search_attributes = 0): - self.filename_pattern = filename_pattern - self.search_attributes = search_attributes - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_DELETE - - def prepare(self, message): - message.parameters_data = struct.pack('<H', self.search_attributes) - message.data = '\x04' + self.filename_pattern.encode('UTF-16LE') + '\0\0' - - -class ComCreateDirectoryRequest(Payload): - """ - Although this command has been marked deprecated in [MS-CIFS], we continue to use it for its simplicity - as compared to its replacement TRANS2_CREATE_DIRECTORY sub-command [MS-CIFS]: 2.2.6.14 - - References: - =========== - - [MS-CIFS]: 2.2.4.1.1 - """ - - def __init__(self, path): - self.path = path - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_CREATE_DIRECTORY - - def prepare(self, message): - message.parameters_data = '' - message.data = '\x04' + self.path.encode('UTF-16LE') + '\0\0' - - -class ComDeleteDirectoryRequest(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.2.1 - """ - - def __init__(self, path): - self.path = path - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_DELETE_DIRECTORY - - def prepare(self, message): - message.parameters_data = '' - message.data = '\x04' + self.path.encode('UTF-16LE') + '\0\0' - - -class ComRenameRequest(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.8.1 - """ - - def __init__(self, old_path, new_path, search_attributes = 0): - self.old_path = old_path - self.new_path = new_path - self.search_attributes = search_attributes - - def 
initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_RENAME - - def prepare(self, message): - message.parameters_data = struct.pack('<H', self.search_attributes) - message.data = '\x04' + self.old_path.encode('UTF-16LE') + '\x00\x00\x04\x00' + self.new_path.encode('UTF-16LE') + '\x00\x00' - - -class ComEchoRequest(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.39.1 - """ - - def __init__(self, echo_data = '', echo_count = 1): - self.echo_count = echo_count - self.echo_data = echo_data - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_ECHO - message.tid = 0xFFFF - - def prepare(self, message): - message.parameters_data = struct.pack('<H', self.echo_count) - message.data = self.echo_data - - -class ComEchoResponse(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.39.2 - """ - - def decode(self, message): - self.sequence_number = struct.unpack('<H', message.parameters_data[:2])[0] - self.data = message.data - - -class ComNTTransactRequest(Payload): - """ - References: - =========== - - [MS-CIFS]: 2.2.4.62.1 - """ - PAYLOAD_STRUCT_FORMAT = '<BHIIIIIIIIBH' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def __init__(self, function, max_params_count, max_data_count, max_setup_count, - total_params_count = 0, total_data_count = 0, - params_bytes = '', setup_bytes = '', data_bytes = ''): - self.function = function - self.total_params_count = total_params_count or len(params_bytes) - self.total_data_count = total_data_count or len(data_bytes) - self.max_params_count = max_params_count - self.max_data_count = max_data_count - self.max_setup_count = max_setup_count - self.params_bytes = params_bytes - self.setup_bytes = setup_bytes - self.data_bytes = data_bytes - - def initMessage(self, message): - Payload.initMessage(self, message) - message.command = SMB_COM_NT_TRANSACT - - def prepare(self, message): - setup_bytes_len = 
len(self.setup_bytes) - params_bytes_len = len(self.params_bytes) - data_bytes_len = len(self.data_bytes) - - padding0 = '' - offset = message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_bytes_len + 2 # constant 2 is for the ByteCount field in the SMB header (i.e. field which indicates number of data bytes after the SMB parameters) - if offset % 4 != 0: - padding0 = '\0'*(4-offset%4) - offset += (4-offset%4) - - if params_bytes_len > 0: - params_bytes_offset = offset - else: - params_bytes_offset = 0 - - offset += params_bytes_len - padding1 = '' - if offset % 4 != 0: - padding1 = '\0'*(4-offset%4) - offset += (4-offset%4) - - if data_bytes_len > 0: - data_bytes_offset = offset - else: - data_bytes_offset = 0 - - message.parameters_data = \ - struct.pack(self.PAYLOAD_STRUCT_FORMAT, - self.max_setup_count, - 0x00, # Reserved1. Must be 0x00 - self.total_params_count, - self.total_data_count, - self.max_params_count, - self.max_data_count, - params_bytes_len, - params_bytes_offset, - data_bytes_len, - data_bytes_offset, - int(setup_bytes_len / 2), - self.function) + \ - self.setup_bytes - - message.data = padding0 + self.params_bytes + padding1 + self.data_bytes - - -class ComNTTransactResponse(Payload): - """ - Contains information about a SMB_COM_NT_TRANSACT response from the server - - After decoding, each instance contains the following attributes: - - total_params_count (integer) - - total_data_count (integer) - - setup_bytes (string) - - data_bytes (string) - - params_bytes (string) - - References: - =========== - - [MS-CIFS]: 2.2.4.62.2 - """ - PAYLOAD_STRUCT_FORMAT = '<3sIIIIIIIIBH' - PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) - - def decode(self, message): - assert message.command == SMB_COM_NT_TRANSACT - - if not message.status.hasError: - _, self.total_params_count, self.total_data_count, \ - params_count, params_offset, params_displ, \ - data_count, data_offset, data_displ, setup_count = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, 
- message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) - - self.setup_bytes = message.parameters_data[self.PAYLOAD_STRUCT_SIZE:self.PAYLOAD_STRUCT_SIZE+setup_count*2] - - if params_count > 0: - params_offset -= message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_count*2 + 2 - self.params_bytes = message.data[params_offset:params_offset+params_count] - else: - self.params_bytes = '' - - if data_count > 0: - data_offset -= message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_count*2 + 2 - self.data_bytes = message.data[data_offset:data_offset+data_count] - else: - self.data_bytes = '' + +import os, sys, struct, types, logging, binascii, time +from StringIO import StringIO +from smb_constants import * + + +# Set to True if you want to enable support for extended security. Required for Windows Vista and later +SUPPORT_EXTENDED_SECURITY = True + +# Set to True if you want to enable SMB2 protocol. +SUPPORT_SMB2 = True + +# Set to True if you want to enable SMB2.1 and above protocol. 
+SUPPORT_SMB2x = True + +# Supported dialects +NT_LAN_MANAGER_DIALECT = 0 # 'NT LM 0.12' is always the first element in the dialect list and must always be included (MS-SMB 2.2.4.5.1) + +# Return the list of support SMB dialects based on the SUPPORT_x constants +def init_dialects_list(): + dialects = [ 'NT LM 0.12' ] + if SUPPORT_SMB2: + dialects.append('SMB 2.002') + if SUPPORT_SMB2x: + dialects.append('SMB 2.???') + return dialects + +class UnsupportedFeature(Exception): + """ + Raised when an supported feature is present/required in the protocol but is not + currently supported by pysmb + """ + pass + + +class ProtocolError(Exception): + + def __init__(self, message, data_buf = None, smb_message = None): + self.message = message + self.data_buf = data_buf + self.smb_message = smb_message + + def __str__(self): + b = StringIO() + b.write(self.message + os.linesep) + if self.smb_message: + b.write('=' * 20 + ' SMB Message ' + '=' * 20 + os.linesep) + b.write(str(self.smb_message)) + + if self.data_buf: + b.write('=' * 20 + ' SMB Data Packet (hex) ' + '=' * 20 + os.linesep) + b.write(binascii.hexlify(self.data_buf)) + b.write(os.linesep) + + return b.getvalue() + +class SMB2ProtocolHeaderError(ProtocolError): + + def __init__(self): + ProtocolError.__init__(self, "Packet header belongs to SMB2") + +class OperationFailure(Exception): + + def __init__(self, message, smb_messages): + self.args = [ message ] + self.message = message + self.smb_messages = smb_messages + + def __str__(self): + b = StringIO() + b.write(self.message + os.linesep) + + for idx, m in enumerate(self.smb_messages): + b.write('=' * 20 + ' SMB Message %d ' % idx + '=' * 20 + os.linesep) + b.write('SMB Header:' + os.linesep) + b.write('-----------' + os.linesep) + b.write(str(m)) + b.write('SMB Data Packet (hex):' + os.linesep) + b.write('----------------------' + os.linesep) + b.write(binascii.hexlify(m.raw_data)) + b.write(os.linesep) + + return b.getvalue() + + +class SMBError: + + def 
__init__(self): + self.reset() + + def reset(self): + self.internal_value = 0L + self.is_ntstatus = True + + def __str__(self): + if self.is_ntstatus: + return 'NTSTATUS=0x%08X' % self.internal_value + else: + return 'ErrorClass=0x%02X ErrorCode=0x%04X' % ( self.internal_value >> 24, self.internal_value & 0xFFFF ) + + @property + def hasError(self): + return self.internal_value != 0 + + +class SMBMessage: + + HEADER_STRUCT_FORMAT = "<4sBIBHHQxxHHHHB" + HEADER_STRUCT_SIZE = struct.calcsize(HEADER_STRUCT_FORMAT) + + log = logging.getLogger('SMB.SMBMessage') + protocol = 1 + + def __init__(self, conn, payload = None): + self.reset() + self.conn = conn + if payload: + self.payload = payload + self.payload.initMessage(self) + + def __str__(self): + b = StringIO() + b.write('Command: 0x%02X (%s) %s' % ( self.command, SMB_COMMAND_NAMES.get(self.command, '<unknown>'), os.linesep )) + b.write('Status: %s %s' % ( str(self.status), os.linesep )) + b.write('Flags: 0x%02X %s' % ( self.flags, os.linesep )) + b.write('Flags2: 0x%04X %s' % ( self.flags2, os.linesep )) + b.write('PID: %d %s' % ( self.pid, os.linesep )) + b.write('UID: %d %s' % ( self.uid, os.linesep )) + b.write('MID: %d %s' % ( self.mid, os.linesep )) + b.write('TID: %d %s' % ( self.tid, os.linesep )) + b.write('Security: 0x%016X %s' % ( self.security, os.linesep )) + b.write('Parameters: %d bytes %s%s %s' % ( len(self.parameters_data), os.linesep, binascii.hexlify(self.parameters_data), os.linesep )) + b.write('Data: %d bytes %s%s %s' % ( len(self.data), os.linesep, binascii.hexlify(self.data), os.linesep )) + return b.getvalue() + + def reset(self): + self.raw_data = '' + self.command = 0 + self.status = SMBError() + self.flags = 0 + self.flags2 = 0 + self.pid = 0 + self.tid = 0 + self.uid = 0 + self.mid = 0 + self.security = 0L + self.parameters_data = '' + self.data = '' + self.payload = None + + @property + def isReply(self): + return bool(self.flags & SMB_FLAGS_REPLY) + + @property + def 
hasExtendedSecurity(self): + return bool(self.flags2 & SMB_FLAGS2_EXTENDED_SECURITY) + + def encode(self): + """ + Encode this SMB message into a series of bytes suitable to be embedded with a NetBIOS session message. + AssertionError will be raised if this SMB message has not been initialized with a Payload instance + + @return: a string containing the encoded SMB message + """ + assert self.payload + + self.pid = os.getpid() + self.payload.prepare(self) + + parameters_len = len(self.parameters_data) + assert parameters_len % 2 == 0 + + headers_data = struct.pack(self.HEADER_STRUCT_FORMAT, + '\xFFSMB', self.command, self.status.internal_value, self.flags, + self.flags2, (self.pid >> 16) & 0xFFFF, self.security, self.tid, + self.pid & 0xFFFF, self.uid, self.mid, int(parameters_len / 2)) + return headers_data + self.parameters_data + struct.pack('<H', len(self.data)) + self.data + + def decode(self, buf): + """ + Decodes the SMB message in buf. + All fields of the SMBMessage object will be reset to default values before decoding. + On errors, do not assume that the fields will be reinstated back to what they are before + this method is invoked. 
+ + @param buf: data containing one complete SMB message + @type buf: string + @return: a positive integer indicating the number of bytes used in buf to decode this SMB message + @raise ProtocolError: raised when decoding fails + """ + buf_len = len(buf) + if buf_len < self.HEADER_STRUCT_SIZE: + # We need at least 32 bytes (header) + 1 byte (parameter count) + raise ProtocolError('Not enough data to decode SMB header', buf) + + self.reset() + + protocol, self.command, status, self.flags, \ + self.flags2, pid_high, self.security, self.tid, \ + pid_low, self.uid, self.mid, params_count = struct.unpack(self.HEADER_STRUCT_FORMAT, buf[:self.HEADER_STRUCT_SIZE]) + + if protocol == '\xFESMB': + raise SMB2ProtocolHeaderError() + if protocol != '\xFFSMB': + raise ProtocolError('Invalid 4-byte protocol field', buf) + + self.pid = (pid_high << 16) | pid_low + self.status.internal_value = status + self.status.is_ntstatus = bool(self.flags2 & SMB_FLAGS2_NT_STATUS) + + offset = self.HEADER_STRUCT_SIZE + if buf_len < params_count * 2 + 2: + # Not enough data in buf to decode up to body length + raise ProtocolError('Not enough data. Parameters list decoding failed', buf) + + datalen_offset = offset + params_count*2 + body_len = struct.unpack('<H', buf[datalen_offset:datalen_offset+2])[0] + if body_len > 0 and buf_len < (datalen_offset + 2 + body_len): + # Not enough data in buf to decode body + raise ProtocolError('Not enough data. 
Body decoding failed', buf) + + self.parameters_data = buf[offset:datalen_offset] + + if body_len > 0: + self.data = buf[datalen_offset+2:datalen_offset+2+body_len] + + self.raw_data = buf + self._decodePayload() + + return self.HEADER_STRUCT_SIZE + params_count * 2 + 2 + body_len + + def _decodePayload(self): + if self.command == SMB_COM_READ_ANDX: + self.payload = ComReadAndxResponse() + elif self.command == SMB_COM_WRITE_ANDX: + self.payload = ComWriteAndxResponse() + elif self.command == SMB_COM_TRANSACTION: + self.payload = ComTransactionResponse() + elif self.command == SMB_COM_TRANSACTION2: + self.payload = ComTransaction2Response() + elif self.command == SMB_COM_OPEN_ANDX: + self.payload = ComOpenAndxResponse() + elif self.command == SMB_COM_NT_CREATE_ANDX: + self.payload = ComNTCreateAndxResponse() + elif self.command == SMB_COM_TREE_CONNECT_ANDX: + self.payload = ComTreeConnectAndxResponse() + elif self.command == SMB_COM_ECHO: + self.payload = ComEchoResponse() + elif self.command == SMB_COM_SESSION_SETUP_ANDX: + self.payload = ComSessionSetupAndxResponse() + elif self.command == SMB_COM_NEGOTIATE: + self.payload = ComNegotiateResponse() + + if self.payload: + self.payload.decode(self) + + +class Payload: + + DEFAULT_ANDX_PARAM_HEADER = '\xFF\x00\x00\x00' + DEFAULT_ANDX_PARAM_SIZE = 4 + + def initMessage(self, message): + # SMB_FLAGS2_UNICODE must always be enabled. Without this, almost all the Payload subclasses will need to be + # rewritten to check for OEM/Unicode strings which will be tedious. Fortunately, almost all tested CIFS services + # support SMB_FLAGS2_UNICODE by default. 
+ assert message.payload == self + message.flags = SMB_FLAGS_CASE_INSENSITIVE | SMB_FLAGS_CANONICALIZED_PATHS + message.flags2 = SMB_FLAGS2_UNICODE | SMB_FLAGS2_NT_STATUS | SMB_FLAGS2_LONG_NAMES | SMB_FLAGS2_EAS + + if SUPPORT_EXTENDED_SECURITY: + message.flags2 |= SMB_FLAGS2_EXTENDED_SECURITY | SMB_FLAGS2_SMB_SECURITY_SIGNATURE + + def prepare(self, message): + raise NotImplementedError + + def decode(self, message): + raise NotImplementedError + + +class ComNegotiateRequest(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.52.1 + - [MS-SMB]: 2.2.4.5.1 + """ + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_NEGOTIATE + + def prepare(self, message): + assert message.payload == self + message.parameters_data = '' + message.data = ''.join(map(lambda s: '\x02'+s+'\x00', init_dialects_list())) + + +class ComNegotiateResponse(Payload): + """ + Contains information on the SMB_COM_NEGOTIATE response from server + + After calling the decode method, each instance will contain the following attributes, + - security_mode (integer) + - max_mpx_count (integer) + - max_number_vcs (integer) + - max_buffer_size (long) + - max_raw_size (long) + - session_key (long) + - capabilities (long) + - system_time (long) + - server_time_zone (integer) + - challenge_length (integer) + + If the underlying SMB message's flag2 does not have SMB_FLAGS2_EXTENDED_SECURITY bit enabled, + then the instance will have the following additional attributes, + - challenge (string) + - domain (unicode) + + If the underlying SMB message's flags2 has SMB_FLAGS2_EXTENDED_SECURITY bit enabled, + then the instance will have the following additional attributes, + - server_guid (string) + - security_blob (string) + + References: + =========== + - [MS-SMB]: 2.2.4.5.2.1 + - [MS-CIFS]: 2.2.4.52.2 + """ + + PAYLOAD_STRUCT_FORMAT = '<HBHHIIIIQHB' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def decode(self, message): + assert 
message.command == SMB_COM_NEGOTIATE + + if not message.isReply: + raise ProtocolError('Not a SMB_COM_NEGOTIATE reply', message.raw_data, message) + + self.security_mode, self.max_mpx_count, self.max_number_vcs, self.max_buffer_size, \ + self.max_raw_size, self.session_key, self.capabilities, self.system_time, self.server_time_zone, \ + self.challenge_length = ( 0, ) * 10 + + data_len = len(message.parameters_data) + if data_len < 2: + raise ProtocolError('Not enough data to decode SMB_COM_NEGOTIATE dialect_index field', message.raw_data, message) + + self.dialect_index = struct.unpack('<H', message.parameters_data[:2])[0] + if self.dialect_index == NT_LAN_MANAGER_DIALECT: + if data_len != (0x11 * 2): + raise ProtocolError('NT LAN Manager dialect selected in SMB_COM_NEGOTIATE but parameters bytes count (%d) does not meet specs' % data_len, + message.raw_data, message) + else: + _, self.security_mode, self.max_mpx_count, self.max_number_vcs, self.max_buffer_size, \ + self.max_raw_size, self.session_key, self.capabilities, self.system_time, self.server_time_zone, \ + self.challenge_length = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) + elif self.dialect_index == 0xFFFF: + raise ProtocolError('Server does not support any of the pysmb dialects. 
Please email pysmb to add in support for your OS', + message.raw_data, message) + else: + raise ProtocolError('Unknown dialect index (0x%04X)' % self.dialect_index, message.raw_data, message) + + data_len = len(message.data) + if not message.hasExtendedSecurity: + self.challenge, self.domain = '', '' + if self.challenge_length > 0: + if data_len >= self.challenge_length: + self.challenge = message.data[:self.challenge_length] + + s = '' + offset = self.challenge_length + while offset < data_len: + _s = message.data[offset:offset+2] + if _s == '\0\0': + self.domain = s.decode('UTF-16LE') + break + else: + s += _s + offset += 2 + else: + raise ProtocolError('Not enough data to decode SMB_COM_NEGOTIATE (without security extensions) Challenge field', message.raw_data, message) + else: + if data_len < 16: + raise ProtocolError('Not enough data to decode SMB_COM_NEGOTIATE (with security extensions) ServerGUID field', message.raw_data, message) + + self.server_guid = message.data[:16] + self.security_blob = message.data[16:] + + @property + def supportsExtendedSecurity(self): + return bool(self.capabilities & CAP_EXTENDED_SECURITY) + + +class ComSessionSetupAndxRequest__WithSecurityExtension(Payload): + """ + References: + =========== + - [MS-SMB]: 2.2.4.6.1 + """ + + PAYLOAD_STRUCT_FORMAT = '<HHHIHII' + + def __init__(self, session_key, security_blob): + self.session_key = session_key + self.security_blob = security_blob + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_SESSION_SETUP_ANDX + + def prepare(self, message): + assert message.hasExtendedSecurity + + message.flags2 |= SMB_FLAGS2_UNICODE + + cap = CAP_UNICODE | CAP_STATUS32 | CAP_EXTENDED_SECURITY | CAP_NT_SMBS + + message.parameters_data = \ + self.DEFAULT_ANDX_PARAM_HEADER + \ + struct.pack(self.PAYLOAD_STRUCT_FORMAT, + 16644, 10, 1, self.session_key, len(self.security_blob), 0, cap) + + message.data = self.security_blob + if (SMBMessage.HEADER_STRUCT_SIZE + 
len(message.parameters_data) + len(message.data)) % 2 != 0: + message.data = message.data + '\0' + message.data = message.data + '\0' * 4 + + +class ComSessionSetupAndxRequest__NoSecurityExtension(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.53.1 + """ + + PAYLOAD_STRUCT_FORMAT = '<HHHIHHII' + + def __init__(self, session_key, username, password, is_unicode, domain): + self.username = username + self.session_key = session_key + self.password = password + self.is_unicode = is_unicode + self.domain = domain + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_SESSION_SETUP_ANDX + + def prepare(self, message): + if self.is_unicode: + message.flags2 |= SMB_FLAGS2_UNICODE + else: + message.flags2 &= (~SMB_FLAGS2_UNICODE & 0xFFFF) + + password_len = len(self.password) + message.parameters_data = \ + self.DEFAULT_ANDX_PARAM_HEADER + \ + struct.pack(self.PAYLOAD_STRUCT_FORMAT, + 16644, 10, 0, self.session_key, + (not self.is_unicode and password_len) or 0, + (self.is_unicode and password_len) or 0, + 0, + CAP_UNICODE | CAP_LARGE_FILES | CAP_STATUS32) + + est_offset = SMBMessage.HEADER_STRUCT_SIZE + len(message.parameters_data) # To check if data until SMB paramaters are aligned to a 16-bit boundary + + message.data = self.password + if (est_offset + len(message.data)) % 2 != 0 and message.flags2 & SMB_FLAGS2_UNICODE: + message.data = message.data + '\0' + + if message.flags2 & SMB_FLAGS2_UNICODE: + message.data = message.data + self.username.encode('UTF-16LE') + '\0' + else: + message.data = message.data + str(self.username) + '\0' + + if (est_offset + len(message.data)) % 2 != 0 and message.flags2 & SMB_FLAGS2_UNICODE: + message.data = message.data + '\0' + + if message.flags2 & SMB_FLAGS2_UNICODE: + message.data = message.data + self.domain.encode('UTF-16LE') + '\0\0' + 'pysmb'.encode('UTF-16LE') + '\0\0' + else: + message.data = message.data + self.domain + '\0pysmb\0' + + +class 
ComSessionSetupAndxResponse(Payload): + """ + Contains information on the SMB_COM_SESSION_SETUP_ANDX response from server + + If the underlying SMB message's flags2 does not have SMB_FLAGS2_EXTENDED_SECURITY bit enabled, + then the instance will have the following attributes, + - action + + If the underlying SMB message's flags2 has SMB_FLAGS2_EXTENDED_SECURITY bit enabled + and the message status is STATUS_MORE_PROCESSING_REQUIRED or equals to 0x00 (no error), + then the instance will have the following attributes, + - action + - securityblob + + If the underlying SMB message's flags2 has SMB_FLAGS2_EXTENDED_SECURITY bit enabled but + the message status is not STATUS_MORE_PROCESSING_REQUIRED + + References: + =========== + - [MS-SMB]: 2.2.4.6.2 + - [MS-CIFS]: 2.2.4.53.2 + """ + + NOSECURE_PARAMETER_STRUCT_FORMAT = '<BBHH' + NOSECURE_PARAMETER_STRUCT_SIZE = struct.calcsize(NOSECURE_PARAMETER_STRUCT_FORMAT) + + SECURE_PARAMETER_STRUCT_FORMAT = '<BBHHH' + SECURE_PARAMETER_STRUCT_SIZE = struct.calcsize(SECURE_PARAMETER_STRUCT_FORMAT) + + def decode(self, message): + assert message.command == SMB_COM_SESSION_SETUP_ANDX + if not message.hasExtendedSecurity: + if not message.status.hasError: + if len(message.parameters_data) < self.NOSECURE_PARAMETER_STRUCT_SIZE: + raise ProtocolError('Not enough data to decode SMB_COM_SESSION_SETUP_ANDX (no security extensions) parameters', message.raw_data, message) + + _, _, _, self.action = struct.unpack(self.NOSECURE_PARAMETER_STRUCT_FORMAT, message.parameters_data[:self.NOSECURE_PARAMETER_STRUCT_SIZE]) + else: + if not message.status.hasError or message.status.internal_value == 0xc0000016: # STATUS_MORE_PROCESSING_REQUIRED + if len(message.parameters_data) < self.SECURE_PARAMETER_STRUCT_SIZE: + raise ProtocolError('Not enough data to decode SMB_COM_SESSION_SETUP_ANDX (with security extensions) parameters', message.raw_data, message) + + _, _, _, self.action, blob_length = struct.unpack(self.SECURE_PARAMETER_STRUCT_FORMAT, 
message.parameters_data[:self.SECURE_PARAMETER_STRUCT_SIZE]) + if len(message.data) < blob_length: + raise ProtocolError('Not enough data to decode SMB_COM_SESSION_SETUP_ANDX (with security extensions) security blob', message.raw_data, message) + + self.security_blob = message.data[:blob_length] + + +class ComTreeConnectAndxRequest(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.55.1 + - [MS-SMB]: 2.2.4.7.1 + """ + + PAYLOAD_STRUCT_FORMAT = '<HH' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def __init__(self, path, service, password = ''): + self.path = path + self.service = service + self.password = password + '\0' + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_TREE_CONNECT_ANDX + + def prepare(self, message): + password_len = len(self.password) + message.parameters_data = \ + self.DEFAULT_ANDX_PARAM_HEADER + \ + struct.pack(self.PAYLOAD_STRUCT_FORMAT, + 0x08 | \ + ((message.hasExtendedSecurity and 0x0004) or 0x00) | \ + ((message.tid and message.tid != 0xFFFF and 0x0001) or 0x00), # Disconnect tid, if message.tid must be non-zero + password_len) + + padding = '' + if password_len % 2 == 0: + padding = '\0' + + # Note that service field is never encoded in UTF-16LE. [MS-CIFS]: 2.2.1.1 + message.data = self.password + padding + self.path.encode('UTF-16LE') + '\0\0' + self.service + '\0' + + +class ComTreeConnectAndxResponse(Payload): + """ + Contains information about the SMB_COM_TREE_CONNECT_ANDX response from the server. 
+ + If the message has no errors, each instance contains the following attributes: + - optional_support + + References: + =========== + - [MS-CIFS]: 2.2.4.55.2 + """ + + PAYLOAD_STRUCT_FORMAT = '<BBHH' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def decode(self, message): + assert message.command == SMB_COM_TREE_CONNECT_ANDX + + if not message.status.hasError: + if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE: + raise ProtocolError('Not enough data to decode SMB_COM_TREE_CONNECT_ANDX parameters', message.raw_data, message) + + _, _, _, self.optional_support = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) + + +class ComNTCreateAndxRequest(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.64.1 + - [MS-SMB]: 2.2.4.9.1 + """ + + PAYLOAD_STRUCT_FORMAT = '<BHIIIQIIIIIB' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def __init__(self, filename, flags = 0, root_fid = 0, access_mask = 0, allocation_size = 0L, ext_attr = 0, + share_access = 0, create_disp = 0, create_options = 0, impersonation = 0, security_flags = 0): + self.filename = (filename + '\0').encode('UTF-16LE') + self.flags = flags + self.root_fid = root_fid + self.access_mask = access_mask + self.allocation_size = allocation_size + self.ext_attr = ext_attr + self.share_access = share_access + self.create_disp = create_disp + self.create_options = create_options + self.impersonation = impersonation + self.security_flags = security_flags + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_NT_CREATE_ANDX + + def prepare(self, message): + filename_len = len(self.filename) + + message.parameters_data = \ + self.DEFAULT_ANDX_PARAM_HEADER + \ + struct.pack(self.PAYLOAD_STRUCT_FORMAT, + 0x00, # reserved + filename_len, # NameLength + self.flags, # Flags + self.root_fid, # RootDirectoryFID + self.access_mask, # DesiredAccess + self.allocation_size, # 
AllocationSize + self.ext_attr, # ExtFileAttributes + self.share_access, # ShareAccess + self.create_disp, # CreateDisposition + self.create_options, # CreateOptions + self.impersonation, # ImpersonationLevel + self.security_flags) # SecurityFlags + + padding = '' + if (message.HEADER_STRUCT_SIZE + len(message.parameters_data)) % 2 != 0: + padding = '\0' + + message.data = padding + self.filename + + +class ComNTCreateAndxResponse(Payload): + """ + Contains (partial) information about the SMB_COM_NT_CREATE_ANDX response from the server. + + Each instance contains the following attributes after decoding: + - oplock_level + - fid + + References: + =========== + - [MS-CIFS]: 2.2.4.64.2 + """ + PAYLOAD_STRUCT_FORMAT = '<BBHBH' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def decode(self, message): + assert message.command == SMB_COM_NT_CREATE_ANDX + + if not message.status.hasError: + if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE: + raise ProtocolError('Not enough data to decode SMB_COM_NT_CREATE_ANDX parameters', message.raw_data, message) + + _, _, _, self.oplock_level, self.fid = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) + + +class ComTransactionRequest(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.33.1 + """ + + PAYLOAD_STRUCT_FORMAT = '<HHHHBBHIHHHHHH' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def __init__(self, max_params_count, max_data_count, max_setup_count, + total_params_count = 0, total_data_count = 0, + params_bytes = '', data_bytes = '', setup_bytes = '', + flags = 0, timeout = 0, name = "\\PIPE\\"): + self.total_params_count = total_params_count or len(params_bytes) + self.total_data_count = total_data_count or len(data_bytes) + self.max_params_count = max_params_count + self.max_data_count = max_data_count + self.max_setup_count = max_setup_count + self.flags = flags + self.timeout = timeout + self.params_bytes = 
params_bytes + self.data_bytes = data_bytes + self.setup_bytes = setup_bytes + self.name = name + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_TRANSACTION + + def prepare(self, message): + name = (self.name + '\0').encode('UTF-16LE') + name_len = len(name) + setup_bytes_len = len(self.setup_bytes) + params_bytes_len = len(self.params_bytes) + data_bytes_len = len(self.data_bytes) + + padding0 = '' + offset = message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_bytes_len + 2 # constant 2 is for the ByteCount field in the SMB header (i.e. field which indicates number of data bytes after the SMB parameters) + if offset % 2 != 0: + padding0 = '\0' + offset += 1 + + offset += name_len # For the name field + padding1 = '' + if offset % 4 != 0: + padding1 = '\0'*(4-offset%4) + offset += (4-offset%4) + + if params_bytes_len > 0: + params_bytes_offset = offset + offset += params_bytes_len + else: + params_bytes_offset = 0 + + padding2 = '' + if offset % 4 != 0: + padding2 = '\0'*(4-offset%4) + offset += (4-offset%4) + + if data_bytes_len > 0: + data_bytes_offset = offset + else: + data_bytes_offset = 0 + + message.parameters_data = \ + struct.pack(self.PAYLOAD_STRUCT_FORMAT, + self.total_params_count, + self.total_data_count, + self.max_params_count, + self.max_data_count, + self.max_setup_count, + 0x00, # Reserved1. Must be 0x00 + self.flags, + self.timeout, + 0x0000, # Reserved2. 
Must be 0x0000 + params_bytes_len, + params_bytes_offset, + data_bytes_len, + data_bytes_offset, + int(setup_bytes_len / 2)) + \ + self.setup_bytes + + message.data = padding0 + name + padding1 + self.params_bytes + padding2 + self.data_bytes + + +class ComTransactionResponse(Payload): + """ + Contains information about a SMB_COM_TRANSACTION response from the server + + After decoding, each instance contains the following attributes: + - total_params_count (integer) + - total_data_count (integer) + - setup_bytes (string) + - data_bytes (string) + - params_bytes (string) + + References: + =========== + - [MS-CIFS]: 2.2.4.33.2 + """ + + PAYLOAD_STRUCT_FORMAT = '<HHHHHHHHHH' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def decode(self, message): + assert message.command == SMB_COM_TRANSACTION + + if not message.status.hasError: + if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE: + raise ProtocolError('Not enough data to decode SMB_COM_TRANSACTION parameters', message.raw_data, message) + + self.total_params_count, self.total_data_count, _, \ + params_bytes_len, params_bytes_offset, params_bytes_displ, \ + data_bytes_len, data_bytes_offset, data_bytes_displ, \ + setup_count = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) + + if setup_count > 0: + setup_bytes_len = setup_count * 2 + + if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE + setup_bytes_len: + raise ProtocolError('Not enough data to decode SMB_COM_TRANSACTION parameters', message.raw_data, message) + + self.setup_bytes = message.parameters_data[self.PAYLOAD_STRUCT_SIZE:self.PAYLOAD_STRUCT_SIZE+setup_bytes_len] + else: + self.setup_bytes = '' + + offset = message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_count * 2 + 2 # constant 2 is for the ByteCount field in the SMB header (i.e. 
field which indicates number of data bytes after the SMB parameters) + + if params_bytes_len > 0: + self.params_bytes = message.data[params_bytes_offset-offset:params_bytes_offset-offset+params_bytes_len] + else: + self.params_bytes = '' + + if data_bytes_len > 0: + self.data_bytes = message.data[data_bytes_offset-offset:data_bytes_offset-offset+data_bytes_len] + else: + self.data_bytes = '' + + +class ComTransaction2Request(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.46.1 + """ + + PAYLOAD_STRUCT_FORMAT = 'HHHHBBHIHHHHHH' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def __init__(self, max_params_count, max_data_count, max_setup_count, + total_params_count = 0, total_data_count = 0, + params_bytes = '', data_bytes = '', setup_bytes = '', + flags = 0, timeout = 0): + self.total_params_count = total_params_count or len(params_bytes) + self.total_data_count = total_data_count or len(data_bytes) + self.max_params_count = max_params_count + self.max_data_count = max_data_count + self.max_setup_count = max_setup_count + self.flags = flags + self.timeout = timeout + self.params_bytes = params_bytes + self.data_bytes = data_bytes + self.setup_bytes = setup_bytes + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_TRANSACTION2 + + def prepare(self, message): + setup_bytes_len = len(self.setup_bytes) + params_bytes_len = len(self.params_bytes) + data_bytes_len = len(self.data_bytes) + name = '\0\0' + + padding0 = '' + offset = message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_bytes_len + 2 # constant 2 is for the ByteCount field in the SMB header (i.e. 
field which indicates number of data bytes after the SMB parameters) + if offset % 2 != 0: + padding0 = '\0' + offset += 1 + + offset += 2 # For the name field + padding1 = '' + if offset % 4 != 0: + padding1 = '\0'*(4-offset%4) + + if params_bytes_len > 0: + params_bytes_offset = offset + offset += params_bytes_len + else: + params_bytes_offset = 0 + + padding2 = '' + if offset % 4 != 0: + padding2 = '\0'*(4-offset%4) + + if data_bytes_len > 0: + data_bytes_offset = offset + else: + data_bytes_offset = 0 + + message.parameters_data = \ + struct.pack(self.PAYLOAD_STRUCT_FORMAT, + self.total_params_count, + self.total_data_count, + self.max_params_count, + self.max_data_count, + self.max_setup_count, + 0x00, # Reserved1. Must be 0x00 + self.flags, + self.timeout, + 0x0000, # Reserved2. Must be 0x0000 + params_bytes_len, + params_bytes_offset, + data_bytes_len, + data_bytes_offset, + int(setup_bytes_len / 2)) + \ + self.setup_bytes + + message.data = padding0 + name + padding1 + self.params_bytes + padding2 + self.data_bytes + + +class ComTransaction2Response(Payload): + """ + Contains information about a SMB_COM_TRANSACTION2 response from the server + + After decoding, each instance contains the following attributes: + - total_params_count (integer) + - total_data_count (integer) + - setup_bytes (string) + - data_bytes (string) + - params_bytes (string) + + References: + =========== + - [MS-CIFS]: 2.2.4.46.2 + """ + + PAYLOAD_STRUCT_FORMAT = '<HHHHHHHHHBB' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def decode(self, message): + assert message.command == SMB_COM_TRANSACTION2 + + if not message.status.hasError: + if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE: + raise ProtocolError('Not enough data to decode SMB_COM_TRANSACTION2 parameters', message.raw_data, message) + + self.total_params_count, self.total_data_count, _, \ + params_bytes_len, params_bytes_offset, params_bytes_displ, \ + data_bytes_len, data_bytes_offset, 
data_bytes_displ, \ + setup_count, _ = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) + + if setup_count > 0: + setup_bytes_len = setup_count * 2 + + if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE + setup_bytes_len: + raise ProtocolError('Not enough data to decode SMB_COM_TRANSACTION parameters', message.raw_data, message) + + self.setup_bytes = message.parameters_data[self.PAYLOAD_STRUCT_SIZE:self.PAYLOAD_STRUCT_SIZE+setup_bytes_len] + else: + self.setup_bytes = '' + + offset = message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_count * 2 + 2 # constant 2 is for the ByteCount field in the SMB header (i.e. field which indicates number of data bytes after the SMB parameters) + + if params_bytes_len > 0: + self.params_bytes = message.data[params_bytes_offset-offset:params_bytes_offset-offset+params_bytes_len] + else: + self.params_bytes = '' + + if data_bytes_len > 0: + self.data_bytes = message.data[data_bytes_offset-offset:data_bytes_offset-offset+data_bytes_len] + else: + self.data_bytes = '' + + +class ComCloseRequest(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.5.1 + """ + + PAYLOAD_STRUCT_FORMAT = '<HI' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def __init__(self, fid, last_modified_time = 0xFFFFFFFF): + self.fid = fid + self.last_modified_time = last_modified_time + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_CLOSE + + def prepare(self, message): + message.parameters_data = struct.pack(self.PAYLOAD_STRUCT_FORMAT, self.fid, self.last_modified_time) + message.data = '' + + +class ComOpenAndxRequest(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.41.1 + """ + + PAYLOAD_STRUCT_FORMAT = '<HHHHIHIII' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def __init__(self, filename, access_mode, open_mode, flags = 0x0000, search_attributes = 0, file_attributes = 0, 
create_time = 0, timeout = 0): + """ + @param create_time: Epoch time value to indicate the time of creation for this file. If zero, we will automatically assign the current time + @type create_time: int + @param timeout: Number of milliseconds to wait for blocked open request before failing + @type timeout: int + """ + self.filename = filename + self.access_mode = access_mode + self.open_mode = open_mode + self.flags = flags + self.search_attributes = search_attributes + self.file_attributes = file_attributes + self.create_time = create_time or int(time.time()) + self.timeout = timeout + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_OPEN_ANDX + + def prepare(self, message): + message.parameters_data = \ + self.DEFAULT_ANDX_PARAM_HEADER + \ + struct.pack(self.PAYLOAD_STRUCT_FORMAT, + self.flags, + self.access_mode, + self.search_attributes, + self.file_attributes, + self.create_time, + self.open_mode, + 0, # AllocationSize + 0, # Timeout (in milli-secs) + 0) # Reserved + + message.data = '\0' + self.filename.encode('UTF-16LE') + '\0\0' + + +class ComOpenAndxResponse(Payload): + """ + Contains information about a SMB_COM_OPEN_ANDX response from the server + + After decoding, each instance will contain the following attributes: + - fid (integer) + - file_attributes (integer) + - last_write_time (long) + - access_rights (integer) + - resource_type (integer) + - open_results (integer) + + References: + =========== + - [MS-CIFS]: 2.2.4.41.2 + - [MS-SMB]: 2.2.4.1.2 + """ + + PAYLOAD_STRUCT_FORMAT = '<BBHHHIIHHHHHHH' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def decode(self, message): + assert message.command == SMB_COM_OPEN_ANDX + + if not message.status.hasError: + if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE: + raise ProtocolError('Not enough data to decode SMB_COM_OPEN_ANDX parameters', message.raw_data, message) + + _, _, _, self.fid, self.file_attributes, 
self.last_write_time, _, \ + self.access_rights, self.resource_type, _, self.open_results, _, _, _ = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, + message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) + + +class ComWriteAndxRequest(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.43.1 + - [MS-SMB]: 2.2.4.3.1 + """ + + PAYLOAD_STRUCT_FORMAT = '<HIIHHHHHI' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def __init__(self, fid, data_bytes, offset, write_mode = 0, timeout = 0): + """ + @param timeout: Number of milliseconds to wait for blocked write request before failing. Must be zero for writing to regular file + @type timeout: int + """ + self.fid = fid + self.offset = offset + self.data_bytes = data_bytes + self.timeout = timeout + self.write_mode = write_mode + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_WRITE_ANDX + + def prepare(self, message): + # constant 1 is to account for the pad byte in the message.data + # constant 2 is for the ByteCount field in the SMB header (i.e. 
field which indicates number of data bytes after the SMB parameters) + data_offset = message.HEADER_STRUCT_SIZE + self.DEFAULT_ANDX_PARAM_SIZE + self.PAYLOAD_STRUCT_SIZE + 1 + 2 + data_len = len(self.data_bytes) + + message.parameters_data = \ + self.DEFAULT_ANDX_PARAM_HEADER + \ + struct.pack(self.PAYLOAD_STRUCT_FORMAT, + self.fid, + self.offset & 0xFFFFFFFF, + self.timeout, + self.write_mode, + data_len, # Remaining + 0x0000, # Reserved + len(self.data_bytes), # DataLength + data_offset, # DataOffset + self.offset >> 32) # OffsetHigh field defined in [MS-SMB]: 2.2.4.3.1 + + message.data = '\0' + self.data_bytes + + +class ComWriteAndxResponse(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.43.2 + - [MS-SMB]: 2.2.4.3.2 + """ + + PAYLOAD_STRUCT_FORMAT = '<BBHHHHH' # We follow the SMB_COM_WRITEX_ANDX server extensions in [MS-SMB]: 2.2.4.3.2 + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def decode(self, message): + assert message.command == SMB_COM_WRITE_ANDX + + if not message.status.hasError: + if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE: + raise ProtocolError('Not enough data to decode SMB_COM_WRITE_ANDX parameters', message.raw_data, message) + + _, _, _, count, self.available, high_count, _ = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) + self.count = (count & 0xFFFF) | (high_count << 16) + + +class ComReadAndxRequest(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.42.1 + - [MS-SMB]: 2.2.4.2.1 + """ + + PAYLOAD_STRUCT_FORMAT = '<HIHHIHI' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def __init__(self, fid, offset, max_return_bytes_count, min_return_bytes_count, timeout = 0, remaining = 0): + """ + @param timeout: If reading from a regular file, this parameter must be 0. 
+ @type timeout: int + """ + self.fid = fid + self.remaining = remaining + self.max_return_bytes_count = max_return_bytes_count + self.min_return_bytes_count = min_return_bytes_count + self.offset = offset + self.timeout = timeout + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_READ_ANDX + + def prepare(self, message): + message.parameters_data = \ + self.DEFAULT_ANDX_PARAM_HEADER + \ + struct.pack(self.PAYLOAD_STRUCT_FORMAT, + self.fid, + self.offset & 0xFFFFFFFF, + self.max_return_bytes_count, + self.min_return_bytes_count, + self.timeout or (self.max_return_bytes_count >> 32), # Note that in [MS-SMB]: 2.2.4.2.1, this field can also act as MaxCountHigh field + self.remaining, # In [MS-CIFS]: 2.2.4.42.1, this field must be set to 0x0000 + self.offset >> 32) + + message.data = '' + + +class ComReadAndxResponse(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.42.2 + - [MS-SMB]: 2.2.4.2.2 + """ + + PAYLOAD_STRUCT_FORMAT = '<BBHHHHHHHHHHH' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def decode(self, message): + assert message.command == SMB_COM_READ_ANDX + + if not message.status.hasError: + if len(message.parameters_data) < self.PAYLOAD_STRUCT_SIZE: + raise ProtocolError('Not enough data to decode SMB_COM_READ_ANDX parameters', message.raw_data, message) + + _, _, _, _, _, _, self.data_length, data_offset, _, _, _, _, _ = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, + message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) + + offset = data_offset - message.HEADER_STRUCT_SIZE - self.PAYLOAD_STRUCT_SIZE - 2 # constant 2 is for the ByteCount field in the SMB header (i.e. 
field which indicates number of data bytes after the SMB parameters) + self.data = message.data[offset:offset+self.data_length] + assert len(self.data) == self.data_length + + +class ComDeleteRequest(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.7.1 + """ + + def __init__(self, filename_pattern, search_attributes = 0): + self.filename_pattern = filename_pattern + self.search_attributes = search_attributes + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_DELETE + + def prepare(self, message): + message.parameters_data = struct.pack('<H', self.search_attributes) + message.data = '\x04' + self.filename_pattern.encode('UTF-16LE') + '\0\0' + + +class ComCreateDirectoryRequest(Payload): + """ + Although this command has been marked deprecated in [MS-CIFS], we continue to use it for its simplicity + as compared to its replacement TRANS2_CREATE_DIRECTORY sub-command [MS-CIFS]: 2.2.6.14 + + References: + =========== + - [MS-CIFS]: 2.2.4.1.1 + """ + + def __init__(self, path): + self.path = path + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_CREATE_DIRECTORY + + def prepare(self, message): + message.parameters_data = '' + message.data = '\x04' + self.path.encode('UTF-16LE') + '\0\0' + + +class ComDeleteDirectoryRequest(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.2.1 + """ + + def __init__(self, path): + self.path = path + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_DELETE_DIRECTORY + + def prepare(self, message): + message.parameters_data = '' + message.data = '\x04' + self.path.encode('UTF-16LE') + '\0\0' + + +class ComRenameRequest(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.8.1 + """ + + def __init__(self, old_path, new_path, search_attributes = 0): + self.old_path = old_path + self.new_path = new_path + self.search_attributes = search_attributes + + def 
initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_RENAME + + def prepare(self, message): + message.parameters_data = struct.pack('<H', self.search_attributes) + message.data = '\x04' + self.old_path.encode('UTF-16LE') + '\x00\x00\x04\x00' + self.new_path.encode('UTF-16LE') + '\x00\x00' + + +class ComEchoRequest(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.39.1 + """ + + def __init__(self, echo_data = b'', echo_count = 1): + self.echo_count = echo_count + self.echo_data = echo_data + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_ECHO + message.tid = 0xFFFF + + def prepare(self, message): + message.parameters_data = struct.pack('<H', self.echo_count) + message.data = self.echo_data + + +class ComEchoResponse(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.39.2 + """ + + def decode(self, message): + self.sequence_number = struct.unpack('<H', message.parameters_data[:2])[0] + self.data = message.data + + +class ComNTTransactRequest(Payload): + """ + References: + =========== + - [MS-CIFS]: 2.2.4.62.1 + """ + PAYLOAD_STRUCT_FORMAT = '<BHIIIIIIIIBH' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def __init__(self, function, max_params_count, max_data_count, max_setup_count, + total_params_count = 0, total_data_count = 0, + params_bytes = '', setup_bytes = '', data_bytes = ''): + self.function = function + self.total_params_count = total_params_count or len(params_bytes) + self.total_data_count = total_data_count or len(data_bytes) + self.max_params_count = max_params_count + self.max_data_count = max_data_count + self.max_setup_count = max_setup_count + self.params_bytes = params_bytes + self.setup_bytes = setup_bytes + self.data_bytes = data_bytes + + def initMessage(self, message): + Payload.initMessage(self, message) + message.command = SMB_COM_NT_TRANSACT + + def prepare(self, message): + setup_bytes_len = 
len(self.setup_bytes) + params_bytes_len = len(self.params_bytes) + data_bytes_len = len(self.data_bytes) + + padding0 = '' + offset = message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_bytes_len + 2 # constant 2 is for the ByteCount field in the SMB header (i.e. field which indicates number of data bytes after the SMB parameters) + if offset % 4 != 0: + padding0 = '\0'*(4-offset%4) + offset += (4-offset%4) + + if params_bytes_len > 0: + params_bytes_offset = offset + else: + params_bytes_offset = 0 + + offset += params_bytes_len + padding1 = '' + if offset % 4 != 0: + padding1 = '\0'*(4-offset%4) + offset += (4-offset%4) + + if data_bytes_len > 0: + data_bytes_offset = offset + else: + data_bytes_offset = 0 + + message.parameters_data = \ + struct.pack(self.PAYLOAD_STRUCT_FORMAT, + self.max_setup_count, + 0x00, # Reserved1. Must be 0x00 + self.total_params_count, + self.total_data_count, + self.max_params_count, + self.max_data_count, + params_bytes_len, + params_bytes_offset, + data_bytes_len, + data_bytes_offset, + int(setup_bytes_len / 2), + self.function) + \ + self.setup_bytes + + message.data = padding0 + self.params_bytes + padding1 + self.data_bytes + + +class ComNTTransactResponse(Payload): + """ + Contains information about a SMB_COM_NT_TRANSACT response from the server + + After decoding, each instance contains the following attributes: + - total_params_count (integer) + - total_data_count (integer) + - setup_bytes (string) + - data_bytes (string) + - params_bytes (string) + + References: + =========== + - [MS-CIFS]: 2.2.4.62.2 + """ + PAYLOAD_STRUCT_FORMAT = '<3sIIIIIIIIBH' + PAYLOAD_STRUCT_SIZE = struct.calcsize(PAYLOAD_STRUCT_FORMAT) + + def decode(self, message): + assert message.command == SMB_COM_NT_TRANSACT + + if not message.status.hasError: + _, self.total_params_count, self.total_data_count, \ + params_count, params_offset, params_displ, \ + data_count, data_offset, data_displ, setup_count = struct.unpack(self.PAYLOAD_STRUCT_FORMAT, 
+ message.parameters_data[:self.PAYLOAD_STRUCT_SIZE]) + + self.setup_bytes = message.parameters_data[self.PAYLOAD_STRUCT_SIZE:self.PAYLOAD_STRUCT_SIZE+setup_count*2] + + if params_count > 0: + params_offset -= message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_count*2 + 2 + self.params_bytes = message.data[params_offset:params_offset+params_count] + else: + self.params_bytes = '' + + if data_count > 0: + data_offset -= message.HEADER_STRUCT_SIZE + self.PAYLOAD_STRUCT_SIZE + setup_count*2 + 2 + self.data_bytes = message.data[data_offset:data_offset+data_count] + else: + self.data_bytes = '' diff --git a/plugin.video.alfa/lib/sambatools/smb/utils/README.txt b/plugin.video.alfa/lib/sambatools/smb/utils/README.txt index 7dbaa8cf..c17e6721 100755 --- a/plugin.video.alfa/lib/sambatools/smb/utils/README.txt +++ b/plugin.video.alfa/lib/sambatools/smb/utils/README.txt @@ -1,12 +1,12 @@ - -md4.py and U32.py -Both modules downloaded from http://www.oocities.org/rozmanov/python/md4.html. -Licensed under LGPL - -pyDes.py 2.0.0 -Downloaded from http://twhiteman.netfirms.com/des.html -Licensed under public domain - -sha256.py -Downloaded from http://xbmc-addons.googlecode.com/svn-history/r1686/trunk/scripts/OpenSubtitles_OSD/resources/lib/sha256.py -Licensed under MIT + +md4.py and U32.py +Both modules downloaded from http://www.oocities.org/rozmanov/python/md4.html. 
+Licensed under LGPL + +pyDes.py 2.0.0 +Downloaded from http://twhiteman.netfirms.com/des.html +Licensed under public domain + +sha256.py +Downloaded from http://xbmc-addons.googlecode.com/svn-history/r1686/trunk/scripts/OpenSubtitles_OSD/resources/lib/sha256.py +Licensed under MIT diff --git a/plugin.video.alfa/lib/sambatools/smb/utils/__init__.py b/plugin.video.alfa/lib/sambatools/smb/utils/__init__.py index 68a05714..bce9d86c 100755 --- a/plugin.video.alfa/lib/sambatools/smb/utils/__init__.py +++ b/plugin.video.alfa/lib/sambatools/smb/utils/__init__.py @@ -1,3 +1,3 @@ - -def convertFILETIMEtoEpoch(t): - return (t - 116444736000000000L) / 10000000.0; + +def convertFILETIMEtoEpoch(t): + return (t - 116444736000000000L) / 10000000.0; diff --git a/plugin.video.alfa/lib/sambatools/smb/utils/md4.py b/plugin.video.alfa/lib/sambatools/smb/utils/md4.py index 8c2f2ee0..2f107556 100755 --- a/plugin.video.alfa/lib/sambatools/smb/utils/md4.py +++ b/plugin.video.alfa/lib/sambatools/smb/utils/md4.py @@ -1,254 +1,254 @@ -# md4.py implements md4 hash class for Python -# Version 1.0 -# Copyright (C) 2001-2002 Dmitry Rozmanov -# -# based on md4.c from "the Python Cryptography Toolkit, version 1.0.0 -# Copyright (C) 1995, A.M. Kuchling" -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -# -# e-mail: dima@xenon.spb.ru -# -#==================================================================== - -# MD4 validation data - -md4_test= [ - ('', 0x31d6cfe0d16ae931b73c59d7e0c089c0L), - ("a", 0xbde52cb31de33e46245e05fbdbd6fb24L), - ("abc", 0xa448017aaf21d8525fc10ae87aa6729dL), - ("message digest", 0xd9130a8164549fe818874806e1c7014bL), - ("abcdefghijklmnopqrstuvwxyz", 0xd79e1c308aa5bbcdeea8ed63df412da9L), - ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", - 0x043f8582f241db351ce627e153e7f0e4L), - ("12345678901234567890123456789012345678901234567890123456789012345678901234567890", - 0xe33b4ddc9c38f2199c3e7b164fcc0536L), - ] - -#==================================================================== -from U32 import U32 - -#-------------------------------------------------------------------- -class MD4: - A = None - B = None - C = None - D = None - count, len1, len2 = None, None, None - buf = [] - - #----------------------------------------------------- - def __init__(self): - - - self.A = U32(0x67452301L) - self.B = U32(0xefcdab89L) - self.C = U32(0x98badcfeL) - self.D = U32(0x10325476L) - self.count, self.len1, self.len2 = U32(0L), U32(0L), U32(0L) - self.buf = [0x00] * 64 - - #----------------------------------------------------- - def __repr__(self): - r = 'A = %s, \nB = %s, \nC = %s, \nD = %s.\n' % (self.A.__repr__(), self.B.__repr__(), self.C.__repr__(), self.D.__repr__()) - r = r + 'count = %s, \nlen1 = %s, \nlen2 = %s.\n' % (self.count.__repr__(), self.len1.__repr__(), self.len2.__repr__()) - for i in range(4): - for j in range(16): - r = r + '%4s ' % hex(self.buf[i+j]) - r = r + '\n' - - return r - #----------------------------------------------------- - def make_copy(self): - - dest = new() - - dest.len1 = self.len1 - 
dest.len2 = self.len2 - dest.A = self.A - dest.B = self.B - dest.C = self.C - dest.D = self.D - dest.count = self.count - for i in range(self.count): - dest.buf[i] = self.buf[i] - - return dest - - #----------------------------------------------------- - def update(self, str): - - buf = [] - for i in str: buf.append(ord(i)) - ilen = U32(len(buf)) - - # check if the first length is out of range - # as the length is measured in bits then multiplay it by 8 - if (long(self.len1 + (ilen << 3)) < long(self.len1)): - self.len2 = self.len2 + U32(1) - - self.len1 = self.len1 + (ilen << 3) - self.len2 = self.len2 + (ilen >> 29) - - L = U32(0) - bufpos = 0 - while (long(ilen) > 0): - if (64 - long(self.count)) < long(ilen): L = U32(64 - long(self.count)) - else: L = ilen - for i in range(int(L)): self.buf[i + int(self.count)] = buf[i + bufpos] - self.count = self.count + L - ilen = ilen - L - bufpos = bufpos + int(L) - - if (long(self.count) == 64L): - self.count = U32(0L) - X = [] - i = 0 - for j in range(16): - X.append(U32(self.buf[i]) + (U32(self.buf[i+1]) << 8) + \ - (U32(self.buf[i+2]) << 16) + (U32(self.buf[i+3]) << 24)) - i = i + 4 - - A = self.A - B = self.B - C = self.C - D = self.D - - A = f1(A,B,C,D, 0, 3, X) - D = f1(D,A,B,C, 1, 7, X) - C = f1(C,D,A,B, 2,11, X) - B = f1(B,C,D,A, 3,19, X) - A = f1(A,B,C,D, 4, 3, X) - D = f1(D,A,B,C, 5, 7, X) - C = f1(C,D,A,B, 6,11, X) - B = f1(B,C,D,A, 7,19, X) - A = f1(A,B,C,D, 8, 3, X) - D = f1(D,A,B,C, 9, 7, X) - C = f1(C,D,A,B,10,11, X) - B = f1(B,C,D,A,11,19, X) - A = f1(A,B,C,D,12, 3, X) - D = f1(D,A,B,C,13, 7, X) - C = f1(C,D,A,B,14,11, X) - B = f1(B,C,D,A,15,19, X) - - A = f2(A,B,C,D, 0, 3, X) - D = f2(D,A,B,C, 4, 5, X) - C = f2(C,D,A,B, 8, 9, X) - B = f2(B,C,D,A,12,13, X) - A = f2(A,B,C,D, 1, 3, X) - D = f2(D,A,B,C, 5, 5, X) - C = f2(C,D,A,B, 9, 9, X) - B = f2(B,C,D,A,13,13, X) - A = f2(A,B,C,D, 2, 3, X) - D = f2(D,A,B,C, 6, 5, X) - C = f2(C,D,A,B,10, 9, X) - B = f2(B,C,D,A,14,13, X) - A = f2(A,B,C,D, 3, 3, X) - D = 
f2(D,A,B,C, 7, 5, X) - C = f2(C,D,A,B,11, 9, X) - B = f2(B,C,D,A,15,13, X) - - A = f3(A,B,C,D, 0, 3, X) - D = f3(D,A,B,C, 8, 9, X) - C = f3(C,D,A,B, 4,11, X) - B = f3(B,C,D,A,12,15, X) - A = f3(A,B,C,D, 2, 3, X) - D = f3(D,A,B,C,10, 9, X) - C = f3(C,D,A,B, 6,11, X) - B = f3(B,C,D,A,14,15, X) - A = f3(A,B,C,D, 1, 3, X) - D = f3(D,A,B,C, 9, 9, X) - C = f3(C,D,A,B, 5,11, X) - B = f3(B,C,D,A,13,15, X) - A = f3(A,B,C,D, 3, 3, X) - D = f3(D,A,B,C,11, 9, X) - C = f3(C,D,A,B, 7,11, X) - B = f3(B,C,D,A,15,15, X) - - self.A = self.A + A - self.B = self.B + B - self.C = self.C + C - self.D = self.D + D - - #----------------------------------------------------- - def digest(self): - - res = [0x00] * 16 - s = [0x00] * 8 - padding = [0x00] * 64 - padding[0] = 0x80 - padlen, oldlen1, oldlen2 = U32(0), U32(0), U32(0) - - temp = self.make_copy() - - oldlen1 = temp.len1 - oldlen2 = temp.len2 - if (56 <= long(self.count)): padlen = U32(56 - long(self.count) + 64) - else: padlen = U32(56 - long(self.count)) - - temp.update(int_array2str(padding[:int(padlen)])) - - s[0]= (oldlen1) & U32(0xFF) - s[1]=((oldlen1) >> 8) & U32(0xFF) - s[2]=((oldlen1) >> 16) & U32(0xFF) - s[3]=((oldlen1) >> 24) & U32(0xFF) - s[4]= (oldlen2) & U32(0xFF) - s[5]=((oldlen2) >> 8) & U32(0xFF) - s[6]=((oldlen2) >> 16) & U32(0xFF) - s[7]=((oldlen2) >> 24) & U32(0xFF) - temp.update(int_array2str(s)) - - res[ 0]= temp.A & U32(0xFF) - res[ 1]=(temp.A >> 8) & U32(0xFF) - res[ 2]=(temp.A >> 16) & U32(0xFF) - res[ 3]=(temp.A >> 24) & U32(0xFF) - res[ 4]= temp.B & U32(0xFF) - res[ 5]=(temp.B >> 8) & U32(0xFF) - res[ 6]=(temp.B >> 16) & U32(0xFF) - res[ 7]=(temp.B >> 24) & U32(0xFF) - res[ 8]= temp.C & U32(0xFF) - res[ 9]=(temp.C >> 8) & U32(0xFF) - res[10]=(temp.C >> 16) & U32(0xFF) - res[11]=(temp.C >> 24) & U32(0xFF) - res[12]= temp.D & U32(0xFF) - res[13]=(temp.D >> 8) & U32(0xFF) - res[14]=(temp.D >> 16) & U32(0xFF) - res[15]=(temp.D >> 24) & U32(0xFF) - - return int_array2str(res) - 
-#==================================================================== -# helpers -def F(x, y, z): return (((x) & (y)) | ((~x) & (z))) -def G(x, y, z): return (((x) & (y)) | ((x) & (z)) | ((y) & (z))) -def H(x, y, z): return ((x) ^ (y) ^ (z)) - -def ROL(x, n): return (((x) << n) | ((x) >> (32-n))) - -def f1(a, b, c, d, k, s, X): return ROL(a + F(b, c, d) + X[k], s) -def f2(a, b, c, d, k, s, X): return ROL(a + G(b, c, d) + X[k] + U32(0x5a827999L), s) -def f3(a, b, c, d, k, s, X): return ROL(a + H(b, c, d) + X[k] + U32(0x6ed9eba1L), s) - -#-------------------------------------------------------------------- -# helper function -def int_array2str(array): - str = '' - for i in array: - str = str + chr(i) - return str - -#-------------------------------------------------------------------- -# To be able to use md4.new() instead of md4.MD4() -new = MD4 +# md4.py implements md4 hash class for Python +# Version 1.0 +# Copyright (C) 2001-2002 Dmitry Rozmanov +# +# based on md4.c from "the Python Cryptography Toolkit, version 1.0.0 +# Copyright (C) 1995, A.M. Kuchling" +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# e-mail: dima@xenon.spb.ru +# +#==================================================================== + +# MD4 validation data + +md4_test= [ + ('', 0x31d6cfe0d16ae931b73c59d7e0c089c0L), + ("a", 0xbde52cb31de33e46245e05fbdbd6fb24L), + ("abc", 0xa448017aaf21d8525fc10ae87aa6729dL), + ("message digest", 0xd9130a8164549fe818874806e1c7014bL), + ("abcdefghijklmnopqrstuvwxyz", 0xd79e1c308aa5bbcdeea8ed63df412da9L), + ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", + 0x043f8582f241db351ce627e153e7f0e4L), + ("12345678901234567890123456789012345678901234567890123456789012345678901234567890", + 0xe33b4ddc9c38f2199c3e7b164fcc0536L), + ] + +#==================================================================== +from U32 import U32 + +#-------------------------------------------------------------------- +class MD4: + A = None + B = None + C = None + D = None + count, len1, len2 = None, None, None + buf = [] + + #----------------------------------------------------- + def __init__(self): + + + self.A = U32(0x67452301L) + self.B = U32(0xefcdab89L) + self.C = U32(0x98badcfeL) + self.D = U32(0x10325476L) + self.count, self.len1, self.len2 = U32(0L), U32(0L), U32(0L) + self.buf = [0x00] * 64 + + #----------------------------------------------------- + def __repr__(self): + r = 'A = %s, \nB = %s, \nC = %s, \nD = %s.\n' % (self.A.__repr__(), self.B.__repr__(), self.C.__repr__(), self.D.__repr__()) + r = r + 'count = %s, \nlen1 = %s, \nlen2 = %s.\n' % (self.count.__repr__(), self.len1.__repr__(), self.len2.__repr__()) + for i in range(4): + for j in range(16): + r = r + '%4s ' % hex(self.buf[i+j]) + r = r + '\n' + + return r + #----------------------------------------------------- + def make_copy(self): + + dest = new() + + dest.len1 = self.len1 + 
dest.len2 = self.len2 + dest.A = self.A + dest.B = self.B + dest.C = self.C + dest.D = self.D + dest.count = self.count + for i in range(self.count): + dest.buf[i] = self.buf[i] + + return dest + + #----------------------------------------------------- + def update(self, str): + + buf = [] + for i in str: buf.append(ord(i)) + ilen = U32(len(buf)) + + # check if the first length is out of range + # as the length is measured in bits then multiplay it by 8 + if (long(self.len1 + (ilen << 3)) < long(self.len1)): + self.len2 = self.len2 + U32(1) + + self.len1 = self.len1 + (ilen << 3) + self.len2 = self.len2 + (ilen >> 29) + + L = U32(0) + bufpos = 0 + while (long(ilen) > 0): + if (64 - long(self.count)) < long(ilen): L = U32(64 - long(self.count)) + else: L = ilen + for i in range(int(L)): self.buf[i + int(self.count)] = buf[i + bufpos] + self.count = self.count + L + ilen = ilen - L + bufpos = bufpos + int(L) + + if (long(self.count) == 64L): + self.count = U32(0L) + X = [] + i = 0 + for j in range(16): + X.append(U32(self.buf[i]) + (U32(self.buf[i+1]) << 8) + \ + (U32(self.buf[i+2]) << 16) + (U32(self.buf[i+3]) << 24)) + i = i + 4 + + A = self.A + B = self.B + C = self.C + D = self.D + + A = f1(A,B,C,D, 0, 3, X) + D = f1(D,A,B,C, 1, 7, X) + C = f1(C,D,A,B, 2,11, X) + B = f1(B,C,D,A, 3,19, X) + A = f1(A,B,C,D, 4, 3, X) + D = f1(D,A,B,C, 5, 7, X) + C = f1(C,D,A,B, 6,11, X) + B = f1(B,C,D,A, 7,19, X) + A = f1(A,B,C,D, 8, 3, X) + D = f1(D,A,B,C, 9, 7, X) + C = f1(C,D,A,B,10,11, X) + B = f1(B,C,D,A,11,19, X) + A = f1(A,B,C,D,12, 3, X) + D = f1(D,A,B,C,13, 7, X) + C = f1(C,D,A,B,14,11, X) + B = f1(B,C,D,A,15,19, X) + + A = f2(A,B,C,D, 0, 3, X) + D = f2(D,A,B,C, 4, 5, X) + C = f2(C,D,A,B, 8, 9, X) + B = f2(B,C,D,A,12,13, X) + A = f2(A,B,C,D, 1, 3, X) + D = f2(D,A,B,C, 5, 5, X) + C = f2(C,D,A,B, 9, 9, X) + B = f2(B,C,D,A,13,13, X) + A = f2(A,B,C,D, 2, 3, X) + D = f2(D,A,B,C, 6, 5, X) + C = f2(C,D,A,B,10, 9, X) + B = f2(B,C,D,A,14,13, X) + A = f2(A,B,C,D, 3, 3, X) + D = 
f2(D,A,B,C, 7, 5, X) + C = f2(C,D,A,B,11, 9, X) + B = f2(B,C,D,A,15,13, X) + + A = f3(A,B,C,D, 0, 3, X) + D = f3(D,A,B,C, 8, 9, X) + C = f3(C,D,A,B, 4,11, X) + B = f3(B,C,D,A,12,15, X) + A = f3(A,B,C,D, 2, 3, X) + D = f3(D,A,B,C,10, 9, X) + C = f3(C,D,A,B, 6,11, X) + B = f3(B,C,D,A,14,15, X) + A = f3(A,B,C,D, 1, 3, X) + D = f3(D,A,B,C, 9, 9, X) + C = f3(C,D,A,B, 5,11, X) + B = f3(B,C,D,A,13,15, X) + A = f3(A,B,C,D, 3, 3, X) + D = f3(D,A,B,C,11, 9, X) + C = f3(C,D,A,B, 7,11, X) + B = f3(B,C,D,A,15,15, X) + + self.A = self.A + A + self.B = self.B + B + self.C = self.C + C + self.D = self.D + D + + #----------------------------------------------------- + def digest(self): + + res = [0x00] * 16 + s = [0x00] * 8 + padding = [0x00] * 64 + padding[0] = 0x80 + padlen, oldlen1, oldlen2 = U32(0), U32(0), U32(0) + + temp = self.make_copy() + + oldlen1 = temp.len1 + oldlen2 = temp.len2 + if (56 <= long(self.count)): padlen = U32(56 - long(self.count) + 64) + else: padlen = U32(56 - long(self.count)) + + temp.update(int_array2str(padding[:int(padlen)])) + + s[0]= (oldlen1) & U32(0xFF) + s[1]=((oldlen1) >> 8) & U32(0xFF) + s[2]=((oldlen1) >> 16) & U32(0xFF) + s[3]=((oldlen1) >> 24) & U32(0xFF) + s[4]= (oldlen2) & U32(0xFF) + s[5]=((oldlen2) >> 8) & U32(0xFF) + s[6]=((oldlen2) >> 16) & U32(0xFF) + s[7]=((oldlen2) >> 24) & U32(0xFF) + temp.update(int_array2str(s)) + + res[ 0]= temp.A & U32(0xFF) + res[ 1]=(temp.A >> 8) & U32(0xFF) + res[ 2]=(temp.A >> 16) & U32(0xFF) + res[ 3]=(temp.A >> 24) & U32(0xFF) + res[ 4]= temp.B & U32(0xFF) + res[ 5]=(temp.B >> 8) & U32(0xFF) + res[ 6]=(temp.B >> 16) & U32(0xFF) + res[ 7]=(temp.B >> 24) & U32(0xFF) + res[ 8]= temp.C & U32(0xFF) + res[ 9]=(temp.C >> 8) & U32(0xFF) + res[10]=(temp.C >> 16) & U32(0xFF) + res[11]=(temp.C >> 24) & U32(0xFF) + res[12]= temp.D & U32(0xFF) + res[13]=(temp.D >> 8) & U32(0xFF) + res[14]=(temp.D >> 16) & U32(0xFF) + res[15]=(temp.D >> 24) & U32(0xFF) + + return int_array2str(res) + 
+#==================================================================== +# helpers +def F(x, y, z): return (((x) & (y)) | ((~x) & (z))) +def G(x, y, z): return (((x) & (y)) | ((x) & (z)) | ((y) & (z))) +def H(x, y, z): return ((x) ^ (y) ^ (z)) + +def ROL(x, n): return (((x) << n) | ((x) >> (32-n))) + +def f1(a, b, c, d, k, s, X): return ROL(a + F(b, c, d) + X[k], s) +def f2(a, b, c, d, k, s, X): return ROL(a + G(b, c, d) + X[k] + U32(0x5a827999L), s) +def f3(a, b, c, d, k, s, X): return ROL(a + H(b, c, d) + X[k] + U32(0x6ed9eba1L), s) + +#-------------------------------------------------------------------- +# helper function +def int_array2str(array): + str = '' + for i in array: + str = str + chr(i) + return str + +#-------------------------------------------------------------------- +# To be able to use md4.new() instead of md4.MD4() +new = MD4 diff --git a/plugin.video.alfa/lib/sambatools/smb/utils/pyDes.py b/plugin.video.alfa/lib/sambatools/smb/utils/pyDes.py index b04143e8..6160e2a4 100755 --- a/plugin.video.alfa/lib/sambatools/smb/utils/pyDes.py +++ b/plugin.video.alfa/lib/sambatools/smb/utils/pyDes.py @@ -1,852 +1,852 @@ -############################################################################# -# Documentation # -############################################################################# - -# Author: Todd Whiteman -# Date: 16th March, 2009 -# Verion: 2.0.0 -# License: Public Domain - free to do as you wish -# Homepage: http://twhiteman.netfirms.com/des.html -# -# This is a pure python implementation of the DES encryption algorithm. -# It's pure python to avoid portability issues, since most DES -# implementations are programmed in C (for performance reasons). -# -# Triple DES class is also implemented, utilising the DES base. Triple DES -# is either DES-EDE3 with a 24 byte key, or DES-EDE2 with a 16 byte key. -# -# See the README.txt that should come with this python module for the -# implementation methods used. 
-# -# Thanks to: -# * David Broadwell for ideas, comments and suggestions. -# * Mario Wolff for pointing out and debugging some triple des CBC errors. -# * Santiago Palladino for providing the PKCS5 padding technique. -# * Shaya for correcting the PAD_PKCS5 triple des CBC errors. -# -"""A pure python implementation of the DES and TRIPLE DES encryption algorithms. - -Class initialization --------------------- -pyDes.des(key, [mode], [IV], [pad], [padmode]) -pyDes.triple_des(key, [mode], [IV], [pad], [padmode]) - -key -> Bytes containing the encryption key. 8 bytes for DES, 16 or 24 bytes - for Triple DES -mode -> Optional argument for encryption type, can be either - pyDes.ECB (Electronic Code Book) or pyDes.CBC (Cypher Block Chaining) -IV -> Optional Initial Value bytes, must be supplied if using CBC mode. - Length must be 8 bytes. -pad -> Optional argument, set the pad character (PAD_NORMAL) to use during - all encrypt/decrpt operations done with this instance. -padmode -> Optional argument, set the padding mode (PAD_NORMAL or PAD_PKCS5) - to use during all encrypt/decrpt operations done with this instance. - -I recommend to use PAD_PKCS5 padding, as then you never need to worry about any -padding issues, as the padding can be removed unambiguously upon decrypting -data that was encrypted using PAD_PKCS5 padmode. - -Common methods --------------- -encrypt(data, [pad], [padmode]) -decrypt(data, [pad], [padmode]) - -data -> Bytes to be encrypted/decrypted -pad -> Optional argument. Only when using padmode of PAD_NORMAL. For - encryption, adds this characters to the end of the data block when - data is not a multiple of 8 bytes. For decryption, will remove the - trailing characters that match this pad character from the last 8 - bytes of the unencrypted data block. -padmode -> Optional argument, set the padding mode, must be one of PAD_NORMAL - or PAD_PKCS5). Defaults to PAD_NORMAL. 
- - -Example -------- -from pyDes import * - -data = "Please encrypt my data" -k = des("DESCRYPT", CBC, "\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5) -# For Python3, you'll need to use bytes, i.e.: -# data = b"Please encrypt my data" -# k = des(b"DESCRYPT", CBC, b"\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5) -d = k.encrypt(data) -print "Encrypted: %r" % d -print "Decrypted: %r" % k.decrypt(d) -assert k.decrypt(d, padmode=PAD_PKCS5) == data - - -See the module source (pyDes.py) for more examples of use. -You can also run the pyDes.py file without and arguments to see a simple test. - -Note: This code was not written for high-end systems needing a fast - implementation, but rather a handy portable solution with small usage. - -""" - -import sys - -# _pythonMajorVersion is used to handle Python2 and Python3 differences. -_pythonMajorVersion = sys.version_info[0] - -# Modes of crypting / cyphering -ECB = 0 -CBC = 1 - -# Modes of padding -PAD_NORMAL = 1 -PAD_PKCS5 = 2 - -# PAD_PKCS5: is a method that will unambiguously remove all padding -# characters after decryption, when originally encrypted with -# this padding mode. -# For a good description of the PKCS5 padding technique, see: -# http://www.faqs.org/rfcs/rfc1423.html - -# The base class shared by des and triple des. -class _baseDes(object): - def __init__(self, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL): - if IV: - IV = self._guardAgainstUnicode(IV) - if pad: - pad = self._guardAgainstUnicode(pad) - self.block_size = 8 - # Sanity checking of arguments. 
- if pad and padmode == PAD_PKCS5: - raise ValueError("Cannot use a pad character with PAD_PKCS5") - if IV and len(IV) != self.block_size: - raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes") - - # Set the passed in variables - self._mode = mode - self._iv = IV - self._padding = pad - self._padmode = padmode - - def getKey(self): - """getKey() -> bytes""" - return self.__key - - def setKey(self, key): - """Will set the crypting key for this object.""" - key = self._guardAgainstUnicode(key) - self.__key = key - - def getMode(self): - """getMode() -> pyDes.ECB or pyDes.CBC""" - return self._mode - - def setMode(self, mode): - """Sets the type of crypting mode, pyDes.ECB or pyDes.CBC""" - self._mode = mode - - def getPadding(self): - """getPadding() -> bytes of length 1. Padding character.""" - return self._padding - - def setPadding(self, pad): - """setPadding() -> bytes of length 1. Padding character.""" - if pad is not None: - pad = self._guardAgainstUnicode(pad) - self._padding = pad - - def getPadMode(self): - """getPadMode() -> pyDes.PAD_NORMAL or pyDes.PAD_PKCS5""" - return self._padmode - - def setPadMode(self, mode): - """Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5""" - self._padmode = mode - - def getIV(self): - """getIV() -> bytes""" - return self._iv - - def setIV(self, IV): - """Will set the Initial Value, used in conjunction with CBC mode""" - if not IV or len(IV) != self.block_size: - raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes") - IV = self._guardAgainstUnicode(IV) - self._iv = IV - - def _padData(self, data, pad, padmode): - # Pad data depending on the mode - if padmode is None: - # Get the default padding mode. 
- padmode = self.getPadMode() - if pad and padmode == PAD_PKCS5: - raise ValueError("Cannot use a pad character with PAD_PKCS5") - - if padmode == PAD_NORMAL: - if len(data) % self.block_size == 0: - # No padding required. - return data - - if not pad: - # Get the default padding. - pad = self.getPadding() - if not pad: - raise ValueError("Data must be a multiple of " + str(self.block_size) + " bytes in length. Use padmode=PAD_PKCS5 or set the pad character.") - data += (self.block_size - (len(data) % self.block_size)) * pad - - elif padmode == PAD_PKCS5: - pad_len = 8 - (len(data) % self.block_size) - if _pythonMajorVersion < 3: - data += pad_len * chr(pad_len) - else: - data += bytes([pad_len] * pad_len) - - return data - - def _unpadData(self, data, pad, padmode): - # Unpad data depending on the mode. - if not data: - return data - if pad and padmode == PAD_PKCS5: - raise ValueError("Cannot use a pad character with PAD_PKCS5") - if padmode is None: - # Get the default padding mode. - padmode = self.getPadMode() - - if padmode == PAD_NORMAL: - if not pad: - # Get the default padding. - pad = self.getPadding() - if pad: - data = data[:-self.block_size] + \ - data[-self.block_size:].rstrip(pad) - - elif padmode == PAD_PKCS5: - if _pythonMajorVersion < 3: - pad_len = ord(data[-1]) - else: - pad_len = data[-1] - data = data[:-pad_len] - - return data - - def _guardAgainstUnicode(self, data): - # Only accept byte strings or ascii unicode values, otherwise - # there is no way to correctly decode the data into bytes. - if _pythonMajorVersion < 3: - if isinstance(data, unicode): - raise ValueError("pyDes can only work with bytes, not Unicode strings.") - else: - if isinstance(data, str): - # Only accept ascii unicode values. 
- try: - return data.encode('ascii') - except UnicodeEncodeError: - pass - raise ValueError("pyDes can only work with encoded strings, not Unicode.") - return data - -############################################################################# -# DES # -############################################################################# -class des(_baseDes): - """DES encryption/decrytpion class - - Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes. - - pyDes.des(key,[mode], [IV]) - - key -> Bytes containing the encryption key, must be exactly 8 bytes - mode -> Optional argument for encryption type, can be either pyDes.ECB - (Electronic Code Book), pyDes.CBC (Cypher Block Chaining) - IV -> Optional Initial Value bytes, must be supplied if using CBC mode. - Must be 8 bytes in length. - pad -> Optional argument, set the pad character (PAD_NORMAL) to use - during all encrypt/decrpt operations done with this instance. - padmode -> Optional argument, set the padding mode (PAD_NORMAL or - PAD_PKCS5) to use during all encrypt/decrpt operations done - with this instance. 
- """ - - - # Permutation and translation tables for DES - __pc1 = [56, 48, 40, 32, 24, 16, 8, - 0, 57, 49, 41, 33, 25, 17, - 9, 1, 58, 50, 42, 34, 26, - 18, 10, 2, 59, 51, 43, 35, - 62, 54, 46, 38, 30, 22, 14, - 6, 61, 53, 45, 37, 29, 21, - 13, 5, 60, 52, 44, 36, 28, - 20, 12, 4, 27, 19, 11, 3 - ] - - # number left rotations of pc1 - __left_rotations = [ - 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1 - ] - - # permuted choice key (table 2) - __pc2 = [ - 13, 16, 10, 23, 0, 4, - 2, 27, 14, 5, 20, 9, - 22, 18, 11, 3, 25, 7, - 15, 6, 26, 19, 12, 1, - 40, 51, 30, 36, 46, 54, - 29, 39, 50, 44, 32, 47, - 43, 48, 38, 55, 33, 52, - 45, 41, 49, 35, 28, 31 - ] - - # initial permutation IP - __ip = [57, 49, 41, 33, 25, 17, 9, 1, - 59, 51, 43, 35, 27, 19, 11, 3, - 61, 53, 45, 37, 29, 21, 13, 5, - 63, 55, 47, 39, 31, 23, 15, 7, - 56, 48, 40, 32, 24, 16, 8, 0, - 58, 50, 42, 34, 26, 18, 10, 2, - 60, 52, 44, 36, 28, 20, 12, 4, - 62, 54, 46, 38, 30, 22, 14, 6 - ] - - # Expansion table for turning 32 bit blocks into 48 bits - __expansion_table = [ - 31, 0, 1, 2, 3, 4, - 3, 4, 5, 6, 7, 8, - 7, 8, 9, 10, 11, 12, - 11, 12, 13, 14, 15, 16, - 15, 16, 17, 18, 19, 20, - 19, 20, 21, 22, 23, 24, - 23, 24, 25, 26, 27, 28, - 27, 28, 29, 30, 31, 0 - ] - - # The (in)famous S-boxes - __sbox = [ - # S1 - [14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7, - 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8, - 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0, - 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13], - - # S2 - [15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10, - 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5, - 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15, - 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9], - - # S3 - [10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8, - 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1, - 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7, - 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12], - - # S4 - 
[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15, - 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9, - 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4, - 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14], - - # S5 - [2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9, - 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6, - 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14, - 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3], - - # S6 - [12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11, - 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8, - 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6, - 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13], - - # S7 - [4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1, - 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6, - 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2, - 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12], - - # S8 - [13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7, - 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2, - 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8, - 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11], - ] - - - # 32-bit permutation function P used on the output of the S-boxes - __p = [ - 15, 6, 19, 20, 28, 11, - 27, 16, 0, 14, 22, 25, - 4, 17, 30, 9, 1, 7, - 23,13, 31, 26, 2, 8, - 18, 12, 29, 5, 21, 10, - 3, 24 - ] - - # final permutation IP^-1 - __fp = [ - 39, 7, 47, 15, 55, 23, 63, 31, - 38, 6, 46, 14, 54, 22, 62, 30, - 37, 5, 45, 13, 53, 21, 61, 29, - 36, 4, 44, 12, 52, 20, 60, 28, - 35, 3, 43, 11, 51, 19, 59, 27, - 34, 2, 42, 10, 50, 18, 58, 26, - 33, 1, 41, 9, 49, 17, 57, 25, - 32, 0, 40, 8, 48, 16, 56, 24 - ] - - # Type of crypting being done - ENCRYPT = 0x00 - DECRYPT = 0x01 - - # Initialisation - def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL): - # Sanity checking of arguments. - if len(key) != 8: - raise ValueError("Invalid DES key size. 
Key must be exactly 8 bytes long.") - _baseDes.__init__(self, mode, IV, pad, padmode) - self.key_size = 8 - - self.L = [] - self.R = [] - self.Kn = [ [0] * 48 ] * 16 # 16 48-bit keys (K1 - K16) - self.final = [] - - self.setKey(key) - - def setKey(self, key): - """Will set the crypting key for this object. Must be 8 bytes.""" - _baseDes.setKey(self, key) - self.__create_sub_keys() - - def __String_to_BitList(self, data): - """Turn the string data, into a list of bits (1, 0)'s""" - if _pythonMajorVersion < 3: - # Turn the strings into integers. Python 3 uses a bytes - # class, which already has this behaviour. - data = [ord(c) for c in data] - l = len(data) * 8 - result = [0] * l - pos = 0 - for ch in data: - i = 7 - while i >= 0: - if ch & (1 << i) != 0: - result[pos] = 1 - else: - result[pos] = 0 - pos += 1 - i -= 1 - - return result - - def __BitList_to_String(self, data): - """Turn the list of bits -> data, into a string""" - result = [] - pos = 0 - c = 0 - while pos < len(data): - c += data[pos] << (7 - (pos % 8)) - if (pos % 8) == 7: - result.append(c) - c = 0 - pos += 1 - - if _pythonMajorVersion < 3: - return ''.join([ chr(c) for c in result ]) - else: - return bytes(result) - - def __permutate(self, table, block): - """Permutate this block with the specified table""" - return list(map(lambda x: block[x], table)) - - # Transform the secret key, so that it is ready for data processing - # Create the 16 subkeys, K[1] - K[16] - def __create_sub_keys(self): - """Create the 16 subkeys K[1] to K[16] from the given key""" - key = self.__permutate(des.__pc1, self.__String_to_BitList(self.getKey())) - i = 0 - # Split into Left and Right sections - self.L = key[:28] - self.R = key[28:] - while i < 16: - j = 0 - # Perform circular left shifts - while j < des.__left_rotations[i]: - self.L.append(self.L[0]) - del self.L[0] - - self.R.append(self.R[0]) - del self.R[0] - - j += 1 - - # Create one of the 16 subkeys through pc2 permutation - self.Kn[i] = 
self.__permutate(des.__pc2, self.L + self.R) - - i += 1 - - # Main part of the encryption algorithm, the number cruncher :) - def __des_crypt(self, block, crypt_type): - """Crypt the block of data through DES bit-manipulation""" - block = self.__permutate(des.__ip, block) - self.L = block[:32] - self.R = block[32:] - - # Encryption starts from Kn[1] through to Kn[16] - if crypt_type == des.ENCRYPT: - iteration = 0 - iteration_adjustment = 1 - # Decryption starts from Kn[16] down to Kn[1] - else: - iteration = 15 - iteration_adjustment = -1 - - i = 0 - while i < 16: - # Make a copy of R[i-1], this will later become L[i] - tempR = self.R[:] - - # Permutate R[i - 1] to start creating R[i] - self.R = self.__permutate(des.__expansion_table, self.R) - - # Exclusive or R[i - 1] with K[i], create B[1] to B[8] whilst here - self.R = list(map(lambda x, y: x ^ y, self.R, self.Kn[iteration])) - B = [self.R[:6], self.R[6:12], self.R[12:18], self.R[18:24], self.R[24:30], self.R[30:36], self.R[36:42], self.R[42:]] - # Optimization: Replaced below commented code with above - #j = 0 - #B = [] - #while j < len(self.R): - # self.R[j] = self.R[j] ^ self.Kn[iteration][j] - # j += 1 - # if j % 6 == 0: - # B.append(self.R[j-6:j]) - - # Permutate B[1] to B[8] using the S-Boxes - j = 0 - Bn = [0] * 32 - pos = 0 - while j < 8: - # Work out the offsets - m = (B[j][0] << 1) + B[j][5] - n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4] - - # Find the permutation value - v = des.__sbox[j][(m << 4) + n] - - # Turn value into bits, add it to result: Bn - Bn[pos] = (v & 8) >> 3 - Bn[pos + 1] = (v & 4) >> 2 - Bn[pos + 2] = (v & 2) >> 1 - Bn[pos + 3] = v & 1 - - pos += 4 - j += 1 - - # Permutate the concatination of B[1] to B[8] (Bn) - self.R = self.__permutate(des.__p, Bn) - - # Xor with L[i - 1] - self.R = list(map(lambda x, y: x ^ y, self.R, self.L)) - # Optimization: This now replaces the below commented code - #j = 0 - #while j < len(self.R): - # self.R[j] = self.R[j] ^ self.L[j] - 
# j += 1 - - # L[i] becomes R[i - 1] - self.L = tempR - - i += 1 - iteration += iteration_adjustment - - # Final permutation of R[16]L[16] - self.final = self.__permutate(des.__fp, self.R + self.L) - return self.final - - - # Data to be encrypted/decrypted - def crypt(self, data, crypt_type): - """Crypt the data in blocks, running it through des_crypt()""" - - # Error check the data - if not data: - return '' - if len(data) % self.block_size != 0: - if crypt_type == des.DECRYPT: # Decryption must work on 8 byte blocks - raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n.") - if not self.getPadding(): - raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n. Try setting the optional padding character") - else: - data += (self.block_size - (len(data) % self.block_size)) * self.getPadding() - # print "Len of data: %f" % (len(data) / self.block_size) - - if self.getMode() == CBC: - if self.getIV(): - iv = self.__String_to_BitList(self.getIV()) - else: - raise ValueError("For CBC mode, you must supply the Initial Value (IV) for ciphering") - - # Split the data into blocks, crypting each one seperately - i = 0 - dict = {} - result = [] - #cached = 0 - #lines = 0 - while i < len(data): - # Test code for caching encryption results - #lines += 1 - #if dict.has_key(data[i:i+8]): - #print "Cached result for: %s" % data[i:i+8] - # cached += 1 - # result.append(dict[data[i:i+8]]) - # i += 8 - # continue - - block = self.__String_to_BitList(data[i:i+8]) - - # Xor with IV if using CBC mode - if self.getMode() == CBC: - if crypt_type == des.ENCRYPT: - block = list(map(lambda x, y: x ^ y, block, iv)) - #j = 0 - #while j < len(block): - # block[j] = block[j] ^ iv[j] - # j += 1 - - processed_block = self.__des_crypt(block, crypt_type) - - if crypt_type == des.DECRYPT: - processed_block = list(map(lambda x, y: x ^ y, processed_block, iv)) - #j = 0 - #while j < len(processed_block): - # 
processed_block[j] = processed_block[j] ^ iv[j] - # j += 1 - iv = block - else: - iv = processed_block - else: - processed_block = self.__des_crypt(block, crypt_type) - - - # Add the resulting crypted block to our list - #d = self.__BitList_to_String(processed_block) - #result.append(d) - result.append(self.__BitList_to_String(processed_block)) - #dict[data[i:i+8]] = d - i += 8 - - # print "Lines: %d, cached: %d" % (lines, cached) - - # Return the full crypted string - if _pythonMajorVersion < 3: - return ''.join(result) - else: - return bytes.fromhex('').join(result) - - def encrypt(self, data, pad=None, padmode=None): - """encrypt(data, [pad], [padmode]) -> bytes - - data : Bytes to be encrypted - pad : Optional argument for encryption padding. Must only be one byte - padmode : Optional argument for overriding the padding mode. - - The data must be a multiple of 8 bytes and will be encrypted - with the already specified key. Data does not have to be a - multiple of 8 bytes if the padding character is supplied, or - the padmode is set to PAD_PKCS5, as bytes will then added to - ensure the be padded data is a multiple of 8 bytes. - """ - data = self._guardAgainstUnicode(data) - if pad is not None: - pad = self._guardAgainstUnicode(pad) - data = self._padData(data, pad, padmode) - return self.crypt(data, des.ENCRYPT) - - def decrypt(self, data, pad=None, padmode=None): - """decrypt(data, [pad], [padmode]) -> bytes - - data : Bytes to be encrypted - pad : Optional argument for decryption padding. Must only be one byte - padmode : Optional argument for overriding the padding mode. - - The data must be a multiple of 8 bytes and will be decrypted - with the already specified key. In PAD_NORMAL mode, if the - optional padding character is supplied, then the un-encrypted - data will have the padding characters removed from the end of - the bytes. This pad removal only occurs on the last 8 bytes of - the data (last data block). 
In PAD_PKCS5 mode, the special - padding end markers will be removed from the data after decrypting. - """ - data = self._guardAgainstUnicode(data) - if pad is not None: - pad = self._guardAgainstUnicode(pad) - data = self.crypt(data, des.DECRYPT) - return self._unpadData(data, pad, padmode) - - - -############################################################################# -# Triple DES # -############################################################################# -class triple_des(_baseDes): - """Triple DES encryption/decrytpion class - - This algorithm uses the DES-EDE3 (when a 24 byte key is supplied) or - the DES-EDE2 (when a 16 byte key is supplied) encryption methods. - Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes. - - pyDes.des(key, [mode], [IV]) - - key -> Bytes containing the encryption key, must be either 16 or - 24 bytes long - mode -> Optional argument for encryption type, can be either pyDes.ECB - (Electronic Code Book), pyDes.CBC (Cypher Block Chaining) - IV -> Optional Initial Value bytes, must be supplied if using CBC mode. - Must be 8 bytes in length. - pad -> Optional argument, set the pad character (PAD_NORMAL) to use - during all encrypt/decrpt operations done with this instance. - padmode -> Optional argument, set the padding mode (PAD_NORMAL or - PAD_PKCS5) to use during all encrypt/decrpt operations done - with this instance. - """ - def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL): - _baseDes.__init__(self, mode, IV, pad, padmode) - self.setKey(key) - - def setKey(self, key): - """Will set the crypting key for this object. Either 16 or 24 bytes long.""" - self.key_size = 24 # Use DES-EDE3 mode - if len(key) != self.key_size: - if len(key) == 16: # Use DES-EDE2 mode - self.key_size = 16 - else: - raise ValueError("Invalid triple DES key size. 
Key must be either 16 or 24 bytes long") - if self.getMode() == CBC: - if not self.getIV(): - # Use the first 8 bytes of the key - self._iv = key[:self.block_size] - if len(self.getIV()) != self.block_size: - raise ValueError("Invalid IV, must be 8 bytes in length") - self.__key1 = des(key[:8], self._mode, self._iv, - self._padding, self._padmode) - self.__key2 = des(key[8:16], self._mode, self._iv, - self._padding, self._padmode) - if self.key_size == 16: - self.__key3 = self.__key1 - else: - self.__key3 = des(key[16:], self._mode, self._iv, - self._padding, self._padmode) - _baseDes.setKey(self, key) - - # Override setter methods to work on all 3 keys. - - def setMode(self, mode): - """Sets the type of crypting mode, pyDes.ECB or pyDes.CBC""" - _baseDes.setMode(self, mode) - for key in (self.__key1, self.__key2, self.__key3): - key.setMode(mode) - - def setPadding(self, pad): - """setPadding() -> bytes of length 1. Padding character.""" - _baseDes.setPadding(self, pad) - for key in (self.__key1, self.__key2, self.__key3): - key.setPadding(pad) - - def setPadMode(self, mode): - """Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5""" - _baseDes.setPadMode(self, mode) - for key in (self.__key1, self.__key2, self.__key3): - key.setPadMode(mode) - - def setIV(self, IV): - """Will set the Initial Value, used in conjunction with CBC mode""" - _baseDes.setIV(self, IV) - for key in (self.__key1, self.__key2, self.__key3): - key.setIV(IV) - - def encrypt(self, data, pad=None, padmode=None): - """encrypt(data, [pad], [padmode]) -> bytes - - data : bytes to be encrypted - pad : Optional argument for encryption padding. Must only be one byte - padmode : Optional argument for overriding the padding mode. - - The data must be a multiple of 8 bytes and will be encrypted - with the already specified key. 
Data does not have to be a - multiple of 8 bytes if the padding character is supplied, or - the padmode is set to PAD_PKCS5, as bytes will then added to - ensure the be padded data is a multiple of 8 bytes. - """ - ENCRYPT = des.ENCRYPT - DECRYPT = des.DECRYPT - data = self._guardAgainstUnicode(data) - if pad is not None: - pad = self._guardAgainstUnicode(pad) - # Pad the data accordingly. - data = self._padData(data, pad, padmode) - if self.getMode() == CBC: - self.__key1.setIV(self.getIV()) - self.__key2.setIV(self.getIV()) - self.__key3.setIV(self.getIV()) - i = 0 - result = [] - while i < len(data): - block = self.__key1.crypt(data[i:i+8], ENCRYPT) - block = self.__key2.crypt(block, DECRYPT) - block = self.__key3.crypt(block, ENCRYPT) - self.__key1.setIV(block) - self.__key2.setIV(block) - self.__key3.setIV(block) - result.append(block) - i += 8 - if _pythonMajorVersion < 3: - return ''.join(result) - else: - return bytes.fromhex('').join(result) - else: - data = self.__key1.crypt(data, ENCRYPT) - data = self.__key2.crypt(data, DECRYPT) - return self.__key3.crypt(data, ENCRYPT) - - def decrypt(self, data, pad=None, padmode=None): - """decrypt(data, [pad], [padmode]) -> bytes - - data : bytes to be encrypted - pad : Optional argument for decryption padding. Must only be one byte - padmode : Optional argument for overriding the padding mode. - - The data must be a multiple of 8 bytes and will be decrypted - with the already specified key. In PAD_NORMAL mode, if the - optional padding character is supplied, then the un-encrypted - data will have the padding characters removed from the end of - the bytes. This pad removal only occurs on the last 8 bytes of - the data (last data block). In PAD_PKCS5 mode, the special - padding end markers will be removed from the data after - decrypting, no pad character is required for PAD_PKCS5. 
- """ - ENCRYPT = des.ENCRYPT - DECRYPT = des.DECRYPT - data = self._guardAgainstUnicode(data) - if pad is not None: - pad = self._guardAgainstUnicode(pad) - if self.getMode() == CBC: - self.__key1.setIV(self.getIV()) - self.__key2.setIV(self.getIV()) - self.__key3.setIV(self.getIV()) - i = 0 - result = [] - while i < len(data): - iv = data[i:i+8] - block = self.__key3.crypt(iv, DECRYPT) - block = self.__key2.crypt(block, ENCRYPT) - block = self.__key1.crypt(block, DECRYPT) - self.__key1.setIV(iv) - self.__key2.setIV(iv) - self.__key3.setIV(iv) - result.append(block) - i += 8 - if _pythonMajorVersion < 3: - data = ''.join(result) - else: - data = bytes.fromhex('').join(result) - else: - data = self.__key3.crypt(data, DECRYPT) - data = self.__key2.crypt(data, ENCRYPT) - data = self.__key1.crypt(data, DECRYPT) - return self._unpadData(data, pad, padmode) +############################################################################# +# Documentation # +############################################################################# + +# Author: Todd Whiteman +# Date: 16th March, 2009 +# Verion: 2.0.0 +# License: Public Domain - free to do as you wish +# Homepage: http://twhiteman.netfirms.com/des.html +# +# This is a pure python implementation of the DES encryption algorithm. +# It's pure python to avoid portability issues, since most DES +# implementations are programmed in C (for performance reasons). +# +# Triple DES class is also implemented, utilising the DES base. Triple DES +# is either DES-EDE3 with a 24 byte key, or DES-EDE2 with a 16 byte key. +# +# See the README.txt that should come with this python module for the +# implementation methods used. +# +# Thanks to: +# * David Broadwell for ideas, comments and suggestions. +# * Mario Wolff for pointing out and debugging some triple des CBC errors. +# * Santiago Palladino for providing the PKCS5 padding technique. +# * Shaya for correcting the PAD_PKCS5 triple des CBC errors. 
+# +"""A pure python implementation of the DES and TRIPLE DES encryption algorithms. + +Class initialization +-------------------- +pyDes.des(key, [mode], [IV], [pad], [padmode]) +pyDes.triple_des(key, [mode], [IV], [pad], [padmode]) + +key -> Bytes containing the encryption key. 8 bytes for DES, 16 or 24 bytes + for Triple DES +mode -> Optional argument for encryption type, can be either + pyDes.ECB (Electronic Code Book) or pyDes.CBC (Cypher Block Chaining) +IV -> Optional Initial Value bytes, must be supplied if using CBC mode. + Length must be 8 bytes. +pad -> Optional argument, set the pad character (PAD_NORMAL) to use during + all encrypt/decrpt operations done with this instance. +padmode -> Optional argument, set the padding mode (PAD_NORMAL or PAD_PKCS5) + to use during all encrypt/decrpt operations done with this instance. + +I recommend to use PAD_PKCS5 padding, as then you never need to worry about any +padding issues, as the padding can be removed unambiguously upon decrypting +data that was encrypted using PAD_PKCS5 padmode. + +Common methods +-------------- +encrypt(data, [pad], [padmode]) +decrypt(data, [pad], [padmode]) + +data -> Bytes to be encrypted/decrypted +pad -> Optional argument. Only when using padmode of PAD_NORMAL. For + encryption, adds this characters to the end of the data block when + data is not a multiple of 8 bytes. For decryption, will remove the + trailing characters that match this pad character from the last 8 + bytes of the unencrypted data block. +padmode -> Optional argument, set the padding mode, must be one of PAD_NORMAL + or PAD_PKCS5). Defaults to PAD_NORMAL. 
+ + +Example +------- +from pyDes import * + +data = "Please encrypt my data" +k = des("DESCRYPT", CBC, "\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5) +# For Python3, you'll need to use bytes, i.e.: +# data = b"Please encrypt my data" +# k = des(b"DESCRYPT", CBC, b"\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5) +d = k.encrypt(data) +print "Encrypted: %r" % d +print "Decrypted: %r" % k.decrypt(d) +assert k.decrypt(d, padmode=PAD_PKCS5) == data + + +See the module source (pyDes.py) for more examples of use. +You can also run the pyDes.py file without and arguments to see a simple test. + +Note: This code was not written for high-end systems needing a fast + implementation, but rather a handy portable solution with small usage. + +""" + +import sys + +# _pythonMajorVersion is used to handle Python2 and Python3 differences. +_pythonMajorVersion = sys.version_info[0] + +# Modes of crypting / cyphering +ECB = 0 +CBC = 1 + +# Modes of padding +PAD_NORMAL = 1 +PAD_PKCS5 = 2 + +# PAD_PKCS5: is a method that will unambiguously remove all padding +# characters after decryption, when originally encrypted with +# this padding mode. +# For a good description of the PKCS5 padding technique, see: +# http://www.faqs.org/rfcs/rfc1423.html + +# The base class shared by des and triple des. +class _baseDes(object): + def __init__(self, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL): + if IV: + IV = self._guardAgainstUnicode(IV) + if pad: + pad = self._guardAgainstUnicode(pad) + self.block_size = 8 + # Sanity checking of arguments. 
+ if pad and padmode == PAD_PKCS5: + raise ValueError("Cannot use a pad character with PAD_PKCS5") + if IV and len(IV) != self.block_size: + raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes") + + # Set the passed in variables + self._mode = mode + self._iv = IV + self._padding = pad + self._padmode = padmode + + def getKey(self): + """getKey() -> bytes""" + return self.__key + + def setKey(self, key): + """Will set the crypting key for this object.""" + key = self._guardAgainstUnicode(key) + self.__key = key + + def getMode(self): + """getMode() -> pyDes.ECB or pyDes.CBC""" + return self._mode + + def setMode(self, mode): + """Sets the type of crypting mode, pyDes.ECB or pyDes.CBC""" + self._mode = mode + + def getPadding(self): + """getPadding() -> bytes of length 1. Padding character.""" + return self._padding + + def setPadding(self, pad): + """setPadding() -> bytes of length 1. Padding character.""" + if pad is not None: + pad = self._guardAgainstUnicode(pad) + self._padding = pad + + def getPadMode(self): + """getPadMode() -> pyDes.PAD_NORMAL or pyDes.PAD_PKCS5""" + return self._padmode + + def setPadMode(self, mode): + """Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5""" + self._padmode = mode + + def getIV(self): + """getIV() -> bytes""" + return self._iv + + def setIV(self, IV): + """Will set the Initial Value, used in conjunction with CBC mode""" + if not IV or len(IV) != self.block_size: + raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes") + IV = self._guardAgainstUnicode(IV) + self._iv = IV + + def _padData(self, data, pad, padmode): + # Pad data depending on the mode + if padmode is None: + # Get the default padding mode. 
+ padmode = self.getPadMode() + if pad and padmode == PAD_PKCS5: + raise ValueError("Cannot use a pad character with PAD_PKCS5") + + if padmode == PAD_NORMAL: + if len(data) % self.block_size == 0: + # No padding required. + return data + + if not pad: + # Get the default padding. + pad = self.getPadding() + if not pad: + raise ValueError("Data must be a multiple of " + str(self.block_size) + " bytes in length. Use padmode=PAD_PKCS5 or set the pad character.") + data += (self.block_size - (len(data) % self.block_size)) * pad + + elif padmode == PAD_PKCS5: + pad_len = 8 - (len(data) % self.block_size) + if _pythonMajorVersion < 3: + data += pad_len * chr(pad_len) + else: + data += bytes([pad_len] * pad_len) + + return data + + def _unpadData(self, data, pad, padmode): + # Unpad data depending on the mode. + if not data: + return data + if pad and padmode == PAD_PKCS5: + raise ValueError("Cannot use a pad character with PAD_PKCS5") + if padmode is None: + # Get the default padding mode. + padmode = self.getPadMode() + + if padmode == PAD_NORMAL: + if not pad: + # Get the default padding. + pad = self.getPadding() + if pad: + data = data[:-self.block_size] + \ + data[-self.block_size:].rstrip(pad) + + elif padmode == PAD_PKCS5: + if _pythonMajorVersion < 3: + pad_len = ord(data[-1]) + else: + pad_len = data[-1] + data = data[:-pad_len] + + return data + + def _guardAgainstUnicode(self, data): + # Only accept byte strings or ascii unicode values, otherwise + # there is no way to correctly decode the data into bytes. + if _pythonMajorVersion < 3: + if isinstance(data, unicode): + raise ValueError("pyDes can only work with bytes, not Unicode strings.") + else: + if isinstance(data, str): + # Only accept ascii unicode values. 
+ try: + return data.encode('ascii') + except UnicodeEncodeError: + pass + raise ValueError("pyDes can only work with encoded strings, not Unicode.") + return data + +############################################################################# +# DES # +############################################################################# +class des(_baseDes): + """DES encryption/decrytpion class + + Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes. + + pyDes.des(key,[mode], [IV]) + + key -> Bytes containing the encryption key, must be exactly 8 bytes + mode -> Optional argument for encryption type, can be either pyDes.ECB + (Electronic Code Book), pyDes.CBC (Cypher Block Chaining) + IV -> Optional Initial Value bytes, must be supplied if using CBC mode. + Must be 8 bytes in length. + pad -> Optional argument, set the pad character (PAD_NORMAL) to use + during all encrypt/decrpt operations done with this instance. + padmode -> Optional argument, set the padding mode (PAD_NORMAL or + PAD_PKCS5) to use during all encrypt/decrpt operations done + with this instance. 
+ """ + + + # Permutation and translation tables for DES + __pc1 = [56, 48, 40, 32, 24, 16, 8, + 0, 57, 49, 41, 33, 25, 17, + 9, 1, 58, 50, 42, 34, 26, + 18, 10, 2, 59, 51, 43, 35, + 62, 54, 46, 38, 30, 22, 14, + 6, 61, 53, 45, 37, 29, 21, + 13, 5, 60, 52, 44, 36, 28, + 20, 12, 4, 27, 19, 11, 3 + ] + + # number left rotations of pc1 + __left_rotations = [ + 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1 + ] + + # permuted choice key (table 2) + __pc2 = [ + 13, 16, 10, 23, 0, 4, + 2, 27, 14, 5, 20, 9, + 22, 18, 11, 3, 25, 7, + 15, 6, 26, 19, 12, 1, + 40, 51, 30, 36, 46, 54, + 29, 39, 50, 44, 32, 47, + 43, 48, 38, 55, 33, 52, + 45, 41, 49, 35, 28, 31 + ] + + # initial permutation IP + __ip = [57, 49, 41, 33, 25, 17, 9, 1, + 59, 51, 43, 35, 27, 19, 11, 3, + 61, 53, 45, 37, 29, 21, 13, 5, + 63, 55, 47, 39, 31, 23, 15, 7, + 56, 48, 40, 32, 24, 16, 8, 0, + 58, 50, 42, 34, 26, 18, 10, 2, + 60, 52, 44, 36, 28, 20, 12, 4, + 62, 54, 46, 38, 30, 22, 14, 6 + ] + + # Expansion table for turning 32 bit blocks into 48 bits + __expansion_table = [ + 31, 0, 1, 2, 3, 4, + 3, 4, 5, 6, 7, 8, + 7, 8, 9, 10, 11, 12, + 11, 12, 13, 14, 15, 16, + 15, 16, 17, 18, 19, 20, + 19, 20, 21, 22, 23, 24, + 23, 24, 25, 26, 27, 28, + 27, 28, 29, 30, 31, 0 + ] + + # The (in)famous S-boxes + __sbox = [ + # S1 + [14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7, + 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8, + 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0, + 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13], + + # S2 + [15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10, + 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5, + 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15, + 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9], + + # S3 + [10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8, + 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1, + 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7, + 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12], + + # S4 + 
[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15, + 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9, + 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4, + 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14], + + # S5 + [2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9, + 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6, + 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14, + 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3], + + # S6 + [12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11, + 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8, + 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6, + 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13], + + # S7 + [4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1, + 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6, + 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2, + 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12], + + # S8 + [13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7, + 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2, + 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8, + 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11], + ] + + + # 32-bit permutation function P used on the output of the S-boxes + __p = [ + 15, 6, 19, 20, 28, 11, + 27, 16, 0, 14, 22, 25, + 4, 17, 30, 9, 1, 7, + 23,13, 31, 26, 2, 8, + 18, 12, 29, 5, 21, 10, + 3, 24 + ] + + # final permutation IP^-1 + __fp = [ + 39, 7, 47, 15, 55, 23, 63, 31, + 38, 6, 46, 14, 54, 22, 62, 30, + 37, 5, 45, 13, 53, 21, 61, 29, + 36, 4, 44, 12, 52, 20, 60, 28, + 35, 3, 43, 11, 51, 19, 59, 27, + 34, 2, 42, 10, 50, 18, 58, 26, + 33, 1, 41, 9, 49, 17, 57, 25, + 32, 0, 40, 8, 48, 16, 56, 24 + ] + + # Type of crypting being done + ENCRYPT = 0x00 + DECRYPT = 0x01 + + # Initialisation + def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL): + # Sanity checking of arguments. + if len(key) != 8: + raise ValueError("Invalid DES key size. 
Key must be exactly 8 bytes long.") + _baseDes.__init__(self, mode, IV, pad, padmode) + self.key_size = 8 + + self.L = [] + self.R = [] + self.Kn = [ [0] * 48 ] * 16 # 16 48-bit keys (K1 - K16) + self.final = [] + + self.setKey(key) + + def setKey(self, key): + """Will set the crypting key for this object. Must be 8 bytes.""" + _baseDes.setKey(self, key) + self.__create_sub_keys() + + def __String_to_BitList(self, data): + """Turn the string data, into a list of bits (1, 0)'s""" + if _pythonMajorVersion < 3: + # Turn the strings into integers. Python 3 uses a bytes + # class, which already has this behaviour. + data = [ord(c) for c in data] + l = len(data) * 8 + result = [0] * l + pos = 0 + for ch in data: + i = 7 + while i >= 0: + if ch & (1 << i) != 0: + result[pos] = 1 + else: + result[pos] = 0 + pos += 1 + i -= 1 + + return result + + def __BitList_to_String(self, data): + """Turn the list of bits -> data, into a string""" + result = [] + pos = 0 + c = 0 + while pos < len(data): + c += data[pos] << (7 - (pos % 8)) + if (pos % 8) == 7: + result.append(c) + c = 0 + pos += 1 + + if _pythonMajorVersion < 3: + return ''.join([ chr(c) for c in result ]) + else: + return bytes(result) + + def __permutate(self, table, block): + """Permutate this block with the specified table""" + return list(map(lambda x: block[x], table)) + + # Transform the secret key, so that it is ready for data processing + # Create the 16 subkeys, K[1] - K[16] + def __create_sub_keys(self): + """Create the 16 subkeys K[1] to K[16] from the given key""" + key = self.__permutate(des.__pc1, self.__String_to_BitList(self.getKey())) + i = 0 + # Split into Left and Right sections + self.L = key[:28] + self.R = key[28:] + while i < 16: + j = 0 + # Perform circular left shifts + while j < des.__left_rotations[i]: + self.L.append(self.L[0]) + del self.L[0] + + self.R.append(self.R[0]) + del self.R[0] + + j += 1 + + # Create one of the 16 subkeys through pc2 permutation + self.Kn[i] = 
self.__permutate(des.__pc2, self.L + self.R) + + i += 1 + + # Main part of the encryption algorithm, the number cruncher :) + def __des_crypt(self, block, crypt_type): + """Crypt the block of data through DES bit-manipulation""" + block = self.__permutate(des.__ip, block) + self.L = block[:32] + self.R = block[32:] + + # Encryption starts from Kn[1] through to Kn[16] + if crypt_type == des.ENCRYPT: + iteration = 0 + iteration_adjustment = 1 + # Decryption starts from Kn[16] down to Kn[1] + else: + iteration = 15 + iteration_adjustment = -1 + + i = 0 + while i < 16: + # Make a copy of R[i-1], this will later become L[i] + tempR = self.R[:] + + # Permutate R[i - 1] to start creating R[i] + self.R = self.__permutate(des.__expansion_table, self.R) + + # Exclusive or R[i - 1] with K[i], create B[1] to B[8] whilst here + self.R = list(map(lambda x, y: x ^ y, self.R, self.Kn[iteration])) + B = [self.R[:6], self.R[6:12], self.R[12:18], self.R[18:24], self.R[24:30], self.R[30:36], self.R[36:42], self.R[42:]] + # Optimization: Replaced below commented code with above + #j = 0 + #B = [] + #while j < len(self.R): + # self.R[j] = self.R[j] ^ self.Kn[iteration][j] + # j += 1 + # if j % 6 == 0: + # B.append(self.R[j-6:j]) + + # Permutate B[1] to B[8] using the S-Boxes + j = 0 + Bn = [0] * 32 + pos = 0 + while j < 8: + # Work out the offsets + m = (B[j][0] << 1) + B[j][5] + n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4] + + # Find the permutation value + v = des.__sbox[j][(m << 4) + n] + + # Turn value into bits, add it to result: Bn + Bn[pos] = (v & 8) >> 3 + Bn[pos + 1] = (v & 4) >> 2 + Bn[pos + 2] = (v & 2) >> 1 + Bn[pos + 3] = v & 1 + + pos += 4 + j += 1 + + # Permutate the concatination of B[1] to B[8] (Bn) + self.R = self.__permutate(des.__p, Bn) + + # Xor with L[i - 1] + self.R = list(map(lambda x, y: x ^ y, self.R, self.L)) + # Optimization: This now replaces the below commented code + #j = 0 + #while j < len(self.R): + # self.R[j] = self.R[j] ^ self.L[j] + 
# j += 1 + + # L[i] becomes R[i - 1] + self.L = tempR + + i += 1 + iteration += iteration_adjustment + + # Final permutation of R[16]L[16] + self.final = self.__permutate(des.__fp, self.R + self.L) + return self.final + + + # Data to be encrypted/decrypted + def crypt(self, data, crypt_type): + """Crypt the data in blocks, running it through des_crypt()""" + + # Error check the data + if not data: + return '' + if len(data) % self.block_size != 0: + if crypt_type == des.DECRYPT: # Decryption must work on 8 byte blocks + raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n.") + if not self.getPadding(): + raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n. Try setting the optional padding character") + else: + data += (self.block_size - (len(data) % self.block_size)) * self.getPadding() + # print "Len of data: %f" % (len(data) / self.block_size) + + if self.getMode() == CBC: + if self.getIV(): + iv = self.__String_to_BitList(self.getIV()) + else: + raise ValueError("For CBC mode, you must supply the Initial Value (IV) for ciphering") + + # Split the data into blocks, crypting each one seperately + i = 0 + dict = {} + result = [] + #cached = 0 + #lines = 0 + while i < len(data): + # Test code for caching encryption results + #lines += 1 + #if dict.has_key(data[i:i+8]): + #print "Cached result for: %s" % data[i:i+8] + # cached += 1 + # result.append(dict[data[i:i+8]]) + # i += 8 + # continue + + block = self.__String_to_BitList(data[i:i+8]) + + # Xor with IV if using CBC mode + if self.getMode() == CBC: + if crypt_type == des.ENCRYPT: + block = list(map(lambda x, y: x ^ y, block, iv)) + #j = 0 + #while j < len(block): + # block[j] = block[j] ^ iv[j] + # j += 1 + + processed_block = self.__des_crypt(block, crypt_type) + + if crypt_type == des.DECRYPT: + processed_block = list(map(lambda x, y: x ^ y, processed_block, iv)) + #j = 0 + #while j < len(processed_block): + # 
processed_block[j] = processed_block[j] ^ iv[j] + # j += 1 + iv = block + else: + iv = processed_block + else: + processed_block = self.__des_crypt(block, crypt_type) + + + # Add the resulting crypted block to our list + #d = self.__BitList_to_String(processed_block) + #result.append(d) + result.append(self.__BitList_to_String(processed_block)) + #dict[data[i:i+8]] = d + i += 8 + + # print "Lines: %d, cached: %d" % (lines, cached) + + # Return the full crypted string + if _pythonMajorVersion < 3: + return ''.join(result) + else: + return bytes.fromhex('').join(result) + + def encrypt(self, data, pad=None, padmode=None): + """encrypt(data, [pad], [padmode]) -> bytes + + data : Bytes to be encrypted + pad : Optional argument for encryption padding. Must only be one byte + padmode : Optional argument for overriding the padding mode. + + The data must be a multiple of 8 bytes and will be encrypted + with the already specified key. Data does not have to be a + multiple of 8 bytes if the padding character is supplied, or + the padmode is set to PAD_PKCS5, as bytes will then added to + ensure the be padded data is a multiple of 8 bytes. + """ + data = self._guardAgainstUnicode(data) + if pad is not None: + pad = self._guardAgainstUnicode(pad) + data = self._padData(data, pad, padmode) + return self.crypt(data, des.ENCRYPT) + + def decrypt(self, data, pad=None, padmode=None): + """decrypt(data, [pad], [padmode]) -> bytes + + data : Bytes to be encrypted + pad : Optional argument for decryption padding. Must only be one byte + padmode : Optional argument for overriding the padding mode. + + The data must be a multiple of 8 bytes and will be decrypted + with the already specified key. In PAD_NORMAL mode, if the + optional padding character is supplied, then the un-encrypted + data will have the padding characters removed from the end of + the bytes. This pad removal only occurs on the last 8 bytes of + the data (last data block). 
In PAD_PKCS5 mode, the special + padding end markers will be removed from the data after decrypting. + """ + data = self._guardAgainstUnicode(data) + if pad is not None: + pad = self._guardAgainstUnicode(pad) + data = self.crypt(data, des.DECRYPT) + return self._unpadData(data, pad, padmode) + + + +############################################################################# +# Triple DES # +############################################################################# +class triple_des(_baseDes): + """Triple DES encryption/decrytpion class + + This algorithm uses the DES-EDE3 (when a 24 byte key is supplied) or + the DES-EDE2 (when a 16 byte key is supplied) encryption methods. + Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes. + + pyDes.des(key, [mode], [IV]) + + key -> Bytes containing the encryption key, must be either 16 or + 24 bytes long + mode -> Optional argument for encryption type, can be either pyDes.ECB + (Electronic Code Book), pyDes.CBC (Cypher Block Chaining) + IV -> Optional Initial Value bytes, must be supplied if using CBC mode. + Must be 8 bytes in length. + pad -> Optional argument, set the pad character (PAD_NORMAL) to use + during all encrypt/decrpt operations done with this instance. + padmode -> Optional argument, set the padding mode (PAD_NORMAL or + PAD_PKCS5) to use during all encrypt/decrpt operations done + with this instance. + """ + def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL): + _baseDes.__init__(self, mode, IV, pad, padmode) + self.setKey(key) + + def setKey(self, key): + """Will set the crypting key for this object. Either 16 or 24 bytes long.""" + self.key_size = 24 # Use DES-EDE3 mode + if len(key) != self.key_size: + if len(key) == 16: # Use DES-EDE2 mode + self.key_size = 16 + else: + raise ValueError("Invalid triple DES key size. 
Key must be either 16 or 24 bytes long") + if self.getMode() == CBC: + if not self.getIV(): + # Use the first 8 bytes of the key + self._iv = key[:self.block_size] + if len(self.getIV()) != self.block_size: + raise ValueError("Invalid IV, must be 8 bytes in length") + self.__key1 = des(key[:8], self._mode, self._iv, + self._padding, self._padmode) + self.__key2 = des(key[8:16], self._mode, self._iv, + self._padding, self._padmode) + if self.key_size == 16: + self.__key3 = self.__key1 + else: + self.__key3 = des(key[16:], self._mode, self._iv, + self._padding, self._padmode) + _baseDes.setKey(self, key) + + # Override setter methods to work on all 3 keys. + + def setMode(self, mode): + """Sets the type of crypting mode, pyDes.ECB or pyDes.CBC""" + _baseDes.setMode(self, mode) + for key in (self.__key1, self.__key2, self.__key3): + key.setMode(mode) + + def setPadding(self, pad): + """setPadding() -> bytes of length 1. Padding character.""" + _baseDes.setPadding(self, pad) + for key in (self.__key1, self.__key2, self.__key3): + key.setPadding(pad) + + def setPadMode(self, mode): + """Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5""" + _baseDes.setPadMode(self, mode) + for key in (self.__key1, self.__key2, self.__key3): + key.setPadMode(mode) + + def setIV(self, IV): + """Will set the Initial Value, used in conjunction with CBC mode""" + _baseDes.setIV(self, IV) + for key in (self.__key1, self.__key2, self.__key3): + key.setIV(IV) + + def encrypt(self, data, pad=None, padmode=None): + """encrypt(data, [pad], [padmode]) -> bytes + + data : bytes to be encrypted + pad : Optional argument for encryption padding. Must only be one byte + padmode : Optional argument for overriding the padding mode. + + The data must be a multiple of 8 bytes and will be encrypted + with the already specified key. 
Data does not have to be a + multiple of 8 bytes if the padding character is supplied, or + the padmode is set to PAD_PKCS5, as bytes will then added to + ensure the be padded data is a multiple of 8 bytes. + """ + ENCRYPT = des.ENCRYPT + DECRYPT = des.DECRYPT + data = self._guardAgainstUnicode(data) + if pad is not None: + pad = self._guardAgainstUnicode(pad) + # Pad the data accordingly. + data = self._padData(data, pad, padmode) + if self.getMode() == CBC: + self.__key1.setIV(self.getIV()) + self.__key2.setIV(self.getIV()) + self.__key3.setIV(self.getIV()) + i = 0 + result = [] + while i < len(data): + block = self.__key1.crypt(data[i:i+8], ENCRYPT) + block = self.__key2.crypt(block, DECRYPT) + block = self.__key3.crypt(block, ENCRYPT) + self.__key1.setIV(block) + self.__key2.setIV(block) + self.__key3.setIV(block) + result.append(block) + i += 8 + if _pythonMajorVersion < 3: + return ''.join(result) + else: + return bytes.fromhex('').join(result) + else: + data = self.__key1.crypt(data, ENCRYPT) + data = self.__key2.crypt(data, DECRYPT) + return self.__key3.crypt(data, ENCRYPT) + + def decrypt(self, data, pad=None, padmode=None): + """decrypt(data, [pad], [padmode]) -> bytes + + data : bytes to be encrypted + pad : Optional argument for decryption padding. Must only be one byte + padmode : Optional argument for overriding the padding mode. + + The data must be a multiple of 8 bytes and will be decrypted + with the already specified key. In PAD_NORMAL mode, if the + optional padding character is supplied, then the un-encrypted + data will have the padding characters removed from the end of + the bytes. This pad removal only occurs on the last 8 bytes of + the data (last data block). In PAD_PKCS5 mode, the special + padding end markers will be removed from the data after + decrypting, no pad character is required for PAD_PKCS5. 
+ """ + ENCRYPT = des.ENCRYPT + DECRYPT = des.DECRYPT + data = self._guardAgainstUnicode(data) + if pad is not None: + pad = self._guardAgainstUnicode(pad) + if self.getMode() == CBC: + self.__key1.setIV(self.getIV()) + self.__key2.setIV(self.getIV()) + self.__key3.setIV(self.getIV()) + i = 0 + result = [] + while i < len(data): + iv = data[i:i+8] + block = self.__key3.crypt(iv, DECRYPT) + block = self.__key2.crypt(block, ENCRYPT) + block = self.__key1.crypt(block, DECRYPT) + self.__key1.setIV(iv) + self.__key2.setIV(iv) + self.__key3.setIV(iv) + result.append(block) + i += 8 + if _pythonMajorVersion < 3: + data = ''.join(result) + else: + data = bytes.fromhex('').join(result) + else: + data = self.__key3.crypt(data, DECRYPT) + data = self.__key2.crypt(data, ENCRYPT) + data = self.__key1.crypt(data, DECRYPT) + return self._unpadData(data, pad, padmode) diff --git a/plugin.video.alfa/lib/sambatools/smb/utils/sha256.py b/plugin.video.alfa/lib/sambatools/smb/utils/sha256.py index df28791c..a13d6bf3 100755 --- a/plugin.video.alfa/lib/sambatools/smb/utils/sha256.py +++ b/plugin.video.alfa/lib/sambatools/smb/utils/sha256.py @@ -1,112 +1,110 @@ -#!/usr/bin/python -__author__ = 'Thomas Dixon' -__license__ = 'MIT' - -import copy -import struct -import sys - -digest_size = 32 -blocksize = 1 - -def new(m=None): - return sha256(m) - -class sha256(object): - _k = (0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, - 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, - 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, - 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, - 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, - 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, - 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, - 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, - 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, - 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, - 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, - 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, - 0x19a4c116, 0x1e376c08, 
0x2748774c, 0x34b0bcb5, - 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, - 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, - 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2) - _h = (0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, - 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19) - _output_size = 8 - - blocksize = 1 - block_size = 64 - digest_size = 32 - - def __init__(self, m=None): - self._buffer = '' - self._counter = 0 - - if m is not None: - if type(m) is not str: - raise TypeError, '%s() argument 1 must be string, not %s' % (self.__class__.__name__, type(m).__name__) - self.update(m) - - def _rotr(self, x, y): - return ((x >> y) | (x << (32-y))) & 0xFFFFFFFF - - def _sha256_process(self, c): - w = [0]*64 - w[0:15] = struct.unpack('!16L', c) - - for i in range(16, 64): - s0 = self._rotr(w[i-15], 7) ^ self._rotr(w[i-15], 18) ^ (w[i-15] >> 3) - s1 = self._rotr(w[i-2], 17) ^ self._rotr(w[i-2], 19) ^ (w[i-2] >> 10) - w[i] = (w[i-16] + s0 + w[i-7] + s1) & 0xFFFFFFFF - - a,b,c,d,e,f,g,h = self._h - - for i in range(64): - s0 = self._rotr(a, 2) ^ self._rotr(a, 13) ^ self._rotr(a, 22) - maj = (a & b) ^ (a & c) ^ (b & c) - t2 = s0 + maj - s1 = self._rotr(e, 6) ^ self._rotr(e, 11) ^ self._rotr(e, 25) - ch = (e & f) ^ ((~e) & g) - t1 = h + s1 + ch + self._k[i] + w[i] - - h = g - g = f - f = e - e = (d + t1) & 0xFFFFFFFF - d = c - c = b - b = a - a = (t1 + t2) & 0xFFFFFFFF - - self._h = [(x+y) & 0xFFFFFFFF for x,y in zip(self._h, [a,b,c,d,e,f,g,h])] - - def update(self, m): - if not m: - return - if type(m) is not str: - raise TypeError, '%s() argument 1 must be string, not %s' % (sys._getframe().f_code.co_name, type(m).__name__) - - self._buffer += m - self._counter += len(m) - - while len(self._buffer) >= 64: - self._sha256_process(self._buffer[:64]) - self._buffer = self._buffer[64:] - - def digest(self): - mdi = self._counter & 0x3F - length = struct.pack('!Q', self._counter<<3) - - if mdi < 56: - padlen = 55-mdi - else: - padlen = 119-mdi - - r = self.copy() - 
r.update('\x80'+('\x00'*padlen)+length) - return ''.join([struct.pack('!L', i) for i in r._h[:self._output_size]]) - - def hexdigest(self): - return self.digest().encode('hex') - - def copy(self): - return copy.deepcopy(self) +#!/usr/bin/python +__author__ = 'Thomas Dixon' +__license__ = 'MIT' + +import copy, struct, sys + +digest_size = 32 +blocksize = 1 + +def new(m=None): + return sha256(m) + +class sha256(object): + _k = (0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, + 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, + 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, + 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, + 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, + 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, + 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, + 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, + 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2) + _h = (0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, + 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19) + _output_size = 8 + + blocksize = 1 + block_size = 64 + digest_size = 32 + + def __init__(self, m=None): + self._buffer = '' + self._counter = 0 + + if m is not None: + if type(m) is not str: + raise TypeError, '%s() argument 1 must be string, not %s' % (self.__class__.__name__, type(m).__name__) + self.update(m) + + def _rotr(self, x, y): + return ((x >> y) | (x << (32-y))) & 0xFFFFFFFF + + def _sha256_process(self, c): + w = [0]*64 + w[0:15] = struct.unpack('!16L', c) + + for i in range(16, 64): + s0 = self._rotr(w[i-15], 7) ^ self._rotr(w[i-15], 18) ^ (w[i-15] >> 3) + s1 = self._rotr(w[i-2], 17) ^ self._rotr(w[i-2], 19) ^ (w[i-2] >> 10) + w[i] = 
(w[i-16] + s0 + w[i-7] + s1) & 0xFFFFFFFF + + a,b,c,d,e,f,g,h = self._h + + for i in range(64): + s0 = self._rotr(a, 2) ^ self._rotr(a, 13) ^ self._rotr(a, 22) + maj = (a & b) ^ (a & c) ^ (b & c) + t2 = s0 + maj + s1 = self._rotr(e, 6) ^ self._rotr(e, 11) ^ self._rotr(e, 25) + ch = (e & f) ^ ((~e) & g) + t1 = h + s1 + ch + self._k[i] + w[i] + + h = g + g = f + f = e + e = (d + t1) & 0xFFFFFFFF + d = c + c = b + b = a + a = (t1 + t2) & 0xFFFFFFFF + + self._h = [(x+y) & 0xFFFFFFFF for x,y in zip(self._h, [a,b,c,d,e,f,g,h])] + + def update(self, m): + if not m: + return + if type(m) is not str: + raise TypeError, '%s() argument 1 must be string, not %s' % (sys._getframe().f_code.co_name, type(m).__name__) + + self._buffer += m + self._counter += len(m) + + while len(self._buffer) >= 64: + self._sha256_process(self._buffer[:64]) + self._buffer = self._buffer[64:] + + def digest(self): + mdi = self._counter & 0x3F + length = struct.pack('!Q', self._counter<<3) + + if mdi < 56: + padlen = 55-mdi + else: + padlen = 119-mdi + + r = self.copy() + r.update('\x80'+('\x00'*padlen)+length) + return ''.join([struct.pack('!L', i) for i in r._h[:self._output_size]]) + + def hexdigest(self): + return self.digest().encode('hex') + + def copy(self): + return copy.deepcopy(self) diff --git a/plugin.video.alfa/platformcode/platformtools.py b/plugin.video.alfa/platformcode/platformtools.py index f046983b..aa322a5e 100644 --- a/plugin.video.alfa/platformcode/platformtools.py +++ b/plugin.video.alfa/platformcode/platformtools.py @@ -1106,6 +1106,8 @@ def play_torrent(item, xlistitem, mediaurl): url_stat = False torrents_path = '' videolibrary_path = config.get_videolibrary_path() #Calculamos el path absoluto a partir de la Videoteca + if videolibrary_path.lower().startswith("smb://"): #Si es una conexión SMB, usamos userdata local + videolibrary_path = config.get_data_path() #Calculamos el path absoluto a partir de Userdata if not filetools.exists(videolibrary_path): #Si no existe el 
path, pasamos al modo clásico videolibrary_path = False else: @@ -1139,7 +1141,7 @@ def play_torrent(item, xlistitem, mediaurl): folder = movies #películas else: folder = series #o series - item.url = filetools.join(videolibrary_path, folder, item.url) #dirección del .torrent local en la Videoteca + item.url = filetools.join(config.get_videolibrary_path(), folder, item.url) #dirección del .torrent local en la Videoteca if filetools.copy(item.url, torrents_path, silent=True): #se copia a la carpeta generíca para evitar problemas de encode item.url = torrents_path if "torrentin" in torrent_options[seleccion][1]: #Si es Torrentin, hay que añadir un prefijo diff --git a/plugin.video.alfa/platformcode/xbmc_videolibrary.py b/plugin.video.alfa/platformcode/xbmc_videolibrary.py index a5948d5b..73b6de2e 100755 --- a/plugin.video.alfa/platformcode/xbmc_videolibrary.py +++ b/plugin.video.alfa/platformcode/xbmc_videolibrary.py @@ -329,6 +329,7 @@ def mark_content_as_watched_on_alfa(path): from channels import videolibrary from core import videolibrarytools from core import scrapertools + from core import filetools import re """ marca toda la serie o película como vista o no vista en la Videoteca de Alfa basado en su estado en la Videoteca de Kodi @@ -375,7 +376,8 @@ def mark_content_as_watched_on_alfa(path): nfo_name = scrapertools.find_single_match(path2, '\]\/(.*?)$') #Construyo el nombre del .nfo path1 = path1.replace(nfo_name, '') #para la SQL solo necesito la carpeta path2 = path2.replace(nfo_name, '') #para la SQL solo necesito la carpeta - + path2 = filetools.remove_smb_credential(path2) #Si el archivo está en un servidor SMB, quiamos las credenciales + #Ejecutmos la sentencia SQL sql = 'select strFileName, playCount from %s where (strPath like "%s" or strPath like "%s")' % (contentType, path1, path2) nun_records = 0 From 99cac6e22555378e87ddacc41deb8412422f9e3c Mon Sep 17 00:00:00 2001 From: pipcat <pip@pipcat.com> Date: Wed, 28 Nov 2018 09:24:40 +0100 Subject: [PATCH 
11/24] Alternativa a ziptools/zipfile --- plugin.video.alfa/platformcode/updater.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/plugin.video.alfa/platformcode/updater.py b/plugin.video.alfa/platformcode/updater.py index ce417c0e..059c4158 100644 --- a/plugin.video.alfa/platformcode/updater.py +++ b/plugin.video.alfa/platformcode/updater.py @@ -122,8 +122,13 @@ def check_addon_updates(verbose=False): # Descomprimir zip dentro del addon # --------------------------------- - unzipper = ziptools.ziptools() - unzipper.extract(localfilename, config.get_runtime_path()) + try: + unzipper = ziptools.ziptools() + unzipper.extract(localfilename, config.get_runtime_path()) + except: + import xbmc + xbmc.executebuiltin('XBMC.Extract("%s", "%s")' % (localfilename, config.get_runtime_path())) + time.sleep(1) # Borrar el zip descargado # ------------------------ From 3bd39826b69ca741b695030ef40f291dec26a922 Mon Sep 17 00:00:00 2001 From: Alfa-beto <30815244+Alfa-beto@users.noreply.github.com> Date: Wed, 28 Nov 2018 10:21:40 -0300 Subject: [PATCH 12/24] Correcciones MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - BlogHorror: Corrección por cambio de estructura - PelisPlus: Corrección para series - TuPeliculas: Corrección por cambio de estructura - VeSeriesOnline: Corrección en la detección de enlaces --- plugin.video.alfa/channels/bloghorror.py | 6 +++--- plugin.video.alfa/channels/pelisplus.py | 2 +- plugin.video.alfa/channels/tupelicula.py | 3 --- plugin.video.alfa/channels/veseriesonline.py | 17 +++++++++++------ 4 files changed, 15 insertions(+), 13 deletions(-) diff --git a/plugin.video.alfa/channels/bloghorror.py b/plugin.video.alfa/channels/bloghorror.py index 210c62bb..bbbda1d2 100644 --- a/plugin.video.alfa/channels/bloghorror.py +++ b/plugin.video.alfa/channels/bloghorror.py @@ -47,11 +47,11 @@ def list_all(item): itemlist = [] data = get_source(item.url) - patron = 
'<divclass="post-thumbnail">.?<.*?href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?' + patron = '<article id="post-\d+".*?data-background="([^"]+)".*?href="([^"]+)".*?<h3.*?internal">([^<]+)' matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + for scrapedthumbnail, scrapedurl, scrapedtitle in matches: url = scrapedurl title = scrapertools.find_single_match(scrapedtitle, '(.*?)(?:|\(|\| )\d{4}').strip() year = scrapertools.find_single_match(scrapedtitle, '(\d{4})') @@ -68,7 +68,7 @@ def list_all(item): if itemlist != []: - next_page = scrapertools.find_single_match(data, '<a class="next" href="([^"]+)"') + next_page = scrapertools.find_single_match(data, 'page-numbers current.*?<a class="page-numbers" href="([^"]+)"') if next_page != '': itemlist.append(Item(channel=item.channel, fanart=fanart, action="list_all", title='Siguiente >>>', url=next_page)) else: diff --git a/plugin.video.alfa/channels/pelisplus.py b/plugin.video.alfa/channels/pelisplus.py index 5aac3210..75d86035 100644 --- a/plugin.video.alfa/channels/pelisplus.py +++ b/plugin.video.alfa/channels/pelisplus.py @@ -183,7 +183,7 @@ def episodesxseasons(item): season = item.infoLabels['season'] data=get_source(item.url) season_data = scrapertools.find_single_match(data, 'id="pills-vertical-%s">(.*?)</div>' % season) - patron='href="([^"]+)".*?block">Capitulo(\d+) -.?([^<]+)<' + patron='href="([^"]+)".*?block">Capitulo.?(\d+) -.?([^<]+)<' matches = re.compile(patron, re.DOTALL).findall(season_data) infoLabels = item.infoLabels diff --git a/plugin.video.alfa/channels/tupelicula.py b/plugin.video.alfa/channels/tupelicula.py index f17a1978..3b613e16 100644 --- a/plugin.video.alfa/channels/tupelicula.py +++ b/plugin.video.alfa/channels/tupelicula.py @@ -52,9 +52,6 @@ def mainlist(item): itemlist.append(Item(channel=item.channel, title="Generos", action="section", thumbnail=get_thumb('genres', auto=True))) - 
itemlist.append(Item(channel=item.channel, title="Por Años", action="section", - thumbnail=get_thumb('year', auto=True))) - itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url=host + 'search?q=', thumbnail=get_thumb('search', auto=True))) diff --git a/plugin.video.alfa/channels/veseriesonline.py b/plugin.video.alfa/channels/veseriesonline.py index 0b9aeef0..39fdf1b6 100644 --- a/plugin.video.alfa/channels/veseriesonline.py +++ b/plugin.video.alfa/channels/veseriesonline.py @@ -4,6 +4,7 @@ # -*- By the Alfa Develop Group -*- import re +import base64 from channels import autoplay from channels import filtertools @@ -178,8 +179,8 @@ def findvideos(item): data = get_source(item.url) video_id = scrapertools.find_single_match(data, 'getEnlaces\((\d+)\)') - links_url = '%s%s%s' % (host,'/link/repro.php/',video_id) - online_url = '%s%s%s' % (host, '/link/enlaces_online.php/', video_id) + links_url = '%s%s%s' % (host,'link/repro.php/',video_id) + online_url = '%s%s%s' % (host, 'link/enlaces_online.php/', video_id) # listado de opciones links_url @@ -223,10 +224,14 @@ def findvideos(item): video_id = scrapertools.find_single_match(scrapedurl, 'index.php/(\d+)/') new_url = '%s%s%s%s' % (host, 'ext/index-include.php?id=', video_id, '&tipo=1') data = get_source(new_url) - video_url = scrapertools.find_single_match(data, '<div class=container><a href=(.*?)>') - video_url = video_url.replace('enlace.php', 'r') - data = httptools.downloadpage(video_url, follow_redirects=False) - url = data.headers['location'] + video_url = scrapertools.find_single_match(data, '<div class=container><a onclick=addURL.*?href=(.*?)>') + video_url = video_url.replace('%3D', '&')+'status' + headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', + 'Referer': item.url} + data = httptools.downloadpage(video_url, headers=headers, ignore_response_code=True).data + b64_url = scrapertools.find_single_match(data, "var string = 
'([^']+)';")+'==' + url = base64.b64decode(b64_url) + title = '%s '+ '[%s]' % language if url != '': itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language=language, From 3ac2716ee948ecd052ad19d9e499b8fd73ee2d9e Mon Sep 17 00:00:00 2001 From: Intel1 <luisriverap@hotmail.com> Date: Wed, 28 Nov 2018 08:31:24 -0500 Subject: [PATCH 13/24] Actualizados MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Danimados: Corrección en enlaces <Terror 2018>: Eliminado fanart actualizado httptools: agregado parametro: ignore_response_code mediafire: fix enlaces mp4upload: patron actualizado vevio: patron actualizado --- plugin.video.alfa/channels/danimados.py | 2 + plugin.video.alfa/channels/special.json | 12 --- plugin.video.alfa/channels/special.py | 78 ------------------- plugin.video.alfa/channels/tvmoviedb.py | 2 +- plugin.video.alfa/core/httptools.py | 6 +- .../platformcode/platformtools.py | 2 +- plugin.video.alfa/servers/mediafire.py | 7 +- plugin.video.alfa/servers/mp4upload.json | 2 +- plugin.video.alfa/servers/vevio.json | 4 +- 9 files changed, 12 insertions(+), 103 deletions(-) delete mode 100644 plugin.video.alfa/channels/special.json delete mode 100644 plugin.video.alfa/channels/special.py diff --git a/plugin.video.alfa/channels/danimados.py b/plugin.video.alfa/channels/danimados.py index c319f680..c068ceeb 100644 --- a/plugin.video.alfa/channels/danimados.py +++ b/plugin.video.alfa/channels/danimados.py @@ -171,6 +171,8 @@ def findvideos(item): data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", headers=headers, post=post).data url1 = scrapertools.find_single_match(data1, "src='([^']+)") url1 = devuelve_enlace(url1) + if "drive.google" in url1: + url1 = url1.replace("view","preview") if url1: itemlist.append(item.clone(title="Ver en %s",url=url1, action="play")) tmdb.set_infoLabels(itemlist) diff --git a/plugin.video.alfa/channels/special.json 
b/plugin.video.alfa/channels/special.json deleted file mode 100644 index ace9d3a0..00000000 --- a/plugin.video.alfa/channels/special.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "id": "special", - "name": "<Terror 2018>", - "active": true, - "adult": false, - "language": [], - "thumbnail": "https://i.postimg.cc/FR2nygS0/g4567.png", - "banner": "", - "categories": [ - "movie" - ] -} diff --git a/plugin.video.alfa/channels/special.py b/plugin.video.alfa/channels/special.py deleted file mode 100644 index 836d9c19..00000000 --- a/plugin.video.alfa/channels/special.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- coding: utf-8 -*- -# -*- Channel Halloween -*- -# -*- Created for Alfa-addon -*- -# -*- By the Alfa Develop Group -*- - -import re - -from channels import autoplay -from channels import filtertools -from core import httptools -from core import scrapertools -from core import servertools -from core import jsontools -from core import tmdb -from core.item import Item -from platformcode import config, logger -from channelselector import get_thumb - -host = 'https://www.imdb.com/list/ls027655523/?sort=list_order,asc&st_dt=&mode=detail&page=' - - -def get_source(url): - logger.info() - data = httptools.downloadpage(url).data - data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data) - return data - -def mainlist(item): - logger.info() - item.url = host - item.first = 60 - item.last = 80 - item.page = 1 - return list_all(item) - - -def list_all(item): - logger.info() - from core import jsontools - itemlist = [] - - data = get_source('%s%s' % (host, item.page)) - data = scrapertools.find_single_match(data, '"itemListElement":([^\]]+)\]') - data = data + ']' - #logger.debug(data) - movie_list = eval(data) - for movie in movie_list[item.first:item.last]: - - IMDBNumber = movie['url'].replace('title','').replace('/','') - - - new_item = Item(channel='search', contentType='movie', action='do_search', - infoLabels={'imdb_id': IMDBNumber}) - - #new_item.infoLabels = 
tmdb.find_and_set_infoLabels(new_item) - itemlist.append(new_item) - logger.debug('id %s' % IMDBNumber) - #logger.debug(new_item) - - - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) - - for movie in itemlist: - movie.title = movie.infoLabels['title'] - movie.wanted = movie.title - - if item.last + 20 < len(movie_list): - first = item.last - last = item.last + 20 - page = item.page - else: - first = 0 - last = 20 - page = item.page + 1 - - itemlist.append(Item(channel=item.channel, title='Siguiente >>', action='list_all', - last=last, first=first, page=page)) - return itemlist diff --git a/plugin.video.alfa/channels/tvmoviedb.py b/plugin.video.alfa/channels/tvmoviedb.py index c7244a80..1ec3fc1c 100644 --- a/plugin.video.alfa/channels/tvmoviedb.py +++ b/plugin.video.alfa/channels/tvmoviedb.py @@ -35,7 +35,7 @@ langi = langs[config.get_setting('imdb', "tvmoviedb")] adult_mal = config.get_setting('adult_mal', "tvmoviedb") mal_ck = "MzE1MDQ2cGQ5N2llYTY4Z2xwbGVzZjFzbTY=" images_predef = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/" -default_fan = filetools.join(config.get_runtime_path(), "fanart.jpg") +default_fan = filetools.join(config.get_runtime_path(), "fanart1.jpg") def mainlist(item): diff --git a/plugin.video.alfa/core/httptools.py b/plugin.video.alfa/core/httptools.py index fe149f3c..18727998 100644 --- a/plugin.video.alfa/core/httptools.py +++ b/plugin.video.alfa/core/httptools.py @@ -99,7 +99,7 @@ load_cookies() def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=True, cookies=True, replace_headers=False, - add_referer=False, only_headers=False, bypass_cloudflare=True, count_retries=0, random_headers=False): + add_referer=False, only_headers=False, bypass_cloudflare=True, count_retries=0, random_headers=False, ignore_response_code=False): """ Abre una url y retorna los datos obtenidos @@ -124,6 +124,8 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr @type 
only_headers: bool @param random_headers: Si True, utiliza el método de seleccionar headers aleatorios. @type random_headers: bool + @param ignore_response_code: Si es True, ignora el método para WebErrorException para error como el error 404 en veseriesonline, pero es un data funcional + @type ignore_response_code: bool @return: Resultado de la petición @rtype: HTTPResponse @@ -256,7 +258,7 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr # error 4xx o 5xx se lanza excepcion (menos para servidores) # response["code"] = 400 # linea de código para probar is_channel = str(is_channel).replace("/servers/","\\servers\\") # Para sistemas operativos diferente a Windows la ruta cambia - if type(response["code"]) == int and "\\servers\\" not in str(is_channel): + if type(response["code"]) == int and "\\servers\\" not in str(is_channel) and not ignore_response_code: if response["code"] > 399 and (server_cloudflare == "cloudflare" and response["code"] != 503): raise WebErrorException(urlparse.urlparse(url)[1]) diff --git a/plugin.video.alfa/platformcode/platformtools.py b/plugin.video.alfa/platformcode/platformtools.py index f046983b..a13e1f8b 100644 --- a/plugin.video.alfa/platformcode/platformtools.py +++ b/plugin.video.alfa/platformcode/platformtools.py @@ -210,7 +210,7 @@ def render_items(itemlist, parent_item): if item.fanart: fanart = item.fanart else: - fanart = os.path.join(config.get_runtime_path(), "fanart.jpg") + fanart = os.path.join(config.get_runtime_path(), "fanart1.jpg") # Creamos el listitem #listitem = xbmcgui.ListItem(item.title) diff --git a/plugin.video.alfa/servers/mediafire.py b/plugin.video.alfa/servers/mediafire.py index 3c34e23d..bdb22bbc 100755 --- a/plugin.video.alfa/servers/mediafire.py +++ b/plugin.video.alfa/servers/mediafire.py @@ -9,26 +9,21 @@ from platformcode import logger def test_video_exists(page_url): logger.info("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data - if "Invalid 
or Deleted File" in data: return False, "[Mediafire] El archivo no existe o ha sido borrado" elif "File Removed for Violation" in data: return False, "[Mediafire] Archivo eliminado por infracción" - return True, "" def get_video_url(page_url, premium=False, user="", password="", video_password=""): logger.info("(page_url='%s')" % page_url) video_urls = [] - data = httptools.downloadpage(page_url).data - patron = 'kNO \= "([^"]+)"' + patron = "DownloadButtonAd-startDownload gbtnSecondary.*?href='([^']+)'" matches = re.compile(patron, re.DOTALL).findall(data) if len(matches) > 0: video_urls.append([matches[0][-4:] + " [mediafire]", matches[0]]) - for video_url in video_urls: logger.info("%s - %s" % (video_url[0], video_url[1])) - return video_urls diff --git a/plugin.video.alfa/servers/mp4upload.json b/plugin.video.alfa/servers/mp4upload.json index b3d68c38..d46b4135 100755 --- a/plugin.video.alfa/servers/mp4upload.json +++ b/plugin.video.alfa/servers/mp4upload.json @@ -6,7 +6,7 @@ ], "patterns": [ { - "pattern": "mp4upload.com/embed-([A-Za-z0-9]+)", + "pattern": "mp4upload.com(?:/embed-|/)([A-Za-z0-9]+)", "url": "http://www.mp4upload.com/embed-\\1.html" } ] diff --git a/plugin.video.alfa/servers/vevio.json b/plugin.video.alfa/servers/vevio.json index d91e95bf..9a5d5600 100644 --- a/plugin.video.alfa/servers/vevio.json +++ b/plugin.video.alfa/servers/vevio.json @@ -4,8 +4,8 @@ "ignore_urls": [], "patterns": [ { - "pattern": "(vev.io/embed/[A-z0-9]+)", - "url": "https://\\1" + "pattern": "vev.io/(?:embed/|)([A-z0-9]+)", + "url": "https://vev.io/embed/\\1" } ] }, From c3d8267e553dc06dfcbec3e21e516ee8a5a53740 Mon Sep 17 00:00:00 2001 From: chivmalev <lbivan187@gmail.com> Date: Wed, 28 Nov 2018 11:11:59 -0300 Subject: [PATCH 14/24] =?UTF-8?q?maxipelis24:correcci=C3=B3n?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- plugin.video.alfa/channels/maxipelis24.py | 81 +++++++++++++---------- 1 file changed, 46 insertions(+), 35 
deletions(-) diff --git a/plugin.video.alfa/channels/maxipelis24.py b/plugin.video.alfa/channels/maxipelis24.py index 525f8f84..d8d1eb30 100644 --- a/plugin.video.alfa/channels/maxipelis24.py +++ b/plugin.video.alfa/channels/maxipelis24.py @@ -12,7 +12,7 @@ from core.item import Item from platformcode import config, logger from channelselector import get_thumb -host = "http://maxipelis24.com" +host = "https://maxipelis24.tv" def mainlist(item): @@ -42,8 +42,8 @@ def category(item): data = re.sub(r"\n|\r|\t|\s{2}| ","", data) if item.cat == 'genre': - data = scrapertools.find_single_match(data, '<h3>Géneros.*?</div>') - patron = '<a href="([^"]+)">([^<]+)<' + data = scrapertools.find_single_match(data, '<h3>Géneros <span class="icon-sort">.*?</ul>') + patron = '<li class="cat-item cat-item.*?<a href="([^"]+)" >([^<]+)<' elif item.cat == 'year': data = scrapertools.find_single_match(data, '<h3>Año de estreno.*?</div>') patron = 'li><a href="([^"]+)">([^<]+).*?<' @@ -65,15 +65,14 @@ def movies(item): patron = '<div id="mt.+?href="([^"]+)".+?' patron += '<img src="([^"]+)" alt="([^"]+)".+?' - patron += '<span class="imdb">.*?>([^<]+)<.*?' patron += '<span class="ttx">([^<]+).*?' 
patron += 'class="year">([^<]+).+?class="calidad2">([^<]+)<' matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, img, scrapedtitle, ranking, resto, year, quality in matches: + for scrapedurl, img, scrapedtitle, resto, year, quality in matches: scrapedtitle = re.sub(r'\d{4}|[()]','', scrapedtitle) plot = scrapertools.htmlclean(resto).strip() - title = ' %s [COLOR yellow](%s)[/COLOR] [COLOR red][%s][/COLOR]' % (scrapedtitle, ranking, quality) + title = ' %s [COLOR red][%s][/COLOR]' % (scrapedtitle, quality) itemlist.append(Item(channel = item.channel, title = title, url = scrapedurl, @@ -87,7 +86,7 @@ def movies(item): tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) #Paginacion - matches = re.compile('<div class="pag_.*?href="([^"]+)">Siguiente<', re.DOTALL).findall(data) + matches = re.compile('class="respo_pag"><div class="pag.*?<a href="([^"]+)" >Siguiente</a><', re.DOTALL).findall(data) if matches: url = urlparse.urljoin(item.url, matches[0]) itemlist.append(Item(channel = item.channel, action = "movies", title = "Página siguiente >>", url = url)) @@ -99,32 +98,44 @@ def findvideos(item): itemlist = [] data = httptools.downloadpage(item.url).data - data = scrapertools.get_match(data, '<div id="contenedor">(.*?)</div></div></div>') - # Busca los enlaces a los videos - listavideos = servertools.findvideos(data) - for video in listavideos: - videotitle = scrapertools.unescape(video[0]) - url = video[1] - server = video[2] - itemlist.append(Item(channel = item.channel, - action = "play", - server = server, - title = videotitle, - url = url, - thumbnail = item.thumbnail, - plot = item.plot, - contentTitle = item.contentTitle, - infoLabels = item.infoLabels, - folder = False)) - # Opción "Añadir esta película a la biblioteca de KODI" - if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': - itemlist.append(Item(channel = item.channel, - title = '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', 
- url = item.url, - action = "add_pelicula_to_library", - extra = "findvideos", - contentTitle = item.contentTitle, - thumbnail = item.thumbnail - )) + data = re.sub(r"\n|\r|\t|\s{2}| ","", data) + patron = scrapertools.find_single_match(data, '<div id="player2">(.*?)</div>') + patron = '<div id="div.*?<div class="movieplay">.+?[a-zA-Z]="([^&]+)&' + + matches = re.compile(patron, re.DOTALL).findall(data) - return itemlist + for link in matches: + if 'id=' in link: + id_type = 'id' + ir_type = 'ir' + elif 'ud=' in link: + id_type = 'ud' + ir_type = 'ur' + elif 'od=' in link: + id_type = 'od' + ir_type = 'or' + elif 'ad=' in link: + id_type = 'ad' + ir_type = 'ar' + elif 'ed=' in link: + id_type = 'ed' + ir_type = 'er' + + id = scrapertools.find_single_match(link, '%s=(.*)' % id_type) + base_link = scrapertools.find_single_match(link, '(.*?)%s=' % id_type) + + ir = id[::-1] + referer = base_link+'%s=%s&/' % (id_type, ir) + video_data = httptools.downloadpage('%s%s=%s' % (base_link, ir_type, ir), headers={'Referer':referer}, + follow_redirects=False) + url = video_data.headers['location'] + title = '%s' + + itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', + language='', infoLabels=item.infoLabels)) + + itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize()) + + + + return itemlist \ No newline at end of file From a9e3a12869c29d9e75a497e7e2124e11ee8951f5 Mon Sep 17 00:00:00 2001 From: chivmalev <lbivan187@gmail.com> Date: Wed, 28 Nov 2018 11:49:13 -0300 Subject: [PATCH 15/24] =?UTF-8?q?maxipelis24:correcci=C3=B3n?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- plugin.video.alfa/channels/maxipelis24.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin.video.alfa/channels/maxipelis24.py b/plugin.video.alfa/channels/maxipelis24.py index d8d1eb30..90d70ac1 100644 --- a/plugin.video.alfa/channels/maxipelis24.py +++ 
b/plugin.video.alfa/channels/maxipelis24.py @@ -100,7 +100,7 @@ def findvideos(item): data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}| ","", data) patron = scrapertools.find_single_match(data, '<div id="player2">(.*?)</div>') - patron = '<div id="div.*?<div class="movieplay">.+?[a-zA-Z]="([^&]+)&' + patron = '<div id="div.*?<div class="movieplay">.*?(?:iframe src|IFRAME src)="([^&]+)&' matches = re.compile(patron, re.DOTALL).findall(data) From 0fd2260f185b9f7ed711720a00d5688e621a1843 Mon Sep 17 00:00:00 2001 From: chivmalev <lbivan187@gmail.com> Date: Wed, 28 Nov 2018 12:10:01 -0300 Subject: [PATCH 16/24] =?UTF-8?q?maxipelis24:correcci=C3=B3n?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- plugin.video.alfa/channels/maxipelis24.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin.video.alfa/channels/maxipelis24.py b/plugin.video.alfa/channels/maxipelis24.py index 90d70ac1..d8d1eb30 100644 --- a/plugin.video.alfa/channels/maxipelis24.py +++ b/plugin.video.alfa/channels/maxipelis24.py @@ -100,7 +100,7 @@ def findvideos(item): data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}| ","", data) patron = scrapertools.find_single_match(data, '<div id="player2">(.*?)</div>') - patron = '<div id="div.*?<div class="movieplay">.*?(?:iframe src|IFRAME src)="([^&]+)&' + patron = '<div id="div.*?<div class="movieplay">.+?[a-zA-Z]="([^&]+)&' matches = re.compile(patron, re.DOTALL).findall(data) From 2f623770353c3c75e28397344dc3868aa78b459a Mon Sep 17 00:00:00 2001 From: Kingbox <37674310+lopezvg@users.noreply.github.com> Date: Wed, 28 Nov 2018 17:07:21 +0100 Subject: [PATCH 17/24] SMB: mejoras y correcciones --- plugin.video.alfa/channels/videolibrary.py | 3 ++ plugin.video.alfa/core/videolibrarytools.py | 2 +- plugin.video.alfa/lib/generictools.py | 40 +++++++++++++------ .../platformcode/xbmc_videolibrary.py | 3 +- 4 files changed, 33 insertions(+), 15 
deletions(-) diff --git a/plugin.video.alfa/channels/videolibrary.py b/plugin.video.alfa/channels/videolibrary.py index e2edd5c1..c455db19 100644 --- a/plugin.video.alfa/channels/videolibrary.py +++ b/plugin.video.alfa/channels/videolibrary.py @@ -52,6 +52,9 @@ def list_movies(item, silent=False): head_nfo, new_item = videolibrarytools.read_nfo(nfo_path) + if not new_item: #Si no ha leído bien el .nfo, pasamos a la siguiente + continue + if len(new_item.library_urls) > 1: multicanal = True else: diff --git a/plugin.video.alfa/core/videolibrarytools.py b/plugin.video.alfa/core/videolibrarytools.py index b4964de2..db85bc71 100644 --- a/plugin.video.alfa/core/videolibrarytools.py +++ b/plugin.video.alfa/core/videolibrarytools.py @@ -828,7 +828,7 @@ def caching_torrents(url, torrents_path=None, timeout=10, lookup=False, data_tor return torrents_path #Si hay un error, devolvemos el "path" vacío torrent_file = response.data - if not scrapertools.find_single_match(torrent_file, '^d\d+:\w+\d+:'): #No es un archivo .torrent (RAR, ZIP, HTML,..., vacío) + if not scrapertools.find_single_match(torrent_file, '^d\d+:.*?\d+:'): #No es un archivo .torrent (RAR, ZIP, HTML,..., vacío) logger.error('No es un archivo Torrent: ' + url) torrents_path = '' if data_torrent: diff --git a/plugin.video.alfa/lib/generictools.py b/plugin.video.alfa/lib/generictools.py index eb549794..6a1dcfde 100644 --- a/plugin.video.alfa/lib/generictools.py +++ b/plugin.video.alfa/lib/generictools.py @@ -1164,7 +1164,7 @@ def post_tmdb_findvideos(item, itemlist): title_gen = '[COLOR yellow]%s [/COLOR][ALT]: %s' % (item.category.capitalize(), title_gen) #elif (config.get_setting("quit_channel_name", "videolibrary") == 1 or item.channel == channel_py) and item.contentChannel == "videolibrary": else: - title_gen = '%s: %s' % (item.category.capitalize(), title_gen) + title_gen = '[COLOR white]%s: %s' % (item.category.capitalize(), title_gen) #Si intervención judicial, alerto!!! 
if item.intervencion: @@ -1768,7 +1768,6 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F item_back = item.clone() it_back = item.clone() ow_force_param = True - channel_enabled = False update_stat = 0 delete_stat = 0 canal_org_des_list = [] @@ -1883,13 +1882,15 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F """ try: - if item.url: #Viene de actualización de videoteca de series + if item.url and not channel_py in item.url and it.emergency_urls: #Viene de actualización de videoteca de series #Analizamos si el canal ya tiene las urls de emergencia: guardar o borrar if (config.get_setting("emergency_urls", item.channel) == 1 and (not item.emergency_urls or (item.emergency_urls and not item.emergency_urls.get(channel_alt, False)))) or (config.get_setting("emergency_urls", item.channel) == 2 and item.emergency_urls.get(channel_alt, False)) or config.get_setting("emergency_urls", item.channel) == 3 or emergency_urls_force: intervencion += ", ('1', '%s', '%s', '', '', '', '', '', '', '', '*', '%s', 'emerg')" % (channel_alt, channel_alt, config.get_setting("emergency_urls", item.channel)) elif it.library_urls: #Viene de "listar peliculas´" for canal_vid, url_vid in it.library_urls.items(): #Se recorre "item.library_urls" para buscar canales candidatos + if canal_vid == channel_py: #Si tiene Newcpt1 en canal, es un error + continue canal_vid_alt = "'%s'" % canal_vid if canal_vid_alt in fail_over_list: #Se busca si es un clone de newpct1 channel_bis = channel_py @@ -1903,7 +1904,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F logger.error(traceback.format_exc()) #Ahora tratamos las webs intervenidas, tranformamos la url, el nfo y borramos los archivos obsoletos de la serie - if channel not in intervencion and channel_py_alt not in intervencion and category not in intervencion and channel_alt != 'videolibrary': #lookup + if (channel not in intervencion and 
channel_py_alt not in intervencion and category not in intervencion and channel_alt != 'videolibrary') or not item.infoLabels: #lookup return (item, it, overwrite) #... el canal/clone está listado import ast @@ -1915,7 +1916,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F for activo, canal_org, canal_des, url_org, url_des, patron1, patron2, patron3, patron4, patron5, content_inc, content_exc, ow_force in intervencion_list: opt = '' #Es esta nuestra entrada? - if activo == '1' and (canal_org == channel_alt or canal_org == item.channel or channel_alt == 'videolibrary' or ow_force == 'del' or ow_force == 'emerg'): + if activo == '1' and (canal_org == channel_alt or canal_org == item.category.lower() or channel_alt == 'videolibrary' or ow_force == 'del' or ow_force == 'emerg'): if ow_force == 'del' or ow_force == 'emerg': #Si es un borrado de estructuras erroneas, hacemos un proceso aparte canal_des_def = canal_des #Si hay canal de sustitución para item.library_urls, lo usamos @@ -1981,10 +1982,15 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F continue if item.contentType in content_exc: #Está el contenido excluido? continue + channel_enabled = 0 + channel_enabled_alt = 1 if item.channel != channel_py: - channel_enabled = channeltools.is_enabled(channel_alt) #Verificamos que el canal esté inactivo - channel_enabled_alt = config.get_setting('enabled', channel_alt) - channel_enabled = channel_enabled * channel_enabled_alt #Si está inactivo en algún sitio, tomamos eso + try: + if channeltools.is_enabled(channel_alt): channel_enabled = 1 #Verificamos que el canal esté inactivo + if config.get_setting('enabled', channel_alt) == False: channel_enabled_alt = 0 + channel_enabled = channel_enabled * channel_enabled_alt #Si está inactivo en algún sitio, tomamos eso + except: + pass if channel_enabled == 1 and canal_org != canal_des: #Si el canal está activo, puede ser solo... continue #... 
una intervención que afecte solo a una región if ow_force == 'no' and it.library_urls: #Esta regla solo vale para findvideos... @@ -2012,6 +2018,9 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F url += scrapertools.find_single_match(url_total, patron5) #La aplicamos a url if url: url_total = url #Guardamos la suma de los resultados intermedios + if item.channel == channel_py or channel in fail_over_list: #Si es Newpct1... + if item.contentType == "tvshow": + url_total = re.sub(r'\/\d+\/?$', '', url_total) #parece que con el título encuentra la serie, normalmente... update_stat += 1 #Ya hemos actualizado algo canal_org_des_list += [(canal_org, canal_des, url_total, opt, ow_force)] #salvamos el resultado para su proceso @@ -2019,13 +2028,10 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F if (update_stat > 0 and path != False) or item.ow_force == '1': logger.error('** Lista de Actualizaciones a realizar: ' + str(canal_org_des_list)) for canal_org_def, canal_des_def, url_total, opt_def, ow_force_def in canal_org_des_list: #pasamos por todas las "parejas" cambiadas + url_total_def = url_total if ow_force_def != 'del' and ow_force_def != 'emerg': - url_total_def = url_total - if (item.channel == channel_py or channel in fail_over_list): #Si es Newpct1... - if item.contentType == "tvshow": - url_total_def = re.sub(r'\/\d+\/?$', '', url_total) #parece que con el título encuentra la serie, normalmente... 
if item.url: - item.url = url_total_def #Salvamos la url convertida + item.url = url_total #Salvamos la url convertida if item.library_urls: item.library_urls.pop(canal_org_def, None) item.library_urls.update({canal_des_def: url_total}) @@ -2048,10 +2054,18 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F #Verificamos que las webs de los canales estén activas antes de borrar los .json, para asegurar que se pueden regenerar i = 0 for canal_org_def, canal_des_def, url_total, opt_def, ow_force_def in canal_org_des_list: #pasamos por las "parejas" a borrar + if "magnet:" in url_total or type(url_total) != str: #Si la url es un Magnet, o es una lista, pasamos + i += 1 + continue try: response = httptools.downloadpage(url_total, only_headers=True) except: logger.error(traceback.format_exc()) + logger.error('Web ' + canal_des_def.upper() + ' ERROR. Regla no procesada: ' + str(canal_org_des_list[i])) + item = item_back.clone() #Restauro las imágenes inciales + it = it_back.clone() + item.torrent_caching_fail = True #Marcamos el proceso como fallido + return (item, it, False) if not response.sucess: logger.error('Web ' + canal_des_def.upper() + ' INACTIVA. 
Regla no procesada: ' + str(canal_org_des_list[i])) item = item_back.clone() #Restauro las imágenes inciales diff --git a/plugin.video.alfa/platformcode/xbmc_videolibrary.py b/plugin.video.alfa/platformcode/xbmc_videolibrary.py index 73b6de2e..7023fcd4 100755 --- a/plugin.video.alfa/platformcode/xbmc_videolibrary.py +++ b/plugin.video.alfa/platformcode/xbmc_videolibrary.py @@ -488,7 +488,8 @@ def update(folder_content=config.get_setting("folder_tvshows"), folder=""): else: update_path = filetools.join(videolibrarypath, folder_content, folder) + "/" - payload["params"] = {"directory": update_path} + if not update_path.startswith("smb://"): + payload["params"] = {"directory": update_path} while xbmc.getCondVisibility('Library.IsScanningVideo()'): xbmc.sleep(500) From 545c509742ce92af6b5eb31e9f9ff9425eb5bb22 Mon Sep 17 00:00:00 2001 From: Kingbox <37674310+lopezvg@users.noreply.github.com> Date: Wed, 28 Nov 2018 17:09:17 +0100 Subject: [PATCH 18/24] Cacheo de enlaces de emergencias: Adaptados: - DivxTotal - Pelismagnet - Subtorrents - Todopleiculas - Zonatorrent --- plugin.video.alfa/channels/divxtotal.py | 3 +- plugin.video.alfa/channels/newpct1.json | 2 +- plugin.video.alfa/channels/pelismagnet.json | 22 +++ plugin.video.alfa/channels/pelismagnet.py | 62 +++++-- plugin.video.alfa/channels/subtorrents.json | 22 +++ plugin.video.alfa/channels/subtorrents.py | 172 ++++++++++++------ plugin.video.alfa/channels/todopeliculas.json | 22 +++ plugin.video.alfa/channels/todopeliculas.py | 105 ++++++++--- plugin.video.alfa/channels/zonatorrent.json | 22 +++ plugin.video.alfa/channels/zonatorrent.py | 96 ++++++++-- 10 files changed, 407 insertions(+), 121 deletions(-) diff --git a/plugin.video.alfa/channels/divxtotal.py b/plugin.video.alfa/channels/divxtotal.py index e03a3c1b..2e80f2f3 100644 --- a/plugin.video.alfa/channels/divxtotal.py +++ b/plugin.video.alfa/channels/divxtotal.py @@ -577,7 +577,8 @@ def findvideos(item): return item #... 
y nos vamos #Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB - item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) + if not item.videolibray_emergency_urls: + item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Ahora tratamos los enlaces .torrent for scrapedurl in matches: #leemos los torrents con la diferentes calidades diff --git a/plugin.video.alfa/channels/newpct1.json b/plugin.video.alfa/channels/newpct1.json index 24a47145..ec77f3fa 100644 --- a/plugin.video.alfa/channels/newpct1.json +++ b/plugin.video.alfa/channels/newpct1.json @@ -100,7 +100,7 @@ "id": "intervenidos_channels_list", "type": "text", "label": "Lista de canales y clones de NewPct1 intervenidos y orden de sustitución de URLs", - "default": "('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force'), ('0', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)([^0-9]+-)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-\\d+-(Temporada-).html', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-(\\d+)-', '', 'tvshow, season', '', 'force'), ('0', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-([^.]+).html', '', '', '', 'movie', '', 'force'), ('0', 'mejortorrent', 'mejortorrent', 'http://www.mejortorrent.com/', 'http://www.mejortorrent.org/', '', '', '', '', '', '*', '', 'force'), ('0', 'plusdede', 'megadede', 'https://www.plusdede.com', 'https://www.megadede.com', '', '', '', '', '', '*', '', 'auto')", + "default": "('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force'), ('0', 'mejortorrent', 
'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)([^0-9]+-)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-\\d+-(Temporada-).html', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-(\\d+)-', '', 'tvshow, season', '', 'force'), ('0', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-([^.]+).html', '', '', '', 'movie', '', 'force'), ('0', 'mejortorrent', 'mejortorrent', 'http://www.mejortorrent.com/', 'http://www.mejortorrent.org/', '', '', '', '', '', '*', '', 'force'), ('1', 'plusdede', 'megadede', 'https://www.plusdede.com', 'https://www.megadede.com', '', '', '', '', '', '*', '', 'auto'), ('1', 'newpct1', 'descargas2020', 'http://www.newpct1.com', 'http://descargas2020.com', '', '', '', '', '', '*', '', 'force')", "enabled": true, "visible": false }, diff --git a/plugin.video.alfa/channels/pelismagnet.json b/plugin.video.alfa/channels/pelismagnet.json index b8c65189..43e361ea 100755 --- a/plugin.video.alfa/channels/pelismagnet.json +++ b/plugin.video.alfa/channels/pelismagnet.json @@ -45,6 +45,28 @@ "VOSE" ] }, + { + "id": "emergency_urls", + "type": "list", + "label": "Se quieren guardar Enlaces de Emergencia por si se cae la Web?", + "default": 1, + "enabled": true, + "visible": true, + "lvalues": [ + "No", + "Guardar", + "Borrar", + "Actualizar" + ] + }, + { + "id": "emergency_urls_torrents", + "type": "bool", + "label": "Se quieren guardar Torrents de Emergencia por si se cae la Web?", + "default": true, + "enabled": true, + "visible": "!eq(-1,'No')" + }, { "id": "include_in_newest_torrent", "type": "bool", diff --git a/plugin.video.alfa/channels/pelismagnet.py b/plugin.video.alfa/channels/pelismagnet.py index 1a658863..414e772a 100644 --- a/plugin.video.alfa/channels/pelismagnet.py +++ b/plugin.video.alfa/channels/pelismagnet.py @@ -355,7 
+355,7 @@ def listado(item): title = re.sub(r'[\(|\[]\s+[\)|\]]', '', title) title = title.replace('()', '').replace('[]', '').strip().lower().title() - item_local.from_title = title.strip().lower().title() #Guardamos esta etiqueta para posible desambiguación de título + item_local.from_title = title.strip().lower().title() #Guardamos esta etiqueta para posible desambiguación de título #Salvamos el título según el tipo de contenido if item_local.contentType == "movie": @@ -387,8 +387,8 @@ def listado(item): title = '%s' % curr_page - if cnt_matches + 1 >= last_title: #Si hemos pintado ya todo lo de esta página... - cnt_matches = 0 #... la próxima pasada leeremos otra página + if cnt_matches + 1 >= last_title: #Si hemos pintado ya todo lo de esta página... + cnt_matches = 0 #... la próxima pasada leeremos otra página next_page_url = re.sub(r'page=(\d+)', r'page=' + str(int(re.search('\d+', next_page_url).group()) + 1), next_page_url) itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente " + title, url=next_page_url, extra=item.extra, extra2=item.extra2, last_page=str(last_page), curr_page=str(curr_page + 1), cnt_matches=str(cnt_matches))) @@ -399,10 +399,10 @@ def listado(item): def findvideos(item): logger.info() itemlist = [] - itemlist_t = [] #Itemlist total de enlaces - itemlist_f = [] #Itemlist de enlaces filtrados + itemlist_t = [] #Itemlist total de enlaces + itemlist_f = [] #Itemlist de enlaces filtrados if not item.language: - item.language = ['CAST'] #Castellano por defecto + item.language = ['CAST'] #Castellano por defecto matches = [] item.category = categoria @@ -412,22 +412,53 @@ def findvideos(item): #logger.debug(item) matches = item.url - if not matches: #error - logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web: " + item) + if not matches: #error + logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web: " + str(item)) 
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log')) - return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + + if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia? + matches = item.emergency_urls[1] #Restauramos matches + item.armagedon = True #Marcamos la situación como catastrófica + else: + if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca... + return item #Devolvemos el Item de la llamada + else: + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos #logger.debug(matches) + #Si es un lookup para cargar las urls de emergencia en la Videoteca... + if item.videolibray_emergency_urls: + item.emergency_urls = [] #Iniciamos emergency_urls + item.emergency_urls.append([]) #Reservamos el espacio para los .torrents locales + item.emergency_urls.append(matches) #Salvamnos matches... 
+ #Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB - item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) + if not item.videolibray_emergency_urls: + item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Ahora tratamos los enlaces .torrent - for scrapedurl, quality in matches: #leemos los magnets con la diferentes calidades + for scrapedurl, quality in matches: #leemos los magnets con la diferentes calidades #Generamos una copia de Item para trabajar sobre ella item_local = item.clone() item_local.url = scrapedurl + if item.videolibray_emergency_urls: + item.emergency_urls[0].append(scrapedurl) #guardamos la url y pasamos a la siguiente + continue + if item.emergency_urls and not item.videolibray_emergency_urls: + item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la url del .Torrent ALTERNATIVA + if item.armagedon: + item_local.url = item.emergency_urls[0][0] #... ponemos la emergencia como primaria + del item.emergency_urls[0][0] #Una vez tratado lo limpiamos + + size = '' + if not item.armagedon: + size = generictools.get_torrent_size(item_local.url) #Buscamos el tamaño en el .torrent + if size: + quality += ' [%s]' % size + if item.armagedon: #Si es catastrófico, lo marcamos + quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % quality #Añadimos la calidad y copiamos la duración item_local.quality = quality @@ -445,9 +476,9 @@ def findvideos(item): item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality).strip() item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() - item_local.alive = "??" #Calidad del link sin verificar - item_local.action = "play" #Visualizar vídeo - item_local.server = "torrent" #Servidor Torrent + item_local.alive = "??" 
#Calidad del link sin verificar + item_local.action = "play" #Visualizar vídeo + item_local.server = "torrent" #Servidor Torrent itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas @@ -459,6 +490,9 @@ def findvideos(item): #logger.debug(item_local) + if item.videolibray_emergency_urls: #Si ya hemos guardado todas las urls... + return item #... nos vamos + if len(itemlist_f) > 0: #Si hay entradas filtradas... itemlist.extend(itemlist_f) #Pintamos pantalla filtrada else: diff --git a/plugin.video.alfa/channels/subtorrents.json b/plugin.video.alfa/channels/subtorrents.json index 4d5db68a..f360f732 100644 --- a/plugin.video.alfa/channels/subtorrents.json +++ b/plugin.video.alfa/channels/subtorrents.json @@ -44,6 +44,28 @@ "VOSE" ] }, + { + "id": "emergency_urls", + "type": "list", + "label": "Se quieren guardar Enlaces de Emergencia por si se cae la Web?", + "default": 1, + "enabled": true, + "visible": true, + "lvalues": [ + "No", + "Guardar", + "Borrar", + "Actualizar" + ] + }, + { + "id": "emergency_urls_torrents", + "type": "bool", + "label": "Se quieren guardar Torrents de Emergencia por si se cae la Web?", + "default": true, + "enabled": true, + "visible": "!eq(-1,'No')" + }, { "id": "timeout_downloadpage", "type": "list", diff --git a/plugin.video.alfa/channels/subtorrents.py b/plugin.video.alfa/channels/subtorrents.py index 07f3c693..60b691d9 100644 --- a/plugin.video.alfa/channels/subtorrents.py +++ b/plugin.video.alfa/channels/subtorrents.py @@ -372,6 +372,7 @@ def findvideos(item): if not item.language: item.language = ['CAST'] #Castellano por defecto matches = [] + subtitles = [] item.category = categoria #logger.debug(item) @@ -389,51 +390,74 @@ def findvideos(item): if not data: logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url) itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. 
Si la Web está activa, reportar el error con el log')) - return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos - - #Extraemos el thumb - if not item.thumbnail: - item.thumbnail = scrapertools.find_single_match(data, patron) #guardamos thumb si no existe - #Extraemos quality, audio, year, country, size, scrapedlanguage - patron = '<\/script><\/div><ul>(?:<li><label>Fecha de estreno <\/label>[^<]+<\/li>)?(?:<li><label>Genero <\/label>[^<]+<\/li>)?(?:<li><label>Calidad <\/label>([^<]+)<\/li>)?(?:<li><label>Audio <\/label>([^<]+)<\/li>)?(?:<li><label>Fecha <\/label>.*?(\d+)<\/li>)?(?:<li><label>Pais de Origen <\/label>([^<]+)<\/li>)?(?:<li><label>Tamaño <\/label>([^<]+)<\/li>)?(<li> Idioma[^<]+<img src=.*?<br \/><\/li>)?' - try: - quality, audio, year, country, size, scrapedlanguage = scrapertools.find_single_match(data, patron) - except: - quality = '' - audio = '' - year = '' - country = '' - size = '' - scrapedlanguage = '' - if quality: item.quality = quality - if audio: item.quality += ' %s' % audio.strip() - if not item.infoLabels['year'] and year: item.infoLabels['year'] = year - if size: item.quality += ' [%s]' % size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b').replace('.', ',').strip() - if size: item.title += ' [%s]' % size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b').replace('.', ',').strip() - language = [] - matches = re.compile('(\d+.png)', re.DOTALL).findall(scrapedlanguage) - for lang in matches: - if "1.png" in lang and not 'CAST' in language: language += ['CAST'] - if "512.png" in lang and not 'LAT' in language: language += ['LAT'] - if ("1.png" not in lang and "512.png" not in lang) and not 'VOSE' in language: language += ['VOSE'] - if language: item.language = language - - #Extraemos los enlaces .torrent - ##Modalidad de varios archivos - patron = '<div class="fichadescargat"><\/div><div 
class="table-responsive"[^>]+>.*?<\/thead><tbody>(.*?)<\/tbody><\/table><\/div>' - if scrapertools.find_single_match(data, patron): - data_torrents = scrapertools.find_single_match(data, patron) - patron = '<tr><td>.*?<\/td><td><a href="([^"]+)"[^>]+><[^>]+><\/a><\/td><\/tr>' - #Modalidad de un archivo - else: - data_torrents = data - patron = '<div class="fichasubtitulos">.*?<\/div><\/li><\/ul>.*?<a href="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data_torrents) - if not matches: #error - logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data) - itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log')) - return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia? + matches = item.emergency_urls[1] #Restauramos matches de vídeos + subtitles = item.emergency_urls[2] #Restauramos matches de subtítulos + item.armagedon = True #Marcamos la situación como catastrófica + else: + if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca... 
+ return item #Devolvemos el Item de la llamada + else: + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + + if not item.armagedon: + #Extraemos el thumb + if not item.thumbnail: + item.thumbnail = scrapertools.find_single_match(data, patron) #guardamos thumb si no existe + + #Extraemos quality, audio, year, country, size, scrapedlanguage + patron = '<\/script><\/div><ul>(?:<li><label>Fecha de estreno <\/label>[^<]+<\/li>)?(?:<li><label>Genero <\/label>[^<]+<\/li>)?(?:<li><label>Calidad <\/label>([^<]+)<\/li>)?(?:<li><label>Audio <\/label>([^<]+)<\/li>)?(?:<li><label>Fecha <\/label>.*?(\d+)<\/li>)?(?:<li><label>Pais de Origen <\/label>([^<]+)<\/li>)?(?:<li><label>Tamaño <\/label>([^<]+)<\/li>)?(<li> Idioma[^<]+<img src=.*?<br \/><\/li>)?' + try: + quality = '' + audio = '' + year = '' + country = '' + size = '' + scrapedlanguage = '' + quality, audio, year, country, size, scrapedlanguage = scrapertools.find_single_match(data, patron) + except: + pass + if quality: item.quality = quality + if audio: item.quality += ' %s' % audio.strip() + if not item.infoLabels['year'] and year: item.infoLabels['year'] = year + if size: item.quality += ' [%s]' % size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b').replace('.', ',').strip() + if size: + item.title = re.sub(r'\s*\[\d+,?\d*?\s\w\s*[b|B]\]', '', item.title) #Quitamos size de título, si lo traía + item.title += ' [%s]' % size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b').replace('.', ',').strip() + + language = [] + matches_lang = re.compile('(\d+.png)', re.DOTALL).findall(scrapedlanguage) + for lang in matches_lang: + if "1.png" in lang and not 'CAST' in language: language += ['CAST'] + if "512.png" in lang and not 'LAT' in language: language += ['LAT'] + if ("1.png" not in lang and "512.png" not in lang) and not 'VOSE' in language: language += ['VOSE'] + if language: item.language = language + + #Extraemos los 
enlaces .torrent + #Modalidad de varios archivos + patron = '<div class="fichadescargat"><\/div><div class="table-responsive"[^>]+>.*?<\/thead><tbody>(.*?)<\/tbody><\/table><\/div>' + if scrapertools.find_single_match(data, patron): + data_torrents = scrapertools.find_single_match(data, patron) + patron = '<tr><td>.*?<\/td><td><a href="([^"]+)"[^>]+><[^>]+><\/a><\/td><\/tr>' + #Modalidad de un archivo + else: + data_torrents = data + patron = '<div class="fichasubtitulos">.*?<\/div><\/li><\/ul>.*?<a href="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data_torrents) + if not matches: #error + logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data) + itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log')) + + if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia? + matches = item.emergency_urls[1] #Restauramos matches de vídeos + subtitles = item.emergency_urls[2] #Restauramos matches de subtítulos + item.armagedon = True #Marcamos la situación como catastrófica + else: + if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca... 
+ return item #Devolvemos el Item de la llamada + else: + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos else: #SERIES: ya viene con las urls data = item.url #inicio data por compatibilidad @@ -447,11 +471,22 @@ def findvideos(item): del item.subtitle else: subtitle = scrapertools.find_single_match(data, patron).replace('&', '&').replace('.io/', sufix).replace('.com/', sufix) - data_subtitle = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(subtitle, timeout=timeout).data) - patron = '<tbody>(<tr class="fichserietabla_b">.*?<\/tr>)<\/tbody>' #salvamos el bloque - data_subtitle = scrapertools.find_single_match(data_subtitle, patron) - patron = '<tr class="fichserietabla_b">.*?<a href="([^"]+)"' - subtitles = re.compile(patron, re.DOTALL).findall(data_subtitle) #Creamos una lista con todos los sub-títulos + + try: + data_subtitle = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(subtitle, timeout=timeout).data) + except: + pass + + if not data_subtitle: + if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia? + matches = item.emergency_urls[1] #Restauramos matches de vídeos + subtitles = item.emergency_urls[2] #Restauramos matches de subtítulos + item.armagedon = True #Marcamos la situación como catastrófica + else: + patron = '<tbody>(<tr class="fichserietabla_b">.*?<\/tr>)<\/tbody>' #salvamos el bloque + data_subtitle = scrapertools.find_single_match(data_subtitle, patron) + patron = '<tr class="fichserietabla_b">.*?<a href="([^"]+)"' + subtitles = re.compile(patron, re.DOTALL).findall(data_subtitle) #Creamos una lista con todos los sub-títulos if subtitles: item.subtitle = [] for subtitle in subtitles: @@ -460,29 +495,49 @@ def findvideos(item): #logger.debug("PATRON: " + patron) #logger.debug(matches) + #logger.debug(subtitles) #logger.debug(data) + #Si es un lookup para cargar las urls de emergencia en la Videoteca... 
+ if item.videolibray_emergency_urls: + item.emergency_urls = [] #Iniciamos emergency_urls + item.emergency_urls.append([]) #Reservamos el espacio para los .torrents locales + item.emergency_urls.append(matches) #Salvamnos matches de los vídeos... + item.emergency_urls.append(subtitles) #Salvamnos matches de los subtítulos + #Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB - item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) + if not item.videolibray_emergency_urls: + item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Ahora tratamos los enlaces .torrent for scrapedurl in matches: #leemos los torrents con la diferentes calidades #Generamos una copia de Item para trabajar sobre ella item_local = item.clone() + item_local.url = scrapedurl.replace('&', '&').replace('.io/', sufix).replace('.com/', sufix) + if item.videolibray_emergency_urls: + item.emergency_urls[0].append(scrapedurl) #guardamos la url y pasamos a la siguiente + continue + if item.emergency_urls and not item.videolibray_emergency_urls: + item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la url del .Torrent ALTERNATIVA + if item.armagedon: + item_local.url = item.emergency_urls[0][0] #... 
ponemos la emergencia como primaria + del item.emergency_urls[0][0] #Una vez tratado lo limpiamos + #Buscamos si ya tiene tamaño, si no, los buscamos en el archivo .torrent - size = scrapertools.find_single_match(item_local.quality, '\s\[(\d+,?\d*?\s\w\s?[b|B])\]') - if not size: + size = scrapertools.find_single_match(item_local.quality, '\s*\[(\d+,?\d*?\s\w\s*[b|B])\]') + if not size and not item.armagedon: size = generictools.get_torrent_size(scrapedurl) #Buscamos el tamaño en el .torrent if size: - item_local.title = re.sub(r'\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía - item_local.title = '%s [%s]' % (item_local.title, size) #Agregamos size al final del título size = size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b') - item_local.quality = re.sub(r'\s\[\d+,?\d*?\s\w\s?[b|B]\]', '', item_local.quality) #Quitamos size de calidad, si lo traía - item_local.quality = '%s [%s]' % (item_local.quality, size) #Agregamos size al final de la calidad + item_local.title = re.sub(r'\s*\[\d+,?\d*?\s\w\s*[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía + item_local.title = '%s [%s]' % (item_local.title, size) #Agregamos size al final del título + item_local.quality = re.sub(r'\s*\[\d+,?\d*?\s\w\s*[b|B]\]', '', item_local.quality) #Quitamos size de calidad, si lo traía + item_local.quality = '%s [%s]' % (item_local.quality, size) #Agregamos size al final de la calidad + if item.armagedon: #Si es catastrófico, lo marcamos + item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % item_local.quality #Ahora pintamos el link del Torrent - item_local.url = scrapedurl.replace('&', '&').replace('.io/', sufix).replace('.com/', sufix) item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título y calidad, quitamos etiquetas vacías @@ 
-506,6 +561,9 @@ def findvideos(item): #logger.debug("TORRENT: " + scrapedurl + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName) #logger.debug(item_local) + if item.videolibray_emergency_urls: #Si ya hemos guardado todas las urls... + return item #... nos vamos + if len(itemlist_f) > 0: #Si hay entradas filtradas... itemlist.extend(itemlist_f) #Pintamos pantalla filtrada else: diff --git a/plugin.video.alfa/channels/todopeliculas.json b/plugin.video.alfa/channels/todopeliculas.json index b96ab647..a39232cb 100644 --- a/plugin.video.alfa/channels/todopeliculas.json +++ b/plugin.video.alfa/channels/todopeliculas.json @@ -45,6 +45,28 @@ "VOSE" ] }, + { + "id": "emergency_urls", + "type": "list", + "label": "Se quieren guardar Enlaces de Emergencia por si se cae la Web?", + "default": 1, + "enabled": true, + "visible": true, + "lvalues": [ + "No", + "Guardar", + "Borrar", + "Actualizar" + ] + }, + { + "id": "emergency_urls_torrents", + "type": "bool", + "label": "Se quieren guardar Torrents de Emergencia por si se cae la Web?", + "default": true, + "enabled": true, + "visible": "!eq(-1,'No')" + }, { "id": "include_in_newest_peliculas", "type": "bool", diff --git a/plugin.video.alfa/channels/todopeliculas.py b/plugin.video.alfa/channels/todopeliculas.py index b10bcfda..6b7b68ab 100644 --- a/plugin.video.alfa/channels/todopeliculas.py +++ b/plugin.video.alfa/channels/todopeliculas.py @@ -345,56 +345,100 @@ def findvideos(item): if not data: logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url) itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. 
Si la Web está activa, reportar el error con el log')) - return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + + if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia? + matches = item.emergency_urls[1] #Restauramos matches + item.armagedon = True #Marcamos la situación como catastrófica + else: + if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca... + return item #Devolvemos el Item de la llamada + else: + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos - matches = re.compile(patron, re.DOTALL).findall(data) + if not item.armagedon: + matches = re.compile(patron, re.DOTALL).findall(data) if not matches: #error - logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data) - itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log')) - return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada + if item.intervencion: #Sí ha sido clausurada judicialmente + item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Llamamos al método para el pintado del error + else: + logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data) + itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log')) + + if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia? 
+ matches = item.emergency_urls[1] #Restauramos matches + item.armagedon = True #Marcamos la situación como catastrófica + else: + if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca... + return item #Devolvemos el Item de la llamada + else: + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos #logger.debug("PATRON: " + patron) #logger.debug(matches) #logger.debug(data) + #Si es un lookup para cargar las urls de emergencia en la Videoteca... + if item.videolibray_emergency_urls: + item.emergency_urls = [] #Iniciamos emergency_urls + item.emergency_urls.append([]) #Reservamos el espacio para los .torrents locales + item.emergency_urls.append(matches) #Salvamnos matches de los vídeos... + #Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB - item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) + if not item.videolibray_emergency_urls: + item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Ahora tratamos los enlaces .torrent - for scrapedurl in matches: #leemos los torrents con la diferentes calidades - if 'javascript' in scrapedurl: #evitamos la basura + for scrapedurl in matches: #leemos los torrents con la diferentes calidades + if 'javascript' in scrapedurl: #evitamos la basura continue - - url = urlparse.urljoin(host, scrapedurl) - #Leemos la siguiente página, que es de verdad donde está el magnet/torrent - try: - data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)| ", "", httptools.downloadpage(url, timeout=timeout).data) - data = unicode(data, "utf-8", errors="replace").encode("utf-8") - except: - pass - - patron = "window.open\('([^']+)'" - url = scrapertools.find_single_match(data, patron) - if not url: #error - logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data) - itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No 
hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log')) - continue #si no hay más datos, algo no funciona, pasamos al siguiente + url = '' + if not item.armagedon: + url = urlparse.urljoin(host, scrapedurl) + #Leemos la siguiente página, que es de verdad donde está el magnet/torrent + try: + data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)| ", "", httptools.downloadpage(url, timeout=timeout).data) + data = unicode(data, "utf-8", errors="replace").encode("utf-8") + except: + pass + + patron = "window.open\('([^']+)'" + url = scrapertools.find_single_match(data, patron) + if not url: #error + logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data) + itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log')) + + if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia? 
+ item.armagedon = True #Marcamos la situación como catastrófica + else: + continue #si no hay más datos, algo no funciona, pasamos al siguiente #Generamos una copia de Item para trabajar sobre ella item_local = item.clone() - + item_local.url = urlparse.urljoin(host, url) + if item.videolibray_emergency_urls: + item.emergency_urls[0].append(item_local.url) #guardamos la url y pasamos a la siguiente + continue + if item.emergency_urls and not item.videolibray_emergency_urls: + item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la url del .Torrent ALTERNATIVA + if item.armagedon: + item_local.url = item.emergency_urls[0][0] #Restauramos la url + if len(item.emergency_urls[0]) > 1: + del item.emergency_urls[0][0] #Buscamos si ya tiene tamaño, si no, los buscamos en el archivo .torrent - size = scrapertools.find_single_match(item_local.quality, '\s\[(\d+,?\d*?\s\w\s?[b|B])\]') - if not size: + size = scrapertools.find_single_match(item_local.quality, '\s?\[(\d+,?\d*?\s\w\s?[b|B])\]') + if not size and not item.armagedon: size = generictools.get_torrent_size(item_local.url) #Buscamos el tamaño en el .torrent if size: - item_local.title = re.sub(r'\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía + item_local.title = re.sub(r'\s?\[\d+,?\d*?\s\w\s?[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía item_local.title = '%s [%s]' % (item_local.title, size) #Agregamos size al final del título size = size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b') - item_local.quality = re.sub(r'\s\[\d+,?\d*?\s\w\s?[b|B]\]', '', item_local.quality) #Quitamos size de calidad, si lo traía - item_local.quality = '%s [%s]' % (item_local.quality, size) #Agregamos size al final de la calidad + item_local.quality = re.sub(r'\s?\[\d+,?\d*?\s\w\s?[b|B]\]', '', item_local.quality) #Quitamos size de calidad, si lo traía + item_local.quality = '%s [%s]' % (item_local.quality, size) #Agregamos size 
al final de la calidad + if item.armagedon: #Si es catastrófico, lo marcamos + item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % item_local.quality #Ahora pintamos el link del Torrent item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) @@ -421,6 +465,9 @@ def findvideos(item): #logger.debug(item_local) + if item.videolibray_emergency_urls: #Si ya hemos guardado todas las urls... + return item #... nos vamos + if len(itemlist_f) > 0: #Si hay entradas filtradas... itemlist.extend(itemlist_f) #Pintamos pantalla filtrada else: diff --git a/plugin.video.alfa/channels/zonatorrent.json b/plugin.video.alfa/channels/zonatorrent.json index 4c9c2fc7..1501d702 100644 --- a/plugin.video.alfa/channels/zonatorrent.json +++ b/plugin.video.alfa/channels/zonatorrent.json @@ -47,6 +47,28 @@ "VOSE" ] }, + { + "id": "emergency_urls", + "type": "list", + "label": "Se quieren guardar Enlaces de Emergencia por si se cae la Web?", + "default": 1, + "enabled": true, + "visible": true, + "lvalues": [ + "No", + "Guardar", + "Borrar", + "Actualizar" + ] + }, + { + "id": "emergency_urls_torrents", + "type": "bool", + "label": "Se quieren guardar Torrents de Emergencia por si se cae la Web?", + "default": true, + "enabled": true, + "visible": "!eq(-1,'No')" + }, { "id": "timeout_downloadpage", "type": "list", diff --git a/plugin.video.alfa/channels/zonatorrent.py b/plugin.video.alfa/channels/zonatorrent.py index be3cef0c..546d7bb9 100644 --- a/plugin.video.alfa/channels/zonatorrent.py +++ b/plugin.video.alfa/channels/zonatorrent.py @@ -488,6 +488,8 @@ def findvideos(item): itemlist = [] itemlist_t = [] #Itemlist total de enlaces itemlist_f = [] #Itemlist de enlaces filtrados + titles = [] #Títulos de servidores Directos + urls = [] #Urls de servidores Directos if not item.language: item.language = ['CAST'] #Castellano por defecto matches = [] @@ 
-512,33 +514,75 @@ def findvideos(item): if not data: logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url) itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log')) - return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + + if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia? + matches = item.emergency_urls[1] #Restauramos matches de torrents + titles = item.emergency_urls[2] #Restauramos títulos de servidores Directos + urls = item.emergency_urls[3] #Restauramos urls de servidores Directos + item.armagedon = True #Marcamos la situación como catastrófica + else: + if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca... + return item #Devolvemos el Item de la llamada + else: + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos - matches = re.compile(patron, re.DOTALL).findall(data) + if not item.armagedon: + matches = re.compile(patron, re.DOTALL).findall(data) if not matches and not scrapertools.find_single_match(data, 'data-TPlayerNv="Opt\d+">.*? <span>(.*?)</span></li>'): #error - logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data) - itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. 
Verificar en la Web esto último y reportar el error con el log')) - return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada + if item.intervencion: #Sí ha sido clausurada judicialmente + item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Llamamos al método para el pintado del error + else: + logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data) + itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log')) + + if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia? + matches = item.emergency_urls[1] #Restauramos matches de torrents + titles = item.emergency_urls[2] #Restauramos títulos de servidores Directos + urls = item.emergency_urls[3] #Restauramos urls de servidores Directos + item.armagedon = True #Marcamos la situación como catastrófica + else: + if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca... + return item #Devolvemos el Item de la llamada + else: + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos #logger.debug("PATRON: " + patron) #logger.debug(matches) - #logger.debug(data) + logger.debug(data) + + #Si es un lookup para cargar las urls de emergencia en la Videoteca... + if item.videolibray_emergency_urls: + item.emergency_urls = [] #Iniciamos emergency_urls + item.emergency_urls.append([]) #Reservamos el espacio para los .torrents locales + item.emergency_urls.append(matches) #Salvamnos matches de los vídeos... 
#Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB - item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) + if not item.videolibray_emergency_urls: + item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Ahora tratamos los enlaces .torrent - for scrapedurl, scrapedserver, language, quality in matches: #leemos los torrents con la diferentes calidades + for scrapedurl, scrapedserver, language, quality in matches: #leemos los torrents con la diferentes calidades #Generamos una copia de Item para trabajar sobre ella item_local = item.clone() - if 'torrent' not in scrapedserver.lower(): #Si es un servidor Directo, lo dejamos para luego + if 'torrent' not in scrapedserver.lower(): #Si es un servidor Directo, lo dejamos para luego continue item_local.url = scrapedurl if '.io/' in item_local.url: item_local.url = re.sub(r'http.?:\/\/\w+\.\w+\/', host, item_local.url) #Aseguramos el dominio del canal + if item.videolibray_emergency_urls: + item.emergency_urls[0].append(item_local.url) #guardamos la url y pasamos a la siguiente + continue + if item.emergency_urls and not item.videolibray_emergency_urls: + item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la url del .Torrent ALTERNATIVA + if item.armagedon: + item_local.url = item.emergency_urls[0][0] #Restauramos la url + if len(item.emergency_urls[0]) > 1: + del item.emergency_urls[0][0] + #Detectamos idiomas if ("latino" in scrapedurl.lower() or "latino" in language.lower()) and "LAT" not in item_local.language: item_local.language += ['LAT'] @@ -554,17 +598,19 @@ def findvideos(item): item_local.quality = quality if scrapertools.find_single_match(item.quality, '(\[\d+:\d+\ h])'): item_local.quality += ' [/COLOR][COLOR white]%s' % scrapertools.find_single_match(item.quality, '(\[\d+:\d+\ h])') + if item.armagedon: #Si es catastrófico, lo marcamos + item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % 
item_local.quality #Buscamos si ya tiene tamaño, si no, los buscamos en el archivo .torrent size = scrapertools.find_single_match(item_local.quality, '\s\[(\d+,?\d*?\s\w\s?[b|B])\]') - if not size: + if not size and not item.armagedon: size = generictools.get_torrent_size(item_local.url) #Buscamos el tamaño en el .torrent if size: item_local.title = re.sub(r'\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía item_local.title = '%s [%s]' % (item_local.title, size) #Agregamos size al final del título size = size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b') - item_local.quality = re.sub(r'\s\[\d+,?\d*?\s\w\s?[b|B]\]', '', item_local.quality) #Quitamos size de calidad, si lo traía - item_local.quality = '%s [%s]' % (item_local.quality, size) #Agregamos size al final de la calidad + item_local.quality = re.sub(r'\s\[\d+,?\d*?\s\w\s?[b|B]\]', '', item_local.quality) #Quitamos size de calidad, si lo traía + item_local.quality = '%s [%s]' % (item_local.quality, size) #Agregamos size al final de la calidad #Ahora pintamos el link del Torrent item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) @@ -577,9 +623,9 @@ def findvideos(item): item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality).strip() item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() - item_local.alive = "??" #Calidad del link sin verificar - item_local.action = "play" #Visualizar vídeo - item_local.server = "torrent" #Servidor Torrent + item_local.alive = "??" 
#Calidad del link sin verificar + item_local.action = "play" #Visualizar vídeo + item_local.server = "torrent" #Servidor Torrent itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas @@ -601,8 +647,15 @@ def findvideos(item): #Ahora tratamos los Servidores Directos itemlist_t = [] #Itemlist total de enlaces itemlist_f = [] #Itemlist de enlaces filtrados - titles = re.compile('data-TPlayerNv="Opt\d+">.*? <span>(.*?)</span></li>', re.DOTALL).findall(data) - urls = re.compile('id="Opt\d+"><iframe[^>]+src="([^"]+)"', re.DOTALL).findall(data) + if not item.armagedon: + titles = re.compile('data-..layer..="Opt\d+">(?:<span>)?.*?\s?(?:<strong>.*?<\/strong>)?(?:<\/span>)?<span>(.*?)<\/span><\/li>', re.DOTALL).findall(data) + urls = re.compile('id="Opt\d+"><iframe[^>]+src="([^"]+)"', re.DOTALL).findall(data) + + #Si es un lookup para cargar las urls de emergencia en la Videoteca... + if item.videolibray_emergency_urls: + item.emergency_urls.append(titles) #Salvamnos matches de los títulos... + item.emergency_urls.append(urls) #Salvamnos matches de las urls... + return item #... 
y nos vamos #Recorremos la lista de servidores Directos, excluyendo YouTube para trailers if len(titles) == len(urls): @@ -618,8 +671,11 @@ def findvideos(item): title = titles[0] if "goo.gl" in urls[i]: - urls[i] = httptools.downloadpage(urls[i], follow_redirects=False, only_headers=True)\ - .headers.get("location", "") + try: + urls[i] = httptools.downloadpage(urls[i], follow_redirects=False, only_headers=True)\ + .headers.get("location", "") + except: + continue videourl = servertools.findvideos(urls[i]) #Buscamos la url del vídeo @@ -661,6 +717,8 @@ def findvideos(item): item_local.quality = quality #Añadimos la calidad if scrapertools.find_single_match(item.quality, '(\[\d+:\d+\ h])'): #Añadimos la duración item_local.quality += ' [/COLOR][COLOR white]%s' % scrapertools.find_single_match(item.quality, '(\[\d+:\d+\ h])') + if item.armagedon: #Si es catastrófico, lo marcamos + item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % item_local.quality item_local.title = '[COLOR yellow][%s][/COLOR] [COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (alive, server.capitalize(), item_local.quality, str(item_local.language)) #Preparamos título y calidad, quitamos etiquetas vacías From e34d04e47e9d9380fc9bba68e3cbb6029efa0edb Mon Sep 17 00:00:00 2001 From: Alfa <30527549+alfa-addon@users.noreply.github.com> Date: Wed, 28 Nov 2018 11:39:02 -0500 Subject: [PATCH 19/24] v2.7.16 --- plugin.video.alfa/addon.xml | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml index 2f0d41b5..2485539d 100755 --- a/plugin.video.alfa/addon.xml +++ b/plugin.video.alfa/addon.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="UTF-8" standalone="yes"?> -<addon id="plugin.video.alfa" name="Alfa" version="2.7.15" provider-name="Alfa Addon"> +<addon id="plugin.video.alfa" name="Alfa" version="2.7.16" provider-name="Alfa Addon"> <requires> <import 
addon="xbmc.python" version="2.1.0"/> <import addon="script.module.libtorrent" optional="true"/> @@ -11,7 +11,7 @@ <summary lang="es">Navega con Kodi por páginas web.</summary> <assets> <icon>logo-cumple.png</icon> - <fanart>fanart.jpg</fanart> + <fanart>fanart1.jpg</fanart> <screenshot>resources/media/themes/ss/1.jpg</screenshot> <screenshot>resources/media/themes/ss/2.jpg</screenshot> <screenshot>resources/media/themes/ss/3.jpg</screenshot> @@ -19,10 +19,13 @@ </assets> <news>[B]Estos son los cambios para esta versión:[/B] [COLOR green][B]Arreglos[/B][/COLOR] - ¤ cinetux ¤ danimados ¤ xms - ¤ bitp ¤ descargacineclasico ¤ dostream - ¤ cinecalidad ¤ pelisplus ¤ Mejortorrent - ¤ Mejortorrent1 ¤ Newpc1 + ¤ DivxTotal ¤ Pelismagnet ¤ Subtorrents + ¤ Todopleiculas ¤ Zonatorrent ¤ maxipelis24 + ¤ Danimados ¤ mediafire ¤ mp4upload + ¤ vevio ¤ BlogHorror ¤ PelisPlus + ¤ TuPeliculas ¤ VeSeriesOnline ¤ Actualizado pack +18 + + ¤ Agradecimientos a @paeznet y @chivmalev por colaborar con ésta versión </news> <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description> From fb14f64fcbed29c5b0ec7e285284567de092cb58 Mon Sep 17 00:00:00 2001 From: Kingbox <37674310+lopezvg@users.noreply.github.com> Date: Wed, 5 Dec 2018 12:00:44 +0100 Subject: [PATCH 20/24] =?UTF-8?q?Todopeliculas:=20correcci=C3=B3n=20pagina?= =?UTF-8?q?do?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- plugin.video.alfa/channels/todopeliculas.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugin.video.alfa/channels/todopeliculas.py b/plugin.video.alfa/channels/todopeliculas.py index 6b7b68ab..b10a1954 100644 --- a/plugin.video.alfa/channels/todopeliculas.py +++ b/plugin.video.alfa/channels/todopeliculas.py @@ -178,7 +178,9 @@ def listado(item): #logger.debug(data) #Buscamos la url de paginado y la última página - patron = '<a href="([^"]+=(\d+))" title="Siguiente">Siguiente<\/a>' + patron = '<a 
href="([^"]+=(\d+))" title="Next">Next<\/a>' + if not scrapertools.find_single_match(data, patron): + patron = '<a href="([^"]+=(\d+))" title="Siguiente">Siguiente<\/a>' try: next_page_url, curr_page = scrapertools.find_single_match(data, patron) curr_page = int(curr_page) / len(matches) From b6673053eb946d4218b9708f5da14dbcd4c56b6f Mon Sep 17 00:00:00 2001 From: chivmalev <lbivan187@gmail.com> Date: Wed, 5 Dec 2018 08:56:50 -0300 Subject: [PATCH 21/24] =?UTF-8?q?correcci=C3=B3nes?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- plugin.video.alfa/channels/maxipelis24.py | 55 +++++++++++++---------- 1 file changed, 32 insertions(+), 23 deletions(-) diff --git a/plugin.video.alfa/channels/maxipelis24.py b/plugin.video.alfa/channels/maxipelis24.py index d8d1eb30..4e1d0938 100644 --- a/plugin.video.alfa/channels/maxipelis24.py +++ b/plugin.video.alfa/channels/maxipelis24.py @@ -98,29 +98,31 @@ def findvideos(item): itemlist = [] data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ","", data) - patron = scrapertools.find_single_match(data, '<div id="player2">(.*?)</div>') - patron = '<div id="div.*?<div class="movieplay">.+?[a-zA-Z]="([^&]+)&' - + data = re.sub(r"\n|\r|\t|\s{2}| ","", data) + + patron = '<div id="div.*?<div class="movieplay".*?(?:iframe.*?src|IFRAME SRC)="([^&]+)&' matches = re.compile(patron, re.DOTALL).findall(data) - for link in matches: - if 'id=' in link: - id_type = 'id' - ir_type = 'ir' - elif 'ud=' in link: - id_type = 'ud' - ir_type = 'ur' - elif 'od=' in link: - id_type = 'od' - ir_type = 'or' - elif 'ad=' in link: - id_type = 'ad' - ir_type = 'ar' - elif 'ed=' in link: - id_type = 'ed' - ir_type = 'er' - + for link in matches: + if 'maxipelis24.tv/hideload/?' 
in link: + if 'id=' in link: + id_type = 'id' + ir_type = 'ir' + elif 'ud=' in link: + id_type = 'ud' + ir_type = 'ur' + elif 'od=' in link: + id_type = 'od' + ir_type = 'or' + elif 'ad=' in link: + id_type = 'ad' + ir_type = 'ar' + elif 'ed=' in link: + id_type = 'ed' + ir_type = 'er' + else: + continue + id = scrapertools.find_single_match(link, '%s=(.*)' % id_type) base_link = scrapertools.find_single_match(link, '(.*?)%s=' % id_type) @@ -131,11 +133,18 @@ def findvideos(item): url = video_data.headers['location'] title = '%s' + itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language='', infoLabels=item.infoLabels)) itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize()) + if itemlist: + if config.get_videolibrary_support(): + itemlist.append(Item(channel = item.channel, action = "")) + itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green", + action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail, + contentTitle = item.contentTitle + )) - - return itemlist \ No newline at end of file + return itemlist From 791215933519612a6a95623d058173c4a5bdd3bf Mon Sep 17 00:00:00 2001 From: Intel1 <luisriverap@hotmail.com> Date: Wed, 5 Dec 2018 09:59:21 -0500 Subject: [PATCH 22/24] Actualizados MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit allcalidad: fix listado descargacineclasico: agregado sección: Listado alfabético porntrex: fix listado rexpelis: desactivado, web no funciona seriesmetro: fix listado youporn: aparecía en listado general de canales vidup: fix enlaces --- plugin.video.alfa/channels/allcalidad.py | 12 +++--- .../channels/descargacineclasico.py | 38 ++++++++++--------- plugin.video.alfa/channels/porntrex.py | 13 ++++--- plugin.video.alfa/channels/rexpelis.json | 2 +- plugin.video.alfa/channels/seriesmetro.py | 2 +- plugin.video.alfa/channels/youporn.json | 2 +- 
plugin.video.alfa/servers/vidup.json | 2 +- plugin.video.alfa/servers/vidup.py | 5 ++- 8 files changed, 41 insertions(+), 35 deletions(-) diff --git a/plugin.video.alfa/channels/allcalidad.py b/plugin.video.alfa/channels/allcalidad.py index 6c5930d8..acce6a51 100755 --- a/plugin.video.alfa/channels/allcalidad.py +++ b/plugin.video.alfa/channels/allcalidad.py @@ -124,12 +124,12 @@ def peliculas(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data - patron = '(?s)short_overlay.*?<a href="([^"]+)' - patron += '.*?img.*?src="([^"]+)' - patron += '.*?title="([^"]+).*?' - patron += 'data-postid="([^"]+)' - matches = scrapertools.find_multiple_matches(data, patron) - for url, thumbnail, titulo, datapostid in matches: + matches = scrapertools.find_multiple_matches(data, '(?s)shortstory cf(.*?)rate_post') + for datos in matches: + url = scrapertools.find_single_match(datos, 'href="([^"]+)') + titulo = scrapertools.find_single_match(datos, 'short_header">([^<]+)').strip() + datapostid = scrapertools.find_single_match(datos, 'data-postid="([^"]+)') + thumbnail = scrapertools.find_single_match(datos, 'img w.*?src="([^"]+)') post = 'action=get_movie_details&postID=%s' %datapostid data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", post=post).data idioma = "Latino" diff --git a/plugin.video.alfa/channels/descargacineclasico.py b/plugin.video.alfa/channels/descargacineclasico.py index 2d52b3e8..07ee258f 100755 --- a/plugin.video.alfa/channels/descargacineclasico.py +++ b/plugin.video.alfa/channels/descargacineclasico.py @@ -11,27 +11,28 @@ from lib import unshortenit host = "http://www.descargacineclasico.net" - -def agrupa_datos(data): - # Agrupa los datos - data = re.sub(r'\n|\r|\t| |<br>|<!--.*?-->', '', data) - data = re.sub(r'\s+', ' ', data) - data = re.sub(r'>\s<', '><', data) - return data - - def mainlist(item): logger.info() itemlist = [] itemlist.append(Item(channel=item.channel, title="Últimas agregadas", action="agregadas", - 
url=host, viewmode="movie_with_plot", - thumbnail=get_thumb('last', auto=True))) + url=host, viewmode="movie_with_plot", thumbnail=get_thumb('last', auto=True))) itemlist.append(Item(channel=item.channel, title="Listado por género", action="porGenero", - url=host, - thumbnail=get_thumb('genres', auto=True))) - itemlist.append( - Item(channel=item.channel, title="Buscar", action="search", url=host, - thumbnail=get_thumb('search', auto=True))) + url=host, thumbnail=get_thumb('genres', auto=True))) + itemlist.append(Item(channel=item.channel, title="Listado alfabetico", action="porLetra", + url=host + "/cine-online/", thumbnail=get_thumb('alphabet', auto=True))) + itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host, + thumbnail=get_thumb('search', auto=True))) + return itemlist + + +def porLetra(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = 'noindex,nofollow" href="([^"]+)">(\w+)<' + matches = scrapertools.find_multiple_matches(data, patron) + for url, titulo in matches: + itemlist.append( Item(channel=item.channel , action="agregadas" , title=titulo, url=url, viewmode="movie_with_plot")) return itemlist @@ -43,7 +44,9 @@ def porGenero(item): data = re.compile(patron,re.DOTALL).findall(data) patron = '<li.*?>.*?href="([^"]+).*?>([^<]+)' matches = re.compile(patron,re.DOTALL).findall(data[0]) - for url,genero in matches: + for url, genero in matches: + if genero == "Erótico" and config.get_setting("adult_mode") == 0: + continue itemlist.append( Item(channel=item.channel , action="agregadas" , title=genero,url=url, viewmode="movie_with_plot")) return itemlist @@ -129,7 +132,6 @@ def findvideos(item): contentTitle = item.contentTitle )) return itemlist - return itemlist def play(item): diff --git a/plugin.video.alfa/channels/porntrex.py b/plugin.video.alfa/channels/porntrex.py index 5136cb41..1fc59e4d 100644 --- a/plugin.video.alfa/channels/porntrex.py +++ 
b/plugin.video.alfa/channels/porntrex.py @@ -66,16 +66,21 @@ def lista(item): action = "menu_info" # Extrae las entradas - patron = '<div class="video-item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)"(.*?)<div class="durations">.*?</i>([^<]+)<' + patron = '<div class="video-item.*?href="([^"]+)" ' + patron += 'title="([^"]+)".*?' + patron += 'data-src="([^"]+)"' + patron += '(.*?)<div class="durations">.*?' + patron += '</i>([^<]+)<' matches = scrapertools.find_multiple_matches(data, patron) for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches: if "go.php?" in scrapedurl: scrapedurl = urllib.unquote(scrapedurl.split("/go.php?u=")[1].split("&")[0]) - scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail) + if not scrapedthumbnail.startswith("https"): + scrapedthumbnail = "https:%s" % scrapedthumbnail else: scrapedurl = urlparse.urljoin(host, scrapedurl) if not scrapedthumbnail.startswith("https"): - scrapedthumbnail = host + "%s" % scrapedthumbnail + scrapedthumbnail = "https:%s" % scrapedthumbnail if duration: scrapedtitle = "%s - %s" % (duration, scrapedtitle) if '>HD<' in quality: @@ -110,7 +115,6 @@ def lista(item): next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % ( item.url, next_page) itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) - return itemlist @@ -225,7 +229,6 @@ def play(item): itemlist = [] data = get_data(item.url) - patron = '(?:video_url|video_alt_url[0-9]*)\s*:\s*\'([^\']+)\'.*?(?:video_url_text|video_alt_url[0-9]*_text)\s*:\s*\'([^\']+)\'' matches = scrapertools.find_multiple_matches(data, patron) if not matches: diff --git a/plugin.video.alfa/channels/rexpelis.json b/plugin.video.alfa/channels/rexpelis.json index b6cd46b8..bdae8bf3 100644 --- a/plugin.video.alfa/channels/rexpelis.json +++ b/plugin.video.alfa/channels/rexpelis.json @@ -1,7 +1,7 @@ { "id": "rexpelis", "name": "Rexpelis", - "active": 
true, + "active": false, "adult": false, "language": ["lat","cast"], "thumbnail": "https://i.postimg.cc/MMJ5g9Y1/rexpelis1.png", diff --git a/plugin.video.alfa/channels/seriesmetro.py b/plugin.video.alfa/channels/seriesmetro.py index 19516332..32f0bd83 100644 --- a/plugin.video.alfa/channels/seriesmetro.py +++ b/plugin.video.alfa/channels/seriesmetro.py @@ -59,7 +59,7 @@ def list_all(item): itemlist = [] data = get_source(item.url) - patron = '<div class="post-thumbnail"><a href="([^"]+)" title="([^"]+)">.*?data-src="([^"]+)"' + patron = '<div class="post-thumbnail"><a href="([^"]+)" title="([^"]+)">.*?data-lazy-src="([^"]+)"' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle, scrapedthumbnail in matches: diff --git a/plugin.video.alfa/channels/youporn.json b/plugin.video.alfa/channels/youporn.json index d63d7d3c..ed466088 100644 --- a/plugin.video.alfa/channels/youporn.json +++ b/plugin.video.alfa/channels/youporn.json @@ -2,7 +2,7 @@ "id": "youporn", "name": "youporn", "active": true, - "adult": false, + "adult": true, "language": ["*"], "thumbnail": "https://fs.ypncdn.com/cb/bundles/youpornwebfront/images/l_youporn_black.png", "banner": "", diff --git a/plugin.video.alfa/servers/vidup.json b/plugin.video.alfa/servers/vidup.json index 39f7ddd3..6ecbc72a 100755 --- a/plugin.video.alfa/servers/vidup.json +++ b/plugin.video.alfa/servers/vidup.json @@ -4,7 +4,7 @@ "ignore_urls": [], "patterns": [ { - "pattern": "vidup.(?:me|tv)/(?:embed-|)([A-z0-9]+)", + "pattern": "vidup.(?:me|tv|io)/(?:embed-|)([A-z0-9]+)", "url": "http://vidup.tv/embed-\\1.html" } ] diff --git a/plugin.video.alfa/servers/vidup.py b/plugin.video.alfa/servers/vidup.py index c38e843e..833dc817 100755 --- a/plugin.video.alfa/servers/vidup.py +++ b/plugin.video.alfa/servers/vidup.py @@ -20,8 +20,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= video_urls = [] post= {} post = urllib.urlencode(post) - url = 
httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers.get("location", "") - data = httptools.downloadpage("https://vidup.io/api/serve/video/" + scrapertools.find_single_match(url, "embed/([A-z0-9]+)"), post=post).data + headers = {"Referer":page_url} + url = httptools.downloadpage(page_url, follow_redirects=False, headers=headers, only_headers=True).headers.get("location", "") + data = httptools.downloadpage("https://vidup.io/api/serve/video/" + scrapertools.find_single_match(url, "embed.([A-z0-9]+)"), post=post).data bloque = scrapertools.find_single_match(data, 'qualities":\{(.*?)\}') matches = scrapertools.find_multiple_matches(bloque, '"([^"]+)":"([^"]+)') for res, media_url in matches: From 3bf456e44b3693822427d6d6a960c4c5df4a1059 Mon Sep 17 00:00:00 2001 From: Intel1 <luisriverap@hotmail.com> Date: Wed, 5 Dec 2018 11:51:45 -0500 Subject: [PATCH 23/24] Varios --- plugin.video.alfa/channels/cine24h.json | 63 +++ plugin.video.alfa/channels/cine24h.py | 382 +++++++++++++++ plugin.video.alfa/channels/hdfilmologia.json | 48 ++ plugin.video.alfa/channels/hdfilmologia.py | 262 +++++++++++ plugin.video.alfa/channels/pedropolis.py | 4 +- plugin.video.alfa/channels/pelis24.json | 49 ++ plugin.video.alfa/channels/pelis24.py | 397 ++++++++++++++++ plugin.video.alfa/channels/pelishd24.json | 64 +++ plugin.video.alfa/channels/pelishd24.py | 464 +++++++++++++++++++ plugin.video.alfa/channels/pelisplay.json | 93 ++++ plugin.video.alfa/channels/pelisplay.py | 414 +++++++++++++++++ plugin.video.alfa/channels/thumbzilla.json | 7 +- plugin.video.alfa/channels/thumbzilla.py | 32 +- plugin.video.alfa/channels/xms.py | 81 +++- 14 files changed, 2326 insertions(+), 34 deletions(-) create mode 100644 plugin.video.alfa/channels/cine24h.json create mode 100644 plugin.video.alfa/channels/cine24h.py create mode 100644 plugin.video.alfa/channels/hdfilmologia.json create mode 100644 plugin.video.alfa/channels/hdfilmologia.py create mode 100644 
plugin.video.alfa/channels/pelis24.json create mode 100644 plugin.video.alfa/channels/pelis24.py create mode 100644 plugin.video.alfa/channels/pelishd24.json create mode 100644 plugin.video.alfa/channels/pelishd24.py create mode 100644 plugin.video.alfa/channels/pelisplay.json create mode 100644 plugin.video.alfa/channels/pelisplay.py diff --git a/plugin.video.alfa/channels/cine24h.json b/plugin.video.alfa/channels/cine24h.json new file mode 100644 index 00000000..b679159c --- /dev/null +++ b/plugin.video.alfa/channels/cine24h.json @@ -0,0 +1,63 @@ +{ + "id": "cine24h", + "name": "Cine24H", + "active": true, + "adult": false, + "language": ["lat", "cast", "eng"], + "fanart": "https://i.postimg.cc/WpqD2n77/cine24bg.jpg", + "thumbnail": "https://cine24h.net/wp-content/uploads/2018/06/cine24hv2.png", + "banner": "", + "categories": [ + "movie", + "tvshow", + "vose" + ], + "settings": [ + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "Latino", + "Castellano", + "English" + ] + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "Sin color", + "Perfil 5", + "Perfil 4", + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + }, + { + "id": "orden_episodios", + "type": "bool", + "label": "Mostrar los episodios de las series en orden descendente", + "default": false, + "enabled": true, + "visible": true + } + ] +} diff --git a/plugin.video.alfa/channels/cine24h.py b/plugin.video.alfa/channels/cine24h.py new file mode 100644 index 00000000..e5edfed9 --- /dev/null +++ b/plugin.video.alfa/channels/cine24h.py @@ -0,0 +1,382 @@ +# -*- coding: utf-8 -*- +# -*- Channel CanalPelis -*- +# -*- Created for Alfa-addon -*- +# -*- 
By the Alfa Develop Group -*- + +import re +import sys +import urlparse + +from channels import autoplay +from channels import filtertools +from core import httptools +from core import scrapertools +from core import servertools +from core.item import Item +from core import channeltools +from core import tmdb +from platformcode import config, logger +from channelselector import get_thumb + +__channel__ = "cine24h" + +host = "https://cine24h.net/" + +try: + __modo_grafico__ = config.get_setting('modo_grafico', __channel__) + __perfil__ = int(config.get_setting('perfil', __channel__)) +except: + __modo_grafico__ = True + __perfil__ = 0 + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']] +if __perfil__ < 3: + color1, color2, color3, color4, color5 = perfil[__perfil__] +else: + color1 = color2 = color3 = color4 = color5 = "" + +headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], + ['Referer', host]] + + +parameters = channeltools.get_channel_parameters(__channel__) +fanart_host = parameters['fanart'] +thumbnail_host = parameters['thumbnail'] + +IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST'} +list_language = IDIOMAS.values() +list_quality = [] +list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry'] + + +def mainlist(item): + logger.info() + autoplay.init(item.channel, list_servers, list_quality) + itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True, + viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb('movies', auto=True)), + + item.clone(title="Series", action="series", extra='serie', url=host + 'series/', + viewmode="movie_with_plot", text_blod=True, viewcontent='movies', + thumbnail=get_thumb('tvshows', auto=True), page=0), + + 
item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True), + text_blod=True, url=host, page=0)] + + autoplay.show_option(item.channel, itemlist) + return itemlist + + +def menumovies(item): + logger.info() + itemlist = [item.clone(title="Novedades", action="peliculas", thumbnail=get_thumb('newest', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host + 'peliculas/', viewmode="movie_with_plot"), + + item.clone(title="Estrenos", action="peliculas", thumbnail=get_thumb('estrenos', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host + '?s=trfilter&trfilter=1&years%5B%5D=2018', viewmode="movie_with_plot"), + + item.clone(title="Más Vistas", action="peliculas", thumbnail=get_thumb('more watched', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host + 'peliculas-mas-vistas/', viewmode="movie_with_plot"), + + item.clone(title="Géneros", action="genresYears", thumbnail=get_thumb('genres', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host, viewmode="movie_with_plot"), + + item.clone(title="Estrenos por Año", action="genresYears", thumbnail=get_thumb('year', auto=True), + text_blod=True, page=0, viewcontent='movies', url=host, + viewmode="movie_with_plot"), + + item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True), + text_blod=True, url=host, page=0, extra='buscarP')] + + return itemlist + + +def search(item, texto): + logger.info() + + texto = texto.replace(" ", "+") + item.url = urlparse.urljoin(item.url, "?s={0}".format(texto)) + + try: + return peliculas(item) + + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + +def peliculas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data) + data = 
scrapertools.decodeHtmlentities(data) + patron = '<article id="[^"]+" class="TPost[^<]+<a href="([^"]+)">.*?' # url + patron += '<img src="([^"]+)".*?' # img + patron += '</figure>(.*?)' # tipo + patron += '<h3 class="Title">([^<]+)</h3>.*?' # title + patron += '<span class="Year">([^<]+)</span>.*?' # year + + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedurl, scrapedthumbnail, tipo, scrapedtitle, year in matches[item.page:item.page + 30]: + if item.title == 'Buscar' and 'serie' in scrapedurl: + action = 'temporadas' + contentType = 'tvshow' + title = scrapedtitle + '[COLOR blue] (Serie)[/COLOR]' + else: + action = 'findvideos' + contentType = 'movie' + title = scrapedtitle + + itemlist.append(item.clone(channel=__channel__, action=action, text_color=color3, show=scrapedtitle, + url=scrapedurl, infoLabels={'year': year}, contentType=contentType, + contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail, + title=title, context="buscar_trailer")) + + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + + if item.page + 30 < len(matches): + itemlist.append(item.clone(page=item.page + 30, + title="» Siguiente »", text_color=color3)) + else: + next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">') + if next_page: + itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3)) + + return itemlist + + +def genresYears(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data) + data = scrapertools.decodeHtmlentities(data) + + if item.title == "Estrenos por Año": + patron_todas = 'ESTRENOS</a>(.*?)</i> Géneros' + else: + patron_todas = 'Géneros</a>(.*?)</li></ul></li>' + + data = scrapertools.find_single_match(data, patron_todas) + patron = '<a href="([^"]+)">([^<]+)</a>' # url, title + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedurl, scrapedtitle in 
matches: + + itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="peliculas")) + + return itemlist + + +def year_release(item): + logger.info() + itemlist = [] + + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '<li><a href="([^"]+)">([^<]+)</a></li>' # url, title + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + + itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0, + url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next')) + + return itemlist + + +def series(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data) + patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?' # url + patron += '<img src="([^"]+)".*?' # img + patron += '<h3 class="Title">([^<]+)</h3>' # title + + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]: + + itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas", + contentSerieName=scrapedtitle, show=scrapedtitle, + thumbnail='https:' + scrapedthumbnail, contentType='tvshow')) + + tmdb.set_infoLabels(itemlist, __modo_grafico__) + + if item.page + 30 < len(matches): + itemlist.append(item.clone(page=item.page + 30, + title="» Siguiente »", text_color=color3)) + else: + next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">') + + if next_page: + itemlist.append(item.clone(url=next_page, page=0, + title="» Siguiente »", text_color=color3)) + + return itemlist + + +def temporadas(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<div class="[^>]+>[^<]+<span>(.*?)</span> <i' # numeros de temporadas + + matches = 
scrapertools.find_multiple_matches(data, patron) + if len(matches) > 1: + for scrapedseason in matches: + new_item = item.clone(action="episodios", season=scrapedseason, extra='temporadas') + new_item.infoLabels['season'] = scrapedseason + new_item.extra = "" + itemlist.append(new_item) + + tmdb.set_infoLabels(itemlist, __modo_grafico__) + + for i in itemlist: + i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle']) + if i.infoLabels['title']: + # Si la temporada tiene nombre propio añadirselo al titulo del item + i.title += " - %s" % (i.infoLabels['title']) + if i.infoLabels.has_key('poster_path'): + # Si la temporada tiene poster propio remplazar al de la serie + i.thumbnail = i.infoLabels['poster_path'] + + itemlist.sort(key=lambda it: int(it.infoLabels['season'])) + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="episodios", show=item.show, category="Series", + text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host)) + + return itemlist + else: + return episodios(item) + + +def episodios(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<td class="MvTbImg B"><a href="([^"]+)".*?' 
# url + patron += '<td class="MvTbTtl"><a href="https://cine24h.net/episode/(.*?)/">([^<]+)</a>' # title de episodios + + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedurl, scrapedtitle, scrapedname in matches: + scrapedtitle = scrapedtitle.replace('--', '0') + patron = '(\d+)x(\d+)' + match = re.compile(patron, re.DOTALL).findall(scrapedtitle) + season, episode = match[0] + + if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season): + continue + + title = "%sx%s: %s" % (season, episode.zfill(2), scrapedname) + new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title, + contentType="episode") + if 'infoLabels' not in new_item: + new_item.infoLabels = {} + + new_item.infoLabels['season'] = season + new_item.infoLabels['episode'] = episode.zfill(2) + + itemlist.append(new_item) + + # TODO no hacer esto si estamos añadiendo a la videoteca + if not item.extra: + # Obtenemos los datos de todos los capitulos de la temporada mediante multihilos + tmdb.set_infoLabels(itemlist, __modo_grafico__) + for i in itemlist: + if i.infoLabels['title']: + # Si el capitulo tiene nombre propio añadirselo al titulo del item + i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[ + 'episode'], i.infoLabels['title']) + if i.infoLabels.has_key('poster_path'): + # Si el capitulo tiene imagen propia remplazar al poster + i.thumbnail = i.infoLabels['poster_path'] + + itemlist.sort(key=lambda it: int(it.infoLabels['episode']), + reverse=config.get_setting('orden_episodios', __channel__)) + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + # Opción "Añadir esta serie a la videoteca" + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="episodios", show=item.show, category="Series", + text_color=color1, thumbnail=thumbnail_host, 
fanart=fanart_host)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}| ", "", data) + data = scrapertools.decodeHtmlentities(data) + patron = 'data-tplayernv="Opt(.*?)"><span>(.*?)</span>(.*?)</li>' # option, server, lang - quality + matches = re.compile(patron, re.DOTALL).findall(data) + + for option, servername, quote in matches: + patron = '<span>(.*?) -([^<]+)</span' + match = re.compile(patron, re.DOTALL).findall(quote) + lang, quality = match[0] + quality = quality.strip() + headers = {'Referer': item.url} + url_1 = scrapertools.find_single_match(data, 'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option) + new_data = httptools.downloadpage(url_1, headers=headers).data + new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}| ", "", new_data) + new_data = scrapertools.decodeHtmlentities(new_data) + url2 = scrapertools.find_single_match(new_data, '<iframe width="560" height="315" src="([^"]+)"') + url = url2 + '|%s' % url_1 + if 'rapidvideo' in url2: + url = url2 + + lang = lang.lower().strip() + languages = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]', + 'español': '[COLOR green](CAST)[/COLOR]', + 'subespañol': '[COLOR red](VOS)[/COLOR]', + 'sub': '[COLOR red](VOS)[/COLOR]'} + if lang in languages: + lang = languages[lang] + + servername = servertools.get_server_from_url(url) + + title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % ( + servername.title(), quality, lang) + + itemlist.append(item.clone(action='play', url=url, title=title, language=lang, quality=quality, + text_color=color3)) + + itemlist = servertools.get_servers_itemlist(itemlist) + + itemlist.sort(key=lambda it: it.language, reverse=False) + + # Requerido para FilterTools + itemlist = filtertools.get_links(itemlist, item, list_language) + # Requerido para AutoPlay + autoplay.start(itemlist, item) + + if 
config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios': + itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos", + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + thumbnail=thumbnail_host, contentTitle=item.contentTitle)) + + return itemlist diff --git a/plugin.video.alfa/channels/hdfilmologia.json b/plugin.video.alfa/channels/hdfilmologia.json new file mode 100644 index 00000000..eb3846b5 --- /dev/null +++ b/plugin.video.alfa/channels/hdfilmologia.json @@ -0,0 +1,48 @@ +{ + "id": "hdfilmologia", + "name": "HDFilmologia", + "active": true, + "adult": false, + "language": ["cast", "lat"], + "fanart": "https://i.postimg.cc/qvFCZNKT/Alpha-652355392-large.jpg", + "thumbnail": "https://hdfilmologia.com/templates/gorstyle/images/logo.png", + "banner": "", + "categories": [ + "movie", + "vos" + ], + "settings": [ + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "Sin color", + "Perfil 5", + "Perfil 4", + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + }, + { + "id": "orden_episodios", + "type": "bool", + "label": "Mostrar los episodios de las series en orden descendente", + "default": false, + "enabled": true, + "visible": true + } + ] +} diff --git a/plugin.video.alfa/channels/hdfilmologia.py b/plugin.video.alfa/channels/hdfilmologia.py new file mode 100644 index 00000000..6d0952a2 --- /dev/null +++ b/plugin.video.alfa/channels/hdfilmologia.py @@ -0,0 +1,262 @@ +# -*- coding: utf-8 -*- +# -*- Channel HDFilmologia -*- +# -*- Created for Alfa-addon -*- +# -*- By the Alfa Develop Group -*- + +import re +import sys +import urllib +import urlparse + +from core import httptools +from core import scrapertools +from core import servertools 
+from core.item import Item +from core import channeltools +from core import tmdb +from platformcode import config, logger +from channelselector import get_thumb + +__channel__ = "hdfilmologia" + +host = "https://hdfilmologia.com/" + +try: + __modo_grafico__ = config.get_setting('modo_grafico', __channel__) + __perfil__ = int(config.get_setting('perfil', __channel__)) +except: + __modo_grafico__ = True + __perfil__ = 0 + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']] +if __perfil__ < 3: + color1, color2, color3, color4, color5 = perfil[__perfil__] +else: + color1 = color2 = color3 = color4 = color5 = "" + +headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], + ['Referer', host]] + + +parameters = channeltools.get_channel_parameters(__channel__) +fanart_host = parameters['fanart'] +thumbnail_host = parameters['thumbnail'] + + +def mainlist(item): + logger.info() + itemlist = [] + + itemlist.append(item.clone(title="Últimas Agregadas", action="movies",thumbnail=get_thumb('last', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host + 'index.php?do=lastnews', viewmode="movie_with_plot")) + + itemlist.append(item.clone(title="Estrenos", action="movies", thumbnail=get_thumb('premieres', auto=True), + text_blod=True, page=0, viewcontent='movies', url=host + 'estrenos', + viewmode="movie_with_plot")) + + itemlist.append(item.clone(title="Más Vistas", action="movies",thumbnail=get_thumb('more watched', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host + 'mas-vistas/', viewmode="movie_with_plot")) + + itemlist.append(item.clone(title="Películas Por País", action="countriesYears",thumbnail=get_thumb('country', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host, 
viewmode="movie_with_plot")) + + itemlist.append(item.clone(title="Películas Por Año", action="countriesYears",thumbnail=get_thumb('year', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host, viewmode="movie_with_plot")) + + itemlist.append(item.clone(title="Géneros", action="genres",thumbnail=get_thumb('genres', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host, viewmode="movie_with_plot")) + + + + itemlist.append(item.clone(title="Buscar", action="search",thumbnail=get_thumb('search', auto=True), + text_blod=True, url=host, page=0)) + + + return itemlist + + +def search(item, texto): + logger.info() + + texto = texto.replace(" ", "+") + item.url = urlparse.urljoin(item.url, "?do=search&mode=advanced&subaction=search&story={0}".format(texto)) + # 'https://hdfilmologia.com/?do=search&mode=advanced&subaction=search&story=la+sombra' + + try: + return sub_search(item) + + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + +def sub_search(item): + logger.info() + + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<a class="sres-wrap clearfix" href="([^"]+)">' #url + patron += '<div class="sres-img"><img src="/([^"]+)" alt="([^"]+)" />.*?' 
# img, title + patron += '<div class="sres-desc">(.*?)</div>' # plot + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedthumbnail, scrapedtitle, plot in matches: + + itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle, + action="findvideos", text_color=color3, page=0, plot=plot, + thumbnail=host+scrapedthumbnail)) + + pagination = scrapertools.find_single_match(data, 'class="pnext"><a href="([^"]+)">') + + if pagination: + itemlist.append(Item(channel=__channel__, action="sub_search", + title="» Siguiente »", url=pagination)) + + tmdb.set_infoLabels(itemlist) + + return itemlist + + +def movies(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data) + patron = '<div class="kino-item ignore-select">.*?<a href="([^"]+)" class="kino-h"><h2>([^<]+)</h2>.*?' # url, title + patron += '<img src="([^"]+)".*?' # img + patron += '<div class="k-meta qual-mark">([^<]+)</div>.*?' 
# quality + patron += '<strong>Año:</strong></div>([^<]+)</li>' # year + + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedurl, scrapedtitle, scrapedthumbnail, quality, year in matches[item.page:item.page + 25]: + scrapedthumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle, quality) + + itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3, + url=scrapedurl, infoLabels={'year': year.strip()}, + contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, + title=title, context="buscar_trailer")) + + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + + if item.page + 25 < len(matches): + itemlist.append(item.clone(page=item.page + 25, + title="» Siguiente »", text_color=color3)) + else: + next_page = scrapertools.find_single_match( + data, 'class="pnext"><a href="([^"]+)">') + + if next_page: + itemlist.append(item.clone(url=next_page, page=0, + title="» Siguiente »", text_color=color3)) + + + return itemlist + + +def genres(item): + logger.info() + itemlist = [] + + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<li class="myli"><a href="/([^"]+)">([^<]+)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + + itemlist.append(item.clone(channel=__channel__, action="movies", title=scrapedtitle, + url=host+scrapedurl, text_color=color3, viewmode="movie_with_plot")) + + return itemlist + + +def countriesYears(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data) + + if item.title == "Películas Por País": + patron_todas = 'Por País</option>(.*?)</option></select>' + else: + patron_todas = 'Por Año</option>(.*?)<option value="/">Peliculas' + + data = scrapertools.find_single_match(data, patron_todas) + patron = '<option value="/([^"]+)">([^<]+)</option>' # 
url, title + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedurl, scrapedtitle in matches: + + itemlist.append(item.clone(title=scrapedtitle, url=host+scrapedurl, action="movies")) + + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}| ", "", data) + + patron = '(\w+)src\d+="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + for lang, url in matches: + + server = servertools.get_server_from_url(url) + if 'dropbox' in url: + server = 'dropbox' + if '/drive/' in url: + data = httptools.downloadpage(url).data + url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"') + server = 'gdrive' + + if 'ultrapeliculashd' in url: + data = httptools.downloadpage(url).data + # logger.info(data) + patron = "\|s\|(\w+)\|" + matches = re.compile(patron, re.DOTALL).findall(data) + for key in matches: + url = 'https://www.dropbox.com/s/%s?dl=1' % (key) + server = 'dropbox' + languages = {'l': '[COLOR cornflowerblue](LAT)[/COLOR]', + 'e': '[COLOR green](CAST)[/COLOR]', + 's': '[COLOR red](VOS)[/COLOR]'} + if lang in languages: + lang = languages[lang] + + title = "Ver en: [COLOR yellow](%s)[/COLOR] [COLOR yellowgreen]%s[/COLOR]" % (server.title(), lang) + if 'youtube' not in server: + + itemlist.append(item.clone(action='play', url=url, title=title, language=lang, + text_color=color3)) + + itemlist = servertools.get_servers_itemlist(itemlist) + itemlist.sort(key=lambda it: it.language, reverse=False) + + + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios': + itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos", + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + thumbnail=thumbnail_host, contentTitle=item.contentTitle)) + + return itemlist diff --git a/plugin.video.alfa/channels/pedropolis.py 
b/plugin.video.alfa/channels/pedropolis.py index 74f97c13..60702a68 100644 --- a/plugin.video.alfa/channels/pedropolis.py +++ b/plugin.video.alfa/channels/pedropolis.py @@ -136,7 +136,7 @@ def peliculas(item): tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - pagination = scrapertools.find_single_match(data, "<span class=\"current\">\d+</span><a href='([^']+)'") + pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />') if pagination: itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »", @@ -239,7 +239,7 @@ def series(item): action="temporadas", contentType='tvshow')) tmdb.set_infoLabels(itemlist, __modo_grafico__) - pagination = scrapertools.find_single_match(data, "<link rel='next' href='([^']+)' />") + pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />') if pagination: itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=pagination, diff --git a/plugin.video.alfa/channels/pelis24.json b/plugin.video.alfa/channels/pelis24.json new file mode 100644 index 00000000..542a0656 --- /dev/null +++ b/plugin.video.alfa/channels/pelis24.json @@ -0,0 +1,49 @@ +{ + "id": "pelis24", + "name": "Pelis24", + "active": true, + "adult": false, + "language": ["lat"], + "fanart": "https://i.postimg.cc/WpqD2n77/cine24bg.jpg", + "thumbnail": "https://www.pelis24.in/wp-content/uploads/2018/05/44.png", + "banner": "", + "categories": [ + "movie", + "tvshow", + "vos" + ], + "settings": [ + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "Sin color", + "Perfil 5", + "Perfil 4", + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + }, + { + "id": "orden_episodios", + "type": "bool", + "label": "Mostrar los episodios de las 
series en orden descendente", + "default": false, + "enabled": true, + "visible": true + } + ] +} diff --git a/plugin.video.alfa/channels/pelis24.py b/plugin.video.alfa/channels/pelis24.py new file mode 100644 index 00000000..859646c1 --- /dev/null +++ b/plugin.video.alfa/channels/pelis24.py @@ -0,0 +1,397 @@ +# -*- coding: utf-8 -*- +# -*- Channel CanalPelis -*- +# -*- Created for Alfa-addon -*- +# -*- By the Alfa Develop Group -*- + +import re +import sys +import urllib +import urlparse + +from channels import autoplay +from channels import filtertools +from core import httptools +from core import scrapertools +from core import servertools +from core.item import Item +from core import channeltools +from core import tmdb +from platformcode import config, logger +from channelselector import get_thumb + +__channel__ = "pelis24" + +host = "https://www.pelis24.in/" + +try: + __modo_grafico__ = config.get_setting('modo_grafico', __channel__) + __perfil__ = int(config.get_setting('perfil', __channel__)) +except: + __modo_grafico__ = True + __perfil__ = 0 + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']] +if __perfil__ < 3: + color1, color2, color3, color4, color5 = perfil[__perfil__] +else: + color1 = color2 = color3 = color4 = color5 = "" + +headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], + ['Referer', host]] + + +parameters = channeltools.get_channel_parameters(__channel__) +fanart_host = parameters['fanart'] +thumbnail_host = parameters['thumbnail'] + +IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST'} +list_language = IDIOMAS.values() +list_quality = [] +list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry'] + + + + + + +def mainlist(item): + logger.info() + autoplay.init(item.channel, 
list_servers, list_quality) + itemlist = [item.clone(title="Novedades", action="peliculas",thumbnail=get_thumb('newest', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host + 'movies/', viewmode="movie_with_plot"), + + item.clone(title="Tendencias", action="peliculas",thumbnail=get_thumb('newest', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"), + + item.clone(title="Estrenos", action="peliculas",thumbnail=get_thumb('estrenos', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host + 'genre/estrenos/', viewmode="movie_with_plot"), + + item.clone(title="Géneros", action="genresYears",thumbnail=get_thumb('genres', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host, viewmode="movie_with_plot"), + + item.clone(title="Buscar", action="search",thumbnail=get_thumb('search', auto=True), + text_blod=True, url=host, page=0)] + + autoplay.show_option(item.channel, itemlist) + return itemlist + +def search(item, texto): + logger.info() + + texto = texto.replace(" ", "+") + item.url = urlparse.urljoin(item.url, "?s={0}".format(texto)) + + try: + return sub_search(item) + + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + +def sub_search(item): + logger.info() + + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + # logger.info(data) + data = scrapertools.find_single_match(data, '<header><h1>Resultados encontrados(.*?)resppages') + # logger.info(data) + patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />.*?' 
# url, img, title + patron += '<span class="year">([^<]+)</span>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches: + if 'tvshows' not in scrapedurl: + + itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle, + action="findvideos", infoLabels={"year": year}, + thumbnail=scrapedthumbnail, text_color=color3)) + + paginacion = scrapertools.find_single_match(data, "<span class=\"current\">\d+</span><a href='([^']+)'") + + if paginacion: + itemlist.append(Item(channel=item.channel, action="sub_search", + title="» Siguiente »", url=paginacion, + thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/next.png')) + + tmdb.set_infoLabels(itemlist) + + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data) + data = scrapertools.decodeHtmlentities(data) + # logger.info(data) + + patron = '<article id="post-\w+" class="item movies"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?' # img, title + patron += '<span class="quality">([^<]+)</span> </div>\s*<a href="([^"]+)">.*?' 
# quality, url + patron += '</h3><span>([^<]+)</span>' # year + + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year in matches[item.page:item.page + 30]: + title = '%s [COLOR yellowgreen](%s)[/COLOR]' % (scrapedtitle, quality) + + itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3, + url=scrapedurl, infoLabels={'year': year}, quality=quality, + contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, + title=title, context="buscar_trailer")) + + + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + + if item.page + 30 < len(matches): + itemlist.append(item.clone(page=item.page + 30, + title="» Siguiente »", text_color=color3)) + else: + next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />') + if next_page: + itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3)) + + + return itemlist + + +def genresYears(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data) + data = scrapertools.decodeHtmlentities(data) + + if item.title == "Estrenos por Año": + patron_todas = 'ESTRENOS</a>(.*?)</i> Géneros' + else: + patron_todas = '<h2>Generos</h2>(.*?)</div><aside' + # logger.error(texto='***********uuuuuuu*****' + patron_todas) + + data = scrapertools.find_single_match(data, patron_todas) + # logger.error(texto='***********uuuuuuu*****' + data) + patron = '<a href="([^"]+)">([^<]+)</a> <i>([^<]+)</i>' # url, title, videos + # patron = '<a href="([^"]+)">([^<]+)</a>' # url, title + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedurl, scrapedtitle, videos_num in matches: + title = '%s (%s)' % (scrapedtitle, videos_num.replace('.', ',')) + + itemlist.append(item.clone(title=title, url=scrapedurl, action="peliculas")) + + + return itemlist + + +def year_release(item): + logger.info() + 
itemlist = [] + + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + # logger.info(data) + patron = '<li><a href="([^"]+)">([^<]+)</a></li>' # url, title + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + + itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0, + url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next')) + + return itemlist + + +def series(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data) + # logger.info(data) + + patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?' # url + patron += '<img src="([^"]+)".*?' # img + patron += '<h3 class="Title">([^<]+)</h3>' # title + + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]: + + itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas", + contentSerieName=scrapedtitle, show=scrapedtitle, + thumbnail='https:'+scrapedthumbnail, contentType='tvshow')) + + tmdb.set_infoLabels(itemlist, __modo_grafico__) + + if item.page + 30 < len(matches): + itemlist.append(item.clone(page=item.page + 30, + title="» Siguiente »", text_color=color3)) + else: + next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">') + + if next_page: + itemlist.append(item.clone(url=next_page, page=0, + title="» Siguiente »", text_color=color3)) + + return itemlist + + +def temporadas(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + # logger.info(data) + patron = '<div class="[^>]+>[^<]+<span>(.*?)</span> <i' # numeros de temporadas + + matches = scrapertools.find_multiple_matches(data, patron) + if len(matches) > 1: + for scrapedseason in 
matches: + new_item = item.clone(action="episodios", season=scrapedseason, extra='temporadas') + new_item.infoLabels['season'] = scrapedseason + new_item.extra = "" + itemlist.append(new_item) + + tmdb.set_infoLabels(itemlist, __modo_grafico__) + + for i in itemlist: + i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle']) + if i.infoLabels['title']: + # Si la temporada tiene nombre propio añadirselo al titulo del item + i.title += " - %s" % (i.infoLabels['title']) + if i.infoLabels.has_key('poster_path'): + # Si la temporada tiene poster propio remplazar al de la serie + i.thumbnail = i.infoLabels['poster_path'] + + itemlist.sort(key=lambda it: int(it.infoLabels['season'])) + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="episodios", show=item.show, category="Series", + text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host)) + + return itemlist + else: + return episodios(item) + + +def episodios(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<td class="MvTbImg B"><a href="([^"]+)".*?' 
# url + patron += '<td class="MvTbTtl"><a href="https://cine24h.net/episode/(.*?)/">([^<]+)</a>' # title de episodios + + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedurl, scrapedtitle, scrapedname in matches: + scrapedtitle = scrapedtitle.replace('--', '0') + patron = '(\d+)x(\d+)' + match = re.compile(patron, re.DOTALL).findall(scrapedtitle) + season, episode = match[0] + + if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season): + continue + + title = "%sx%s: %s" % (season, episode.zfill(2), scrapedname) + new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title, + contentType="episode") + if 'infoLabels' not in new_item: + new_item.infoLabels = {} + + new_item.infoLabels['season'] = season + new_item.infoLabels['episode'] = episode.zfill(2) + + itemlist.append(new_item) + + # TODO no hacer esto si estamos añadiendo a la videoteca + if not item.extra: + # Obtenemos los datos de todos los capitulos de la temporada mediante multihilos + tmdb.set_infoLabels(itemlist, __modo_grafico__) + for i in itemlist: + if i.infoLabels['title']: + # Si el capitulo tiene nombre propio añadirselo al titulo del item + i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[ + 'episode'], i.infoLabels['title']) + if i.infoLabels.has_key('poster_path'): + # Si el capitulo tiene imagen propia remplazar al poster + i.thumbnail = i.infoLabels['poster_path'] + + itemlist.sort(key=lambda it: int(it.infoLabels['episode']), + reverse=config.get_setting('orden_episodios', __channel__)) + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + # Opción "Añadir esta serie a la videoteca" + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="episodios", show=item.show, category="Series", + text_color=color1, thumbnail=thumbnail_host, 
fanart=fanart_host)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}| ", "", data) + data = scrapertools.decodeHtmlentities(data) + # logger.info(data) + + # patron1 = 'data-tplayernv="Opt(.*?)"><span>(.*?)</span><span>(.*?)</span>' # option, server, lang - quality + patron = 'href="#option-(.*?)"><span class="dt_flag"><img src="[^"]+"></span>([^<]+)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + # urls = re.compile(patron2, re.DOTALL).findall(data) + + for option, lang in matches: + url = scrapertools.find_single_match(data, '<div id="option-%s" class="[^"]+"><iframe class="metaframe rptss" src="([^"]+)"' % option) + lang = lang.lower().strip() + languages = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]', + 'castellano': '[COLOR green](CAST)[/COLOR]', + 'español': '[COLOR green](CAST)[/COLOR]', + 'subespañol': '[COLOR red](VOS)[/COLOR]', + 'sub': '[COLOR red](VOS)[/COLOR]', + 'ingles': '[COLOR red](VOS)[/COLOR]'} + if lang in languages: + lang = languages[lang] + + server = servertools.get_server_from_url(url) + title = "»» [COLOR yellow](%s)[/COLOR] [COLOR goldenrod](%s)[/COLOR] %s ««" % (server.title(), item.quality, lang) + # if 'google' not in url and 'directo' not in server: + + + itemlist.append(item.clone(action='play', url=url, title=title, language=lang, text_color=color3)) + + itemlist = servertools.get_servers_itemlist(itemlist) + itemlist.sort(key=lambda it: it.language, reverse=False) + + # Requerido para FilterTools + itemlist = filtertools.get_links(itemlist, item, list_language) + + # Requerido para AutoPlay + + autoplay.start(itemlist, item) + + + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios': + itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos", + title='[COLOR yellow]Añadir esta pelicula a la 
videoteca[/COLOR]', + thumbnail=thumbnail_host, contentTitle=item.contentTitle)) + + return itemlist diff --git a/plugin.video.alfa/channels/pelishd24.json b/plugin.video.alfa/channels/pelishd24.json new file mode 100644 index 00000000..2e4ef339 --- /dev/null +++ b/plugin.video.alfa/channels/pelishd24.json @@ -0,0 +1,64 @@ +{ + "id": "pelishd24", + "name": "PelisHD24", + "active": true, + "adult": false, + "language": ["lat", "cast", "eng"], + "fanart": "https://pelishd24.com/wp-content/uploads/2018/11/background.png", + "thumbnail": "https://pelishd24.com/wp-content/uploads/2018/07/pelishd24.2.png", + "banner": "", + "categories": [ + "movie", + "tvshow", + "vos", + "direct" + ], + "settings": [ + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "Latino", + "Castellano", + "English" + ] + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "Sin color", + "Perfil 5", + "Perfil 4", + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + }, + { + "id": "orden_episodios", + "type": "bool", + "label": "Mostrar los episodios de las series en orden descendente", + "default": false, + "enabled": true, + "visible": true + } + ] +} diff --git a/plugin.video.alfa/channels/pelishd24.py b/plugin.video.alfa/channels/pelishd24.py new file mode 100644 index 00000000..c4b25295 --- /dev/null +++ b/plugin.video.alfa/channels/pelishd24.py @@ -0,0 +1,464 @@ +# -*- coding: utf-8 -*- +# -*- Channel PelisHD24 -*- +# -*- Created for Alfa-addon -*- +# -*- By the Alfa Develop Group -*- + +import re +import sys +import urlparse + +from channels import autoplay +from lib import generictools +from channels import filtertools +from core import 
httptools +from core import scrapertools +from core import servertools +from core.item import Item +from core import channeltools +from core import tmdb +from platformcode import config, logger +from channelselector import get_thumb + +__channel__ = "pelishd24" + +host = "https://pelishd24.com/" + +try: + __modo_grafico__ = config.get_setting('modo_grafico', __channel__) + __perfil__ = int(config.get_setting('perfil', __channel__)) +except: + __modo_grafico__ = True + __perfil__ = 0 + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']] +if __perfil__ < 3: + color1, color2, color3, color4, color5 = perfil[__perfil__] +else: + color1 = color2 = color3 = color4 = color5 = "" + +headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], + ['Referer', host]] + +parameters = channeltools.get_channel_parameters(__channel__) +fanart_host = parameters['fanart'] +thumbnail_host = parameters['thumbnail'] + +IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST', 'English': 'VOS'} +list_language = IDIOMAS.values() +list_quality = [] +list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry'] + + +def mainlist(item): + logger.info() + autoplay.init(item.channel, list_servers, list_quality) + itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True, + viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb('movies', auto=True)), + + item.clone(title="Series", action="series", extra='serie', url=host + 'series/', + viewmode="movie_with_plot", text_blod=True, viewcontent='movies', + thumbnail=get_thumb('tvshows', auto=True), page=0), + + item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True), + text_blod=True, url=host, page=0)] + + 
autoplay.show_option(item.channel, itemlist) + return itemlist + + +def menumovies(item): + logger.info() + itemlist = [item.clone(title="Todas", action="peliculas", thumbnail=get_thumb('all', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host + 'peliculas/', viewmode="movie_with_plot"), + + item.clone(title="Estrenos", action="peliculas", thumbnail=get_thumb('estrenos', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host + '?s=trfilter&trfilter=1&years=2018', viewmode="movie_with_plot"), + + item.clone(title="Más Vistas", action="peliculas", thumbnail=get_thumb('more watched', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host + 'mas-vistas/', viewmode="movie_with_plot"), + + item.clone(title="Más Votadas", action="peliculas", thumbnail=get_thumb('more voted', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host + 'peliculas-mas-votadas/', viewmode="movie_with_plot"), + + item.clone(title="Géneros", action="genres_atoz", thumbnail=get_thumb('genres', auto=True), + text_blod=True, page=0, viewcontent='movies', + url=host, viewmode="movie_with_plot"), + + item.clone(title="A-Z", action="genres_atoz", thumbnail=get_thumb('year', auto=True), + text_blod=True, page=0, viewcontent='movies', url=host, + viewmode="movie_with_plot"), + + item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True), + text_blod=True, url=host, page=0, extra='buscarP')] + + return itemlist + + +def search(item, texto): + logger.info() + + texto = texto.replace(" ", "+") + item.url = urlparse.urljoin(item.url, "?s={0}".format(texto)) + + try: + return peliculas(item) + + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + +def peliculas(item): + logger.info() + itemlist = [] + action = '' + contentType = '' + data = httptools.downloadpage(item.url).data + data 
= re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data) + data = scrapertools.decodeHtmlentities(data) + + patron = '<article id="[^"]+" class="TPost[^<]+<a href="([^"]+)">.*?' # url + patron += '<img src="([^"]+)".*?' # img + patron += '</figure>(.*?)' # tipo + patron += '<h3 class="Title">([^<]+)</h3>.*?' # title + patron += '<span class="Year">([^<]+)</span>.*?' # year + + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedurl, scrapedthumbnail, tipo, scrapedtitle, year in matches[item.page:item.page + 30]: + title = '' + if '/serie/' in scrapedurl: + action = 'temporadas' + contentType = 'tvshow' + title = scrapedtitle + '[COLOR blue] (Serie)[/COLOR]' + else: + action = 'findvideos' + contentType = 'movie' + title = scrapedtitle + + itemlist.append(item.clone(channel=__channel__, action=action, text_color=color3, show=scrapedtitle, + url=scrapedurl, infoLabels={'year': year}, extra='peliculas', + contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail, + title=title, context="buscar_trailer", contentType=contentType)) + + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + + if item.page + 30 < len(matches): + itemlist.append(item.clone(page=item.page + 30, + title="» Siguiente »", text_color=color3)) + else: + next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">') + if next_page: + itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3)) + + return itemlist + + +def genres_atoz(item): + logger.info() + itemlist = [] + action = '' + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data) + data = scrapertools.decodeHtmlentities(data) + + if item.title == "A-Z": + patron_todas = '<ul class="AZList"(.*?)</li></ul>' + action = 'atoz' + else: + patron_todas = '<a href="#">GENERO</a>(.*?)</li></ul>' + action = 'peliculas' + + data = scrapertools.find_single_match(data, patron_todas) + patron = '<a 
href="([^"]+)">([^<]+)</a>' # url, title + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedurl, scrapedtitle in matches: + itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action=action)) + + return itemlist + + +def atoz(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data) + data = scrapertools.decodeHtmlentities(data) + + patron = '<td class="MvTbImg"> <a href="([^"]+)".*?' # url + patron += '<img src="([^"]+)".*?' # img + patron += '<strong>([^<]+)</strong> </a></td><td>([^<]+)</td>.*?' # title, year + patron += '<span class="Qlty">([^<]+)</span>' # quality + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedurl, scrapedthumbnail, scrapedtitle, year, quality in matches[item.page:item.page + 30]: + title = '' + action = '' + if '/serie/' in scrapedurl: + action = 'temporadas' + contentType = 'tvshow' + title = scrapedtitle + '[COLOR blue] (Serie)[/COLOR]' + else: + action = 'findvideos' + contentType = 'movie' + title = "%s [COLOR yellow]%s[/COLOR]" % (scrapedtitle, quality) + + itemlist.append(item.clone(channel=__channel__, action=action, text_color=color3, contentType=contentType, + url=scrapedurl, infoLabels={'year': year}, extra='peliculas', + contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail, + title=title, context="buscar_trailer", show=scrapedtitle, )) + + if item.page + 30 < len(matches): + itemlist.append(item.clone(page=item.page + 30, + title="» Siguiente »", text_color=color3)) + else: + next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">') + if next_page: + itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3)) + + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + + return itemlist + + +def series(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = 
re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data) + + patron = '<article class="TPost C">\s*<a href="([^"]+)">.*?' # url + patron += '<img src="([^"]+)".*?' # img + patron += '<h3 class="Title">([^<]+)</h3>' # title + + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]: + itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas", + contentSerieName=scrapedtitle, show=scrapedtitle, + thumbnail='https:' + scrapedthumbnail, contentType='tvshow')) + + tmdb.set_infoLabels(itemlist, __modo_grafico__) + + if item.page + 30 < len(matches): + itemlist.append(item.clone(page=item.page + 30, + title="» Siguiente »", text_color=color3)) + else: + next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">') + + if next_page: + itemlist.append(item.clone(url=next_page, page=0, + title="» Siguiente »", text_color=color3)) + + return itemlist + + +def temporadas(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<div class="[^>]+>[^<]+<span>(.*?)</span> <i' # numeros de temporadas + + matches = scrapertools.find_multiple_matches(data, patron) + if len(matches) > 1: + for scrapedseason in matches: + new_item = item.clone(action="episodios", season=scrapedseason, extra='temporadas') + new_item.infoLabels['season'] = scrapedseason + new_item.extra = "" + itemlist.append(new_item) + + tmdb.set_infoLabels(itemlist, __modo_grafico__) + + for i in itemlist: + i.title = "%s. 
%s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle']) + if i.infoLabels['title']: + # Si la temporada tiene nombre propio añadirselo al titulo del item + i.title += " - %s" % (i.infoLabels['title']) + if i.infoLabels.has_key('poster_path'): + # Si la temporada tiene poster propio remplazar al de la serie + i.thumbnail = i.infoLabels['poster_path'] + + itemlist.sort(key=lambda it: int(it.infoLabels['season'])) + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="episodios", show=item.show, category="Series", + text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host)) + + return itemlist + else: + return episodios(item) + + +def episodios(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + patron = '<td class="MvTbImg B"><a href="([^"]+)".*?' # url + patron += host + 'episode/(.*?)/">([^<]+)</a>' # title de episodios + + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedurl, scrapedtitle, scrapedname in matches: + scrapedtitle = scrapedtitle.replace('--', '0') + patron = '(\d+)x(\d+)' + match = re.compile(patron, re.DOTALL).findall(scrapedtitle) + season, episode = match[0] + + if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season): + continue + + title = "%sx%s: %s" % (season, episode.zfill(2), scrapedname) + new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title, + contentType="episode", extra='episodios') + if 'infoLabels' not in new_item: + new_item.infoLabels = {} + + new_item.infoLabels['season'] = season + new_item.infoLabels['episode'] = episode.zfill(2) + + itemlist.append(new_item) + + # TODO no hacer esto si estamos añadiendo a la videoteca + if not item.extra: + # Obtenemos los datos de todos los capitulos de 
la temporada mediante multihilos + tmdb.set_infoLabels(itemlist, __modo_grafico__) + for i in itemlist: + if i.infoLabels['title']: + # Si el capitulo tiene nombre propio añadirselo al titulo del item + i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[ + 'episode'], i.infoLabels['title']) + if i.infoLabels.has_key('poster_path'): + # Si el capitulo tiene imagen propia remplazar al poster + i.thumbnail = i.infoLabels['poster_path'] + + itemlist.sort(key=lambda it: int(it.infoLabels['episode']), + reverse=config.get_setting('orden_episodios', __channel__)) + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + # Opción "Añadir esta serie a la videoteca" + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="episodios", show=item.show, category="Series", + text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}| ", "", data) + data = scrapertools.decodeHtmlentities(data) + patron = 'data-tplayernv="Opt(.*?)"><span>[^"<]+</span>(.*?)</li>' # option, servername, lang - quality + matches = re.compile(patron, re.DOTALL).findall(data) + + for option, quote in matches: + patron = '<span>(.*?) 
-([^<]+)</span' + match = re.compile(patron, re.DOTALL).findall(quote) + lang, quality = match[0] + quality = quality.strip() + lang = lang.lower().strip() + languages = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]', + 'castellano': '[COLOR green](CAST)[/COLOR]', + 'subtitulado': '[COLOR red](VOS)[/COLOR]'} + + if lang in languages: + lang = languages[lang] + + url_1 = scrapertools.find_single_match(data, + 'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option) + new_data = httptools.downloadpage(url_1).data + new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}| ", "", new_data) + new_data = scrapertools.decodeHtmlentities(new_data) + patron1 = '<iframe width="560" height="315" src="([^"]+)"' + match1 = re.compile(patron1, re.DOTALL).findall(new_data) + + urls = scrapertools.find_single_match(new_data, '<iframe width="560" height="315" src="([^"]+)"') + servername = servertools.get_server_from_url(urls) + if 'stream.pelishd24.net' in urls: + vip_data = httptools.downloadpage(urls).data + dejuiced = generictools.dejuice(vip_data) + patron = '"file":"([^"]+)"' + match = re.compile(patron, re.DOTALL).findall(dejuiced) + for scrapedurl in match: + urls = scrapedurl + servername = 'gvideo' + if 'pelishd24.com/?trhide' in urls: + data = httptools.downloadpage(urls).data + # logger.error(texto='****hex'+data) + patron = '"file":"([^"]+)"' + match = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl in match: + urls = scrapedurl + servername = 'gvideo' + + title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % ( + servername.title(), quality, lang) + if 'embed.pelishd24.com' not in urls and 'embed.pelishd24.net' not in urls: + itemlist.append(item.clone(action='play', title=title, url=urls, language=lang, quality=quality, + text_color=color3)) + + for url in match1: + new_data = httptools.downloadpage(url).data + new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}| ", "", new_data) + new_data = 
scrapertools.decodeHtmlentities(new_data) + patron1 = '\["\d+","([^"]+)",\d+]' + match1 = re.compile(patron1, re.DOTALL).findall(new_data) + for url in match1: + url = url.replace('\\', '') + servername = servertools.get_server_from_url(url) + if 'pelishd24.net' in url or 'stream.pelishd24.com' in url: + vip_data = httptools.downloadpage(url).data + dejuiced = generictools.dejuice(vip_data) + patron = '"file":"([^"]+)"' + match = re.compile(patron, re.DOTALL).findall(dejuiced) + for scrapedurl in match: + url = scrapedurl + servername = 'gvideo' + + if 'ww3.pelishd24.com' in url: + data1 = httptools.downloadpage(url).data + url = scrapertools.find_single_match(data1, '"file": "([^"]+)"') + servername = 'gvideo' + + title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % ( + servername.title(), quality, lang) + + itemlist.append(item.clone(action='play', title=title, url=url, language=lang, quality=quality, + text_color=color3)) + + itemlist = servertools.get_servers_itemlist(itemlist) + + itemlist.sort(key=lambda it: it.language, reverse=False) + + # Requerido para FilterTools + itemlist = filtertools.get_links(itemlist, item, list_language) + + # Requerido para AutoPlay + + autoplay.start(itemlist, item) + + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios': + itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos", + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + thumbnail=thumbnail_host, contentTitle=item.contentTitle)) + + return itemlist diff --git a/plugin.video.alfa/channels/pelisplay.json b/plugin.video.alfa/channels/pelisplay.json new file mode 100644 index 00000000..50402c29 --- /dev/null +++ b/plugin.video.alfa/channels/pelisplay.json @@ -0,0 +1,93 @@ +{ + "id": "pelisplay", + "name": "PelisPlay", + "active": true, + "adult": false, + "language": ["cast", "lat"], + "fanart": 
"https://s33.postimg.cc/d3ioghaof/image.png", + "thumbnail": "https://www.pelisplay.tv/static/img/logo.png", + "banner": "https://s33.postimg.cc/cyex6xlen/image.png", + "categories": [ + "movie", + "tvshow", + "vos" + ], + "settings": [ + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "Latino" + ] + }, + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "Sin color", + "Perfil 5", + "Perfil 4", + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + }, + { + "id": "orden_episodios", + "type": "bool", + "label": "Mostrar los episodios de las series en orden descendente", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_terror", + "type": "bool", + "label": "Incluir en Novedades - terror", + "default": true, + "enabled": true, + "visible": true + } + ] +} diff --git a/plugin.video.alfa/channels/pelisplay.py b/plugin.video.alfa/channels/pelisplay.py new file mode 100644 index 00000000..06902209 --- /dev/null +++ b/plugin.video.alfa/channels/pelisplay.py @@ -0,0 +1,414 @@ +# -*- coding: utf-8 -*- +# -*- Channel PelisPlay -*- +# -*- Created for Alfa-addon -*- +# -*- By the Alfa Develop 
Group -*- + +import re +import sys +import urllib +import urlparse + +from channels import autoplay +from channels import filtertools +from core import httptools +from core import scrapertools +from core import servertools +from core.item import Item +from core import channeltools +from core import tmdb +from platformcode import config, logger +from channelselector import get_thumb + +__channel__ = "pelisplay" + +host = "https://www.pelisplay.tv/" + +try: + __modo_grafico__ = config.get_setting('modo_grafico', __channel__) + __perfil__ = int(config.get_setting('perfil', __channel__)) +except: + __modo_grafico__ = True + __perfil__ = 0 + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']] +if __perfil__ < 3: + color1, color2, color3, color4, color5 = perfil[__perfil__] +else: + color1 = color2 = color3 = color4 = color5 = "" + +headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], + ['Referer', host]] + +parameters = channeltools.get_channel_parameters(__channel__) +fanart_host = parameters['fanart'] +thumbnail_host = parameters['thumbnail'] + +IDIOMAS = {'Latino': 'LAT'} +list_language = IDIOMAS.values() +list_quality = [] +list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload'] + + + +def mainlist(item): + logger.info() + autoplay.init(item.channel, list_servers, list_quality) + itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True, + viewcontent='movie', viewmode="movie_with_plot", thumbnail=get_thumb("channels_movie.png")), + + item.clone(title="Series", action="menuseries", text_blod=True, extra='serie', mediatype="tvshow", + viewcontent='tvshow', viewmode="tvshow_with_plot", + thumbnail=get_thumb("channels_tvshow.png")), + + item.clone(title="Netflix", action="flixmovies", 
text_blod=True, extra='serie', mediatype="tvshow", + viewcontent='tvshows', viewmode="movie_with_plot", fanart='https://i.postimg.cc/jjN85j8s/netflix-logo.png', + thumbnail='http://img.app.kiwi/icon/jcbqFma-5e91cY9MlEasA-fvCRJK493MxphrqbBd8oS74FtYg00IXeOAn0ahsLprxIA'), + + item.clone(title="Buscar", action="search", text_blod=True, extra='buscar', + thumbnail=get_thumb('search.png'), url=host+'buscar')] + autoplay.show_option(item.channel, itemlist) + return itemlist + + +def menumovies(item): + logger.info() + itemlist = [item.clone(title="Estrenos", action="peliculas", text_blod=True, + viewcontent='movie', url=host + 'peliculas/estrenos', viewmode="movie_with_plot"), + item.clone(title="Más Populares", action="peliculas", text_blod=True, + viewcontent='movie', url=host + 'peliculas?filtro=visitas', viewmode="movie_with_plot"), + item.clone(title="Recíen Agregadas", action="peliculas", text_blod=True, + viewcontent='movie', url=host + 'peliculas?filtro=fecha_creacion', viewmode="movie_with_plot"), + item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año", + viewcontent='movie', url=host, viewmode="movie_with_plot"), + item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='movie', + viewcontent='movie', url=host+'peliculas', viewmode="movie_with_plot"), + item.clone(title="Buscar", action="search", text_blod=True, extra='buscarp', + thumbnail=get_thumb('search.png'), url=host+'peliculas')] + return itemlist + +def flixmovies(item): + logger.info() + itemlist = [item.clone(title="Novedades", action="peliculas", text_blod=True, url=host + 'peliculas/netflix?filtro=fecha_actualizacion', + viewcontent='movie', viewmode="movie_with_plot"), + # item.clone(title="Estrenos", action="peliculas", text_blod=True, + # viewcontent='movie', url=host + 'peliculas/estrenos', viewmode="movie_with_plot"), + item.clone(title="Más Vistas", action="peliculas", text_blod=True, + viewcontent='movie', url=host + 
'peliculas/netflix?filtro=visitas', viewmode="movie_with_plot"), + item.clone(title="Recíen Agregadas", action="peliculas", text_blod=True, + viewcontent='movie', url=host + 'peliculas/netflix?filtro=fecha_creacion', viewmode="movie_with_plot"), + item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año", + viewcontent='movie', url=host, viewmode="movie_with_plot"), + item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='movie', + viewcontent='movie', url=host+'netflix', viewmode="movie_with_plot")] + return itemlist + + +def menuseries(item): + logger.info() + itemlist = [item.clone(title="Novedades", action="series", text_blod=True, extra='serie', mediatype="tvshow", + viewcontent='tvshow', url=host + 'series', viewmode="tvshow_with_plot"), + + item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype="tvshow", + viewcontent='tvshow', url=host + 'series?filtro=visitas', viewmode="tvshow_with_plot"), + + item.clone(title="Recíen Agregadas", action="series", text_blod=True, extra='serie', mediatype="tvshow", + viewcontent='tvshow', url=host + 'series?filtro=fecha_actualizacion', viewmode="tvshow_with_plot"), + + item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='serie', + viewcontent='movie', url=host+'series', viewmode="movie_with_plot"), + item.clone(title="Buscar", action="search", text_blod=True, extra='buscars', + thumbnail=get_thumb('search.png'), url=host+'series')] + + return itemlist + + +def p_portipo(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data) + # logger.info(data) + action = '' + patron = '<li class="item"><a href="([^"]+)" class="category">.*?' 
# url + patron += '<div class="[^<]+<img class="[^"]+" src="/([^"]+)"></div><div class="[^"]+">([^<]+)</div>' + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + if item.extra == 'movie': + action = 'peliculas' + elif item.extra == 'serie': + action = 'series' + itemlist.append(item.clone(action = action, + title = scrapedtitle, + url = scrapedurl, + thumbnail=scrapedthumbnail + )) + itemlist.sort(key=lambda it: it.title) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data) + # logger.info(data) + patron = '<img class="posterentrada" src="/([^"]+)".*?' # img + patron += '<a href="([^"]+)">.*?' # url + patron += '<p class="description_poster">.*?\(([^<]+)\)</p>.*?' # year + patron += '<div class="Description"> <div>([^<]+)</div>.*?' # plot + patron += '<strong>([^<]+)</strong></h4>' # title + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedthumbnail, scrapedurl, year, plot, scrapedtitle in matches: + if item.infoLabels['plot'] == '': + item.plot = plot + + itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle, + infoLabels={"year":year}, thumbnail=host+scrapedthumbnail, + url=scrapedurl, title=scrapedtitle, plot=plot)) + + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + + pagination = scrapertools.find_single_match(data, '<li><a href="([^"]+)" rel="next">') + + if pagination: + itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »", + url=pagination, folder=True, text_blod=True, thumbnail=get_thumb("next.png"))) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = urlparse.urljoin(item.url, "?q={0}".format(texto)) + if item.extra == 'buscarp' or item.extra == 'buscars': + item.url = urlparse.urljoin(item.url, 
"?buscar={0}".format(texto)) + + try: + if item.extra == 'buscars': + return series(item) + return peliculas(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + + pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />') + if pagination: + itemlist.append(Item(channel=item.channel, action="sub_search", + title="» Siguiente »", url=pagination, thumbnail=get_thumb("next.png"))) + return itemlist + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == 'peliculas': + item.url = host + 'movies/' + elif categoria == 'infantiles': + item.url = host + "genre/animacion/" + elif categoria == 'terror': + item.url = host + "genre/terror/" + else: + return [] + + itemlist = peliculas(item) + if itemlist[-1].title == "» Siguiente »": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def series(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data) + # logger.info(data) + patron = '<img class="portada" src="/([^"]+)"><[^<]+><a href="([^"]+)".*?' 
# img, url + patron += 'class="link-title"><h2>([^<]+)</h2>' # title + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedthumbnail, scrapedurl, scrapedtitle in matches: + itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie', + url=scrapedurl, thumbnail=host+scrapedthumbnail, + contentSerieName=scrapedtitle, show=scrapedtitle, + action="temporadas", contentType='tvshow')) + tmdb.set_infoLabels(itemlist, __modo_grafico__) + + pagination = scrapertools.find_single_match(data, '<li><a href="([^"]+)" rel="next">') + + if pagination: + itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=pagination, + thumbnail=get_thumb("next.png"))) + return itemlist + + +def temporadas(item): + logger.info() + itemlist = [] + from core import jsontools + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data) + # logger.info(data) + patron = '<img class="posterentrada" src="/([^"]+)" alt="\w+\s*(\w+).*?' + patron += 'class="abrir_temporada" href="([^"]+)">' # img, season + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) > 1: + for scrapedthumbnail, temporada, url in matches: + new_item = item.clone(action="episodios", season=temporada, url=url, + thumbnail=host+scrapedthumbnail, extra='serie') + new_item.infoLabels['season'] = temporada + new_item.extra = "" + itemlist.append(new_item) + tmdb.set_infoLabels(itemlist, __modo_grafico__) + for i in itemlist: + i.title = "%s. 
%s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle']) + if i.infoLabels['title']: + # Si la temporada tiene nombre propio añadírselo al titulo del item + i.title += " - %s" % (i.infoLabels['title']) + if i.infoLabels.has_key('poster_path'): + # Si la temporada tiene poster propio remplazar al de la serie + i.thumbnail = i.infoLabels['poster_path'] + itemlist.sort(key=lambda it: it.title) + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="episodios", show=item.show, category="Series", + text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host)) + return itemlist + else: + return episodios(item) + + +def episodios(item): + logger.info() + itemlist = [] + from core import jsontools + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data) + # logger.info(data) + post_link = '%sentradas/abrir_temporada' % host + token = scrapertools.find_single_match(data, 'data-token="([^"]+)">') + data_t = scrapertools.find_single_match(data, '<a data-s="[^"]+" data-t="([^"]+)"') + data_s = scrapertools.find_single_match(data, '<a data-s="([^"]+)" data-t="[^"]+"') + post= {'t':data_t, 's':data_s, '_token':token} + post = urllib.urlencode(post) + new_data = httptools.downloadpage(post_link, post=post).data + # json_data = jsontools.load(new_data) + # logger.info(new_data) + patron = '"nepisodio":"([^"]+)",[^,]+,"ntemporada":"([^"]+)".*?"url_directa":"([^"]+)",.*?"titulo":"([^"]+)",' + + matches = re.compile(patron, re.DOTALL).findall(new_data) + for episode, season, scrapedurl, scrapedname in matches: + scrapedurl = scrapedurl.replace('\\', '') + logger.info('###name%s' % scrapedname) + + if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season): + continue + title = "%sx%s: %s" % (season, episode.zfill(2), 
scrapertools.unescape(scrapedname)) + new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title, + contentType="episode", extra='serie') + if 'infoLabels' not in new_item: + new_item.infoLabels = {} + new_item.infoLabels['season'] = season + new_item.infoLabels['episode'] = episode.zfill(2) + itemlist.append(new_item) + # TODO no hacer esto si estamos añadiendo a la videoteca + if not item.extra: + # Obtenemos los datos de todos los capítulos de la temporada mediante multihilos + tmdb.set_infoLabels(itemlist, __modo_grafico__) + for i in itemlist: + if i.infoLabels['title']: + # Si el capitulo tiene nombre propio añadírselo al titulo del item + i.title = "%sx%s: %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title']) + if i.infoLabels.has_key('poster_path'): + # Si el capitulo tiene imagen propia remplazar al poster + i.thumbnail = i.infoLabels['poster_path'] + itemlist.sort(key=lambda it: int(it.infoLabels['episode']), + reverse=config.get_setting('orden_episodios', __channel__)) + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + # Opción "Añadir esta serie a la videoteca" + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="episodios", show=item.show, category="Series", + text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host)) + return itemlist + + +def findvideos(item): + logger.info() + from lib import generictools + from core import jsontools + import urllib + import base64 + + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data) + patron = 'data-player="([^"]+)"[^>]+>([^<]+)</div>.*?' 
# data-player, servername + patron += '<td class="[^"]+">([^<]+)</td><td class="[^"]+">([^<]+)</td>' # quality, lang + matches = re.compile(patron, re.DOTALL).findall(data) + + for data_player, servername, quality, lang in matches: + post_link = '%sentradas/procesar_player' % host + token = scrapertools.find_single_match(data, 'data-token="([^"]+)">') + post= {'data':data_player, 'tipo':'videohost', '_token':token} + post = urllib.urlencode(post) + new_data = httptools.downloadpage(post_link, post=post).data + json_data = jsontools.load(new_data) + url = json_data['data'] + + if 'pelisplay.tv/embed/' in url: + new_data = httptools.downloadpage(url).data + url = scrapertools.find_single_match(new_data, '"file":"([^"]+)",').replace('\\', '') + + elif 'fondo_requerido' in url: + link = scrapertools.find_single_match(url, '=(.*?)&fondo_requerido').partition('&')[0] + post_link = '%sprivate/plugins/gkpluginsphp.php' % host + post= {'link':link} + post = urllib.urlencode(post) + new_data2 = httptools.downloadpage(post_link, post=post).data + url = scrapertools.find_single_match(new_data2, '"link":"([^"]+)"').replace('\\', '') + + lang = lang.lower().strip() + idioma = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]', + 'castellano': '[COLOR green](CAST)[/COLOR]', + 'subtitulado': '[COLOR red](VOS)[/COLOR]'} + if lang in idioma: + lang = idioma[lang] + + title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (servername.title(), quality, lang) + + itemlist.append(item.clone(channel=__channel__, title=title, action='play', language=lang, quality=quality, url=url)) + + + itemlist = servertools.get_servers_itemlist(itemlist) + itemlist.sort(key=lambda it: it.language, reverse=False) + # Requerido para FilterTools + itemlist = filtertools.get_links(itemlist, item, list_language) + # Requerido para AutoPlay + autoplay.start(itemlist, item) + + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'serie': + 
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos", + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + thumbnail=get_thumb("videolibrary_movie.png"), contentTitle=item.contentTitle)) + return itemlist diff --git a/plugin.video.alfa/channels/thumbzilla.json b/plugin.video.alfa/channels/thumbzilla.json index 09d9dfa4..fcce22f8 100644 --- a/plugin.video.alfa/channels/thumbzilla.json +++ b/plugin.video.alfa/channels/thumbzilla.json @@ -3,11 +3,11 @@ "name": "ThumbZilla", "active": true, "adult": true, - "language": "*", + "language": "en", "fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/xthearebg.jpg", - "thumbnail": "https://ci.phncdn.com/www-static/thumbzilla/images/pc/logo.png", + "thumbnail": "https://ci.phncdn.com/www-static/thumbzilla/images/pc/logo.png?cache=2018110203", "banner": "", - "categories": [ + "categories": [ "adult" ], "settings": [ @@ -35,4 +35,3 @@ } ] } - diff --git a/plugin.video.alfa/channels/thumbzilla.py b/plugin.video.alfa/channels/thumbzilla.py index d238d1f3..b17b9a65 100644 --- a/plugin.video.alfa/channels/thumbzilla.py +++ b/plugin.video.alfa/channels/thumbzilla.py @@ -44,28 +44,36 @@ def mainlist(item): logger.info() itemlist = [] itemlist.append(Item(channel=__channel__, action="videos", title="Más Calientes", url=host, - viewmode="movie", thumbnail=get_thumb("/channels_adult.png"))) + viewmode="movie", thumbnail=get_thumb("channels_adult.png"))) + itemlist.append(Item(channel=__channel__, title="Nuevas", url=host + '/newest', action="videos", viewmode="movie_with_plot", viewcontent='movies', thumbnail=get_thumb("channels_adult.png"))) - itemlist.append(Item(channel=__channel__, title="Tendencias", url=host + '/trending', + + itemlist.append(Item(channel=__channel__, title="Tendencias", url=host + '/tending', action="videos", viewmode="movie_with_plot", viewcontent='movies', thumbnail=get_thumb("channels_adult.png"))) + 
itemlist.append(Item(channel=__channel__, title="Mejores Videos", url=host + '/top', action="videos", viewmode="movie_with_plot", viewcontent='movies', thumbnail=get_thumb("channels_adult.png"))) + itemlist.append(Item(channel=__channel__, title="Populares", url=host + '/popular', action="videos", viewmode="movie_with_plot", viewcontent='movies', thumbnail=get_thumb("channels_adult.png"))) + itemlist.append(Item(channel=__channel__, title="Videos en HD", url=host + '/hd', action="videos", viewmode="movie_with_plot", viewcontent='movies', thumbnail=get_thumb("channels_adult.png"))) + itemlist.append(Item(channel=__channel__, title="Caseros", url=host + '/hd', action="videos", viewmode="movie_with_plot", viewcontent='homemade', thumbnail=get_thumb("channels_adult.png"))) + itemlist.append(Item(channel=__channel__, title="Categorías", action="categorias", url=host + '/categories/', viewmode="movie_with_plot", viewcontent='movies', thumbnail=get_thumb("channels_adult.png"))) + itemlist.append(Item(channel=__channel__, title="Buscador", action="search", url=host, thumbnail=get_thumb("channels_adult.png"), extra="buscar")) return itemlist @@ -92,6 +100,7 @@ def search(item, texto): def videos(item): logger.info() itemlist = [] + data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) patron = '<a class="[^"]+" href="([^"]+)">' # url @@ -99,15 +108,20 @@ def videos(item): patron += '<span class="title">([^<]+)</span>.*?' 
# title patron += '<span class="duration">([^<]+)</span>' # time matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedthumbnail, scrapedtitle, time in matches: title = "[%s] %s" % (time, scrapedtitle) + itemlist.append(Item(channel=item.channel, action='play', title=title, thumbnail=scrapedthumbnail, url=host + scrapedurl, contentTile=scrapedtitle, fanart=scrapedthumbnail)) + paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace('amp;', '') + if paginacion: itemlist.append(Item(channel=item.channel, action="videos", thumbnail=thumbnail % 'rarrow', title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion)) + return itemlist @@ -116,9 +130,12 @@ def categorias(item): itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| |<br>", "", data) + # logger.info(data) patron = 'class="checkHomepage"><a href="([^"]+)".*?' # url patron += '<span class="count">([^<]+)</span>' # title, vids + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl, vids in matches: scrapedtitle = scrapedurl.replace('/categories/', '').replace('-', ' ').title() title = "%s (%s)" % (scrapedtitle, vids.title()) @@ -127,17 +144,14 @@ def categorias(item): itemlist.append(Item(channel=item.channel, action="videos", fanart=thumbnail, title=title, url=url, thumbnail=thumbnail, viewmode="movie_with_plot", folder=True)) + return itemlist def play(item): itemlist = [] data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data) - patron = '<li><a class="qualityButton active" data-quality="([^"]+)">([^"]+)</a></li>' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl,calidad in matches: - title = "[COLOR yellow](%s)[/COLOR] %s" % (calidad, item.contentTile) - itemlist.append(item.clone(channel=item.channel, action="play", title=item.title , url=scrapedurl , folder=True) ) - return itemlist + url = scrapertools.find_single_match(data, 
'"quality":"[^"]+","videoUrl":"([^"]+)"').replace('\\', '') + itemlist.append(item.clone(url=url, title=item.contentTile)) + return itemlist diff --git a/plugin.video.alfa/channels/xms.py b/plugin.video.alfa/channels/xms.py index ff4fb4ef..3e4413de 100644 --- a/plugin.video.alfa/channels/xms.py +++ b/plugin.video.alfa/channels/xms.py @@ -13,6 +13,7 @@ from platformcode import config, logger __channel__ = "xms" host = 'https://xxxmoviestream.com/' +host1 = 'https://www.cam4.com/' try: __modo_grafico__ = config.get_setting('modo_grafico', __channel__) __perfil__ = int(config.get_setting('perfil', __channel__)) @@ -41,7 +42,6 @@ thumbnail = 'https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/ def mainlist(item): logger.info() - itemlist = [] itemlist.append(Item(channel=__channel__, title="Últimas", url=host + '?filtre=date&cat=0', @@ -60,32 +60,50 @@ def mainlist(item): url=host + 'categories/', viewmode="movie_with_plot", viewcontent='movies', thumbnail=thumbnail % '4')) + itemlist.append(Item(channel=__channel__, title="WebCam", action="webcamenu", + viewmode="movie_with_plot", viewcontent='movies', + thumbnail='https://ae01.alicdn.com/kf/HTB1LDoiaHsrBKNjSZFpq6AXhFXa9/-.jpg')) + itemlist.append(Item(channel=__channel__, title="Buscador", action="search", url=host, thumbnail=thumbnail % '5')) return itemlist +def webcamenu(item): + logger.info() + itemlist = [item.clone(title="Trending Cams", action="webcam", text_blod=True, url=host1, + viewcontent='movies', viewmode="movie_with_plot"), + item.clone(title="Females", action="webcam", text_blod=True, + viewcontent='movies', url=host1 + 'female', viewmode="movie_with_plot"), + item.clone(title="Males", action="webcam", text_blod=True, + viewcontent='movies', url=host1 + 'male', viewmode="movie_with_plot"), + item.clone(title="Couples", action="webcam", text_blod=True, + viewcontent='movies', url=host1 + 'couple', viewmode="movie_with_plot"), + item.clone(title="Trans", action="webcam", text_blod=True, 
extra="Películas Por año", + viewcontent='movies', url=host1 + 'transgender', viewmode="movie_with_plot")] + return itemlist + + def peliculas(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| |<br>|#038;", "", data) - # logger.info(data) patron_todos = '<div id="content">(.*?)<div id="footer"' data = scrapertools.find_single_match(data, patron_todos) - patron = 'src="([^"]+)" class="attachment-thumb_site.*?' # img - patron += '<a href="([^"]+)" title="([^"]+)".*?' #url, title - patron += '<div class="right"><p>([^<]+)</p>' # plot + patron += '<a href="([^"]+)" title="([^"]+)".*?' # url, title + patron += '<div class="right"><p>([^<]+)</p>' # plot matches = re.compile(patron, re.DOTALL).findall(data) for scrapedthumbnail, scrapedurl, scrapedtitle, plot in matches: plot = scrapertools.decodeHtmlentities(plot) itemlist.append(item.clone(channel=__channel__, action="findvideos", title=scrapedtitle.capitalize(), - url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels={"plot": plot}, fanart=scrapedthumbnail, - viewmode="movie_with_plot", folder=True, contentTitle=scrapedtitle)) + url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels={"plot": plot}, + fanart=scrapedthumbnail,viewmode="movie_with_plot", + folder=True, contentTitle=scrapedtitle)) # Extrae el paginador paginacion = scrapertools.find_single_match(data, '<a href="([^"]+)">Next ›</a></li><li>') paginacion = urlparse.urljoin(item.url, paginacion) @@ -95,6 +113,36 @@ def peliculas(item): thumbnail=thumbnail % 'rarrow', title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion)) + return itemlist + + +def webcam(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>|#038;", "", data) + patron = '<div class="profileBox">.*?<a href="/([^"]+)".*?' # url + patron += 'data-hls-preview-url="([^"]+)">.*?' # video_url + patron += 'data-username="([^"]+)".*?' # username + patron += 'title="([^"]+)".*?' 
# title + patron += 'data-profile="([^"]+)" />' # img + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, video_url, username, scrapedtitle, scrapedthumbnail in matches: + scrapedtitle = scrapedtitle.replace(' Chat gratis con webcam.', '') + + itemlist.append(item.clone(channel=__channel__, action="play", title=scrapedtitle, + url=video_url, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, + viewmode="movie_with_plot", folder=True, contentTitle=scrapedtitle)) + # Extrae el paginador + paginacion = scrapertools.find_single_match(data, '<span id="pagerSpan">\d+</span> <a href="([^"]+)"') + paginacion = urlparse.urljoin(item.url, paginacion) + + if paginacion: + itemlist.append(Item(channel=__channel__, action="webcam", + thumbnail=thumbnail % 'rarrow', + title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion)) return itemlist @@ -104,10 +152,9 @@ def categorias(item): itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| |<br>", "", data) - # logger.info(data) - patron = 'data-lazy-src="([^"]+)".*?' # img - patron += '</noscript><a href="([^"]+)".*?' # url - patron += '<span>([^<]+)</span></a>.*?' # title + patron = 'data-lazy-src="([^"]+)".*?' # img + patron += '</noscript><a href="([^"]+)".*?' # url + patron += '<span>([^<]+)</span></a>.*?' # title patron += '<span class="nb_cat border-radius-5">([^<]+)</span>' # num_vids matches = re.compile(patron, re.DOTALL).findall(data) @@ -143,16 +190,15 @@ def sub_search(item): itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| |<br>", "", data) - - patron = 'data-lazy-src="([^"]+)".*?' # img - patron += 'title="([^"]+)" />.*?' # title - patron += '</noscript><a href="([^"]+)".*?' # url + patron = 'data-lazy-src="([^"]+)".*?' # img + patron += 'title="([^"]+)" />.*?' # title + patron += '</noscript><a href="([^"]+)".*?' 
# url patron += '<div class="right"><p>([^<]+)</p>' # plot matches = re.compile(patron, re.DOTALL).findall(data) for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches: itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, plot=plot, fanart=scrapedthumbnail, - action="findvideos", thumbnail=scrapedthumbnail)) + action="findvideos", thumbnail=scrapedthumbnail)) paginacion = scrapertools.find_single_match( data, "<a href='([^']+)' class=\"inactive\">\d+</a>") @@ -168,8 +214,6 @@ def findvideos(item): itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data) - # logger.info(data) - patron = '<iframe src="[^"]+".*?<iframe src="([^"]+)" scrolling="no" frameborder="0"' matches = scrapertools.find_multiple_matches(data, patron) @@ -179,5 +223,4 @@ def findvideos(item): itemlist.append(item.clone(action='play', title=title, server=server, url=url)) - return itemlist From 6543e5512ceb8ee5f2e363c64cf718499578cb31 Mon Sep 17 00:00:00 2001 From: Alfa <30527549+alfa-addon@users.noreply.github.com> Date: Wed, 5 Dec 2018 12:56:10 -0500 Subject: [PATCH 24/24] v2.7.17 --- plugin.video.alfa/addon.xml | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml index 2485539d..b8bd9379 100755 --- a/plugin.video.alfa/addon.xml +++ b/plugin.video.alfa/addon.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="UTF-8" standalone="yes"?> -<addon id="plugin.video.alfa" name="Alfa" version="2.7.16" provider-name="Alfa Addon"> +<addon id="plugin.video.alfa" name="Alfa" version="2.7.17" provider-name="Alfa Addon"> <requires> <import addon="xbmc.python" version="2.1.0"/> <import addon="script.module.libtorrent" optional="true"/> @@ -19,13 +19,15 @@ </assets> <news>[B]Estos son los cambios para esta versión:[/B] [COLOR green][B]Arreglos[/B][/COLOR] - ¤ DivxTotal ¤ Pelismagnet ¤ Subtorrents - ¤ Todopleiculas ¤ Zonatorrent ¤ maxipelis24 - ¤ Danimados ¤ 
mediafire ¤ mp4upload - ¤ vevio ¤ BlogHorror ¤ PelisPlus - ¤ TuPeliculas ¤ VeSeriesOnline ¤ Actualizado pack +18 + ¤ Todopeliculas ¤ Maxipelis24 ¤ allcalidad + ¤ descargacineclasico ¤ porntrex ¤ seriesmetro + ¤ pedropolis ¤ thumzilla ¤ xms - ¤ Agradecimientos a @paeznet y @chivmalev por colaborar con ésta versión + [COLOR green][B]Novedades[/B][/COLOR] + ¤ cine24h ¤ hdfilmologia ¤ pelis24 + ¤ pelishd24 ¤ pelisplay + + ¤ Agradecimientos a @chivmalev por colaborar con ésta versión </news> <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>