KoD 0.5.1

-default DNS override
-new channels: streamtime and netfreex
-fixed cloudflare
-added "open in browser" option
marco
2019-11-30 20:27:09 +01:00
parent 4332859f47
commit daad0d3ddb
176 changed files with 31086 additions and 13812 deletions
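The headline change, the default DNS override, routes the add-on's hostname lookups through a public resolver instead of the system DNS, so ISP-level blocks on channel domains no longer break resolution. Below is a minimal sketch of the idea, assuming the dnspython package and a socket.getaddrinfo monkeypatch — an illustration of the technique, not KoD's actual implementation:

import socket
import dns.resolver  # third-party: dnspython (assumed here for illustration)

_resolver = dns.resolver.Resolver(configure=False)
_resolver.nameservers = ['8.8.8.8', '1.1.1.1']  # public resolvers instead of the system DNS

_orig_getaddrinfo = socket.getaddrinfo

def _overridden_getaddrinfo(host, port, *args, **kwargs):
    # Try the public resolver first; fall back to the system resolver on any failure.
    # (dnspython >= 2 API shown; older versions use .query() instead of .resolve().)
    try:
        host = _resolver.resolve(host, 'A')[0].address
    except Exception:
        pass
    return _orig_getaddrinfo(host, port, *args, **kwargs)

socket.getaddrinfo = _overridden_getaddrinfo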

View File

@@ -1,4 +1,4 @@
<addon id="plugin.video.kod" name="Kodi on Demand" provider-name="KOD Team" version="0.5">
<addon id="plugin.video.kod" name="Kodi on Demand" provider-name="KOD Team" version="0.5.1">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,12 +19,10 @@
<screenshot>resources/media/themes/ss/2.png</screenshot>
<screenshot>resources/media/themes/ss/3.png</screenshot>
</assets>
<news>-Ridefinito il modo in cui vengono scritti i canali, per assicurare migliore stabilità, debuggabilità e coerenza
-Riscritti di conseguenza molti canali, correggendo di fatto moltissimi problemi che avete segnalato
-Quando aggiungi in videoteca da fonti in più lingue (ita/sub ita) o più qualità, ti viene chiesto quale tipo di fonte vuoi.
-Per gli amanti degli anime, aggiunto VVVVID (senza bisogno di account!)
-Aggiunti i server supervideo e hdload, fixato wstream
-migliorie varie</news>
<news>-override DNS di default
-nuovi canali: streamtime e netfreex
-fixato cloudflare
-aggiunta opzione &quot;apri nel browser&quot;</news>
<description lang="it">Naviga velocemente sul web e guarda i contenuti presenti</description>
<disclaimer>[COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR]
[COLOR yellow]Kodi © is a registered trademark of the XBMC Foundation. We are not connected to or in any other way affiliated with Kodi, Team Kodi, or the XBMC Foundation. Furthermore, any software, addons, or products offered by us will receive no support in official Kodi channels, including the Kodi forums and various social networks.[/COLOR]</disclaimer>
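The previous release note above ("Ridefinito il modo in cui vengono scritti i canali") refers to the declarative channel pattern visible throughout this diff: menu and listing functions are wrapped by the support.menu / support.scrape decorators and simply return locals(), letting the framework build items from the local names. A minimal sketch of the pattern, pieced together from the channels below — the channel id and patterns are simplified assumptions, not a complete channel:

# -*- coding: utf-8 -*-
from core import support
from platformcode import config

__channel__ = 'example'  # hypothetical channel id
host = config.get_channel_url(__channel__)

@support.menu
def mainlist(item):
    # each local name (film, tvshow, search, ...) becomes a menu section
    film = ['/category/film/',
            ('Novità', ['', 'peliculas', 'update'])]
    tvshow = ['/category/serie-tv/']
    search = ''
    return locals()

@support.scrape
def peliculas(item):
    # named groups in patron map directly to item fields (url, title, ...)
    patron = r'<a href="(?P<url>[^"]+)" title="(?P<title>[^"]+)"'
    patronNext = r'<a class="next page-numbers".*?href="([^"]+)">'
    return locals()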

View File

@@ -17,27 +17,22 @@
"documentaristreamingda": "https://documentari-streaming-da.com",
"dreamsub": "https://www.dreamsub.stream",
"eurostreaming": "https://eurostreaming.pink",
"fastsubita": "https://fastsubita.com",
"fastsubita": "http://fastsubita.com",
"filmgratis": "https://www.filmaltadefinizione.net",
"filmigratis": "https://filmigratis.org",
"filmpertutti": "https://www.filmpertutti.link",
"filmsenzalimiti": "https://filmsenzalimiti.best",
"filmsenzalimiticc": "https://www.filmsenzalimiti.press",
"filmsenzalimiticc": "https://www.filmsenzalimiti.press",
"filmstreaming01": "https://filmstreaming01.com",
"guardafilm": "http://www.guardafilm.top",
"guardarefilm": "https://www.guardarefilm.red",
"guardarefilm": "https://www.guardarefilm.red",
"guardaserie_stream": "https://guardaserie.co",
"guardaseriecc": "https://guardaserie.site",
"guardaserieclick": "https://www.guardaserie.media",
"guardogratis": "https://guardogratis.net",
"ilgeniodellostreaming": "https://igds.red",
"italiafilm": "https://www.italia-film.video",
"guardaserieclick": "https://www.guardaserie.media",
"ilgeniodellostreaming": "https://igds.red",
"italiafilm": "https://www.italia-film.pw",
"italiaserie": "https://italiaserie.org",
"itastreaming": "https://itastreaming.film",
"mondolunatico": "http://mondolunatico.org",
"mondolunatico": "http://mondolunatico.org",
"mondolunatico2": "https://mondolunatico.org:443/stream",
"mondoserietv": "https://mondoserietv.com",
"piratestreaming": "https://www.piratestreaming.media",
"piratestreaming": "https://www.piratestreaming.top",
"polpotv": "https://polpo.tv",
"seriehd": "https://www.seriehd.moda",
"serietvonline": "https://serietvonline.best",
@@ -48,5 +43,6 @@
"tantifilm": "https://www.tantifilm.eu",
"toonitalia": "https://toonitalia.org",
"vedohd": "https://vedohd.video",
"vvvvid": "https://www.vvvvid.it"
"vvvvid": "https://www.vvvvid.it",
"netfreex": "https://www.netfreex.online"
}
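This JSON maps each channel id to its current base URL; the channel modules never hard-code the domain but resolve it at import time, so a domain move only needs an edit here. The lookup pattern, as it appears verbatim in the channel files in this diff:

from platformcode import config

__channel__ = 'netfreex'
host = config.get_channel_url(__channel__)  # -> "https://www.netfreex.online"
headers = [['Referer', host]]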

View File

@@ -124,3 +124,12 @@ def findvideos(item):
contentType=item.contentType,
folder=False))
return support.server(item, itemlist=itemlist)

View File

@@ -1,15 +0,0 @@
{
"id": "bleachportal",
"name": "BleachPortal",
"language": ["sub-ita"],
"active": true,
"deprecated": true,
"adult": false,
"fanart": "https://www.thetvdb.com/banners/fanart/original/74796-29.jpg",
"thumbnail": "bleachportal.png",
"banner": "bleachportal.png",
"categories": ["anime"],
"not_active":["include_in_newests", "include_in_global_search"],
"settings": []
}

View File

@@ -1,119 +0,0 @@
# -*- coding: utf-8 -*-
# Thanks to the Icarus crew
# ------------------------------------------------------------
# XBMC Plugin
# Channel for http://bleachportal.it
# ------------------------------------------------------------
import re
from core import scrapertools, httptools
from core.item import Item
from platformcode import logger
from platformcode import config
from core import support
host = "http://www.bleachportal.it"
def mainlist(item):
logger.info("[BleachPortal.py]==> mainlist")
itemlist = [Item(channel=item.channel,
action="episodi",
title= support.typo('Bleach','bold'),
url=host + "/streaming/bleach/stream_bleach.htm",
thumbnail="https://www.thetvdb.com/banners/posters/74796-14.jpg",
banner="https://www.thetvdb.com/banners/graphical/74796-g6.jpg",
fanart="https://www.thetvdb.com/banners/fanart/original/74796-30.jpg",
extra="bleach"),
Item(channel=item.channel,
action="episodi",
title=support.typo('D.Gray Man','bold'),
url=host + "/streaming/d.gray-man/stream_dgray-man.htm",
thumbnail="https://www.thetvdb.com/banners/posters/79635-1.jpg",
banner="https://www.thetvdb.com/banners/graphical/79635-g4.jpg",
fanart="https://www.thetvdb.com/banners/fanart/original/79635-6.jpg",
extra="dgrayman")]
return itemlist
def episodi(item):
logger.info("[BleachPortal.py]==> episodi")
itemlist = []
data = httptools.downloadpage(item.url).data
patron = r'<td>?[<span\s|<width="\d+%"\s]+?class="[^"]+">\D+([\d\-]+)\s?<[^<]+<[^<]+<[^<]+<[^<]+<.*?\s+?.*?<span style="[^"]+">([^<]+).*?\s?.*?<a href="\.*(/?[^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
animetitle = "Bleach" if item.extra == "bleach" else "D.Gray Man"
for scrapednumber, scrapedtitle, scrapedurl in matches:
scrapedtitle = scrapedtitle.decode('latin1').encode('utf8')
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=support.typo("%s Episodio %s" % (animetitle, scrapednumber),'bold'),
url=item.url.replace("stream_bleach.htm",scrapedurl) if "stream_bleach.htm" in item.url else item.url.replace("stream_dgray-man.htm", scrapedurl),
plot=scrapedtitle,
extra=item.extra,
thumbnail=item.thumbnail,
fanart=item.fanart,
fulltitle="%s Ep: %s | %s" % (animetitle, scrapednumber, scrapedtitle)))
if item.extra == "bleach":
itemlist.append(
Item(channel=item.channel,
action="oav",
title=support.typo("OAV e Movies",'bold color kod'),
url=item.url.replace("stream_bleach.htm", "stream_bleach_movie_oav.htm"),
extra=item.extra,
thumbnail=item.thumbnail,
fanart=item.fanart))
return list(reversed(itemlist))
def oav(item):
logger.info("[BleachPortal.py]==> oav")
itemlist = []
data = httptools.downloadpage(item.url).data
patron = r'<td>?[<span\s|<width="\d+%"\s]+?class="[^"]+">-\s+(.*?)<[^<]+<[^<]+<[^<]+<[^<]+<.*?\s+?.*?<span style="[^"]+">([^<]+).*?\s?.*?<a href="\.*(/?[^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapednumber, scrapedtitle, scrapedurl in matches:
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=support.typo(scrapednumber, 'bold'),
url=item.url.replace("stream_bleach_movie_oav.htm", scrapedurl),
plot=scrapedtitle,
extra=item.extra,
thumbnail=item.thumbnail,
fulltitle=scrapednumber + " | " + scrapedtitle))
return list(reversed(itemlist))
def findvideos(item):
logger.info("[BleachPortal.py]==> findvideos")
itemlist = []
if "bleach//" in item.url:
item.url = re.sub(r'\w+//', "", item.url)
data = httptools.downloadpage(item.url).data
if "bleach" in item.extra:
video = scrapertools.find_single_match(data, 'file: "(.*?)",')
else:
video = scrapertools.find_single_match(data, 'file=(.*?)&').rsplit('/', 1)[-1]
itemlist.append(
Item(channel=item.channel,
action="play",
title="Diretto %s" % item.title,
url=item.url.replace(item.url.split("/")[-1], "/" + video),
thumbnail=item.thumbnail,
fulltitle=item.fulltitle))
return itemlist

View File

@@ -199,11 +199,9 @@ def findvideos(item):
if item.contentType != 'movie':
return support.server(item, item.url)
else:
block = r'<div class="col-md-10">(.+?)<div class="swappable" id="links">'
patron = r'SRC="([^"]+)"'
links = re.findall(patron, block, re.IGNORECASE)
if "#" in links:
links = link.replace('#', 'speedvideo.net')
links = str(support.match(item, r'SRC="([^"]+)"', patronBlock=r'<div class="col-md-10">(.+?)<div class="swappable" id="links">')[0])
if links:
links = links.replace('#', 'speedvideo.net')
return support.server(item, links)
else:
return support.server(item)

View File

@@ -6,6 +6,6 @@
"adult": false,
"thumbnail": "cb01.png",
"banner": "cb01.png",
"categories": ["tvshow", "movie", "vos"],
"categories": ["tvshow", "movie", "vos", "documentary"],
"settings": []
}

View File

@@ -50,6 +50,9 @@ def mainlist(item):
('Per anno', ['/serietv/', 'menu', 'Serie-Tv per Anno']),
('Ultime Aggiornate', ['/serietv/', 'peliculas', 'newest'])
]
docu = [('Documentari bullet bold', ['/category/documentario/', 'peliculas']),
('HD submenu', ['category/hd-alta-definizione/documentario-hd/', 'peliculas'])
]
return locals()

View File

@@ -6,7 +6,7 @@
"adult": false,
"thumbnail": "cinemalibero.png",
"banner": "cinemalibero.png",
"categories": ["movie","tvshow","anime"],
"not_active": ["include_in_newest"],
"categories": ["movie","tvshow"],
"not_active": ["include_in_newest_anime"],
"settings": []
}

View File

@@ -3,29 +3,15 @@
# Channel for 'cinemaLibero'
# ------------------------------------------------------------
"""
These are notes for the beta testers.
On this channel, in:
- Search (within the channel) and Global Search
- TV series and channel news
- News -> TV series
the following entries will not be present:
- 'Add to video library',
- 'Download series'
- SPORT TITLES ARE NOT PRESENT IN GLOBAL OR CHANNEL NEWS!!!!
Therefore their absence must NOT be reported as an ERROR in the test.
25 titles per news listing.
DO NOT TEST THE SPORT AND ANIME SECTIONS, THEY HAVE PROBLEMS!!!
They were removed from the listing, but titles may still appear in search or among the news.
It is not an error if they misbehave!!! DO NOT CONSIDER THEM!
News. Sections where the channel is present:
- FILM
Notices:
- Any notices for the testers
Further info:
- FILM - SERIES
"""
@@ -35,52 +21,62 @@ from core import httptools, support, scrapertoolsV2
from core.item import Item
from platformcode import config
list_servers = ['akstream', 'wstream', 'backin', 'verystream', 'openload', 'streamango']
list_servers = ['akstream', 'wstream', 'backin']
list_quality = ['default']
__channel__ = "cinemalibero"
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
@support.menu
def mainlist(item):
support.log()
film = ['/category/film/',
('Novità', ['', 'peliculas', 'update']),
('Generi', ['', 'genres'])
]
tvshow = ['/category/serie-tv/'
]
Anime = [(support.typo('Anime', 'bullet bold'),['/category/anime-giapponesi/', 'peliculas', 'anime', 'tvshow'])
]
## Sport = [(support.typo('Sport', 'bullet bold'), ['/category/sport/', 'peliculas', '', 'tvshow'])
## Anime = [(support.typo('Anime', 'bullet bold'),['/category/anime-giapponesi/', 'peliculas', 'anime', 'tvshow'])
## ]
## news = [('Novità Serie-Anime', ['/aggiornamenti-serie-tv/', 'peliculas', 'update', 'tvshow'])]
## Sport = [(support.typo('Sport', 'bullet bold'), ['/category/sport/', 'peliculas', 'sport', 'tvshow'])
## ]
news = [('Novità Serie-Anime', ['/aggiornamenti-serie-tv/', 'peliculas', 'update', 'tvshow'])]
search = ''
return locals()
@support.scrape
#def video(item):
def peliculas(item):
support.log()
#debug = True
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub('\n|\t', ' ', data)
data = re.sub(r'>\s+<', '> <', data)
GENERI = scrapertoolsV2.find_multiple_matches(data, r'<a class="dropdown-item" href="[^"]+" title="([A-z]+)"')
patronBlock = r'<div class="container">.*?class="col-md-12[^"]*?">(?P<block>.*?)<div class=(?:"container"|"bg-dark ")>'
if item.contentType == 'movie':
patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>\s?(?P<rating>[\d\.]+)?[^>]+>[^>]+>(?P<title>.+?)\(?(?P<year>\d+)?\)?<[^>]+>[^>]+>(?P<quality>[^<]+)?<'
elif item.args == 'anime':
patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>\s?(?P<rating>[\d\.]+)?[^>]+>[^>]+>(?P<title>.+?)\(?(?P<year>\d+)?\)?<[^>]+>[^>]+>(?:.+?[^fFiInNeE]+?\(?(?P<lang>[sSuUbBiItTaA]+)\)?.+?)<'
elif item.args == 'update':
action = 'select'
patron = r'<div class="card-body p-0">\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">\s<div class="titolo">(?P<title>.+?)(?: &#8211; Serie TV)?(?:\([sSuUbBiItTaA\-]+\))?[ ]?(?P<year>\d{4})?</div>[ ]<div class="genere">(?:[\w]+?\.?\s?[\s|S]?[\dx\-S]+?\s\(?(?P<lang>[iItTaA]+|[sSuUbBiItTaA\-]+)\)?\s?(?P<quality>[HD]+)?|.+?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?</div>)'
patron = r'<a href="(?P<url>[^"]+)" title="(?P<title>.+?)(?:[ ]\[(?P<lang>[sSuUbB\-iItTaA]+)\])?(?:[ ]\((?P<year>\d{4})\))?"\salt="[^"]+"\sclass="[^"]+"(?: style="background-image: url\((?P<thumb>.+?)\)">)?<div class="voto">[^>]+>[^>]+>.(?P<rating>[\d\.a-zA-Z\/]+)?[^>]+>[^>]+>[^>]+>(?:<div class="genere">(?P<quality>[^<]+)</div>)?'
if item.args == 'update':
patronBlock = r'<section id="slider">(?P<block>.*?)</section>'
patron = r'<a href="(?P<url>(?:https:\/\/.+?\/(?P<title>[^\/]+[a-zA-Z0-9\-]+)(?P<year>\d{4})))/".+?url\((?P<thumb>[^\)]+)\)">'
elif item.contentType == 'tvshow':
action = 'episodios'
patron = r'<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>(?P<title>.+?)(?:[ ]\((?P<year>\d{4})\))?<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
if item.args == 'anime':
anime = True
patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>\s?(?P<rating>[\d\.]+)?[^>]+>[^>]+>(?P<title>.+?)\(?(?P<year>\d+)?\)?<[^>]+>[^>]+>(?:.+?[^fFiInNeE]+?\(?(?P<lang>[sSuUbBiItTaA]+)\)?.+?)<'
elif item.args == 'update':
patron = r'<a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">\s<div class="titolo">(?P<title>.+?)(?: &#8211; Serie TV)?(?:\([sSuUbBiItTaA\-]+\))?[ ]?(?P<year>\d{4})?</div>[ ]<div class="genere">(?:[\w]+?\.?\s?[\s|S]?[\dx\-S]+?\s\(?(?P<lang>[iItTaA]+|[sSuUbBiItTaA\-]+)\)?\s?(?P<quality>[HD]+)?|.+?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?</div>)'
pagination = 25
else:
patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>(?P<title>[^<]+)<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
#search
patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>(?P<title>.+?)(?:[ ]\((?P<year>\d{4})\))?<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
def itemHook(item):
if item.lang2:
@@ -89,56 +85,60 @@ def peliculas(item):
item.contentLanguage = item.lang2
item.title += support.typo(item.lang2, '_ [] color kod')
if item.contentType == 'movie':
item.action = 'findvideos'
elif item.args == 'anime' or item.args == 'update' or item.args == 'search':
item.action = 'select'
elif item.contentType == 'tvshow':
item.extra = 'serie'
dataBlock = httptools.downloadpage(item.url, headers=headers).data
genere = scrapertoolsV2.find_single_match(dataBlock, r'rel="category tag">([a-zA-Z0-9]+).+?<')
if genere.lower() in str(GENERI).lower():
item.contentType = 'movie'
action = 'findvideos'
if item.args == 'update':
item.title = item.title.replace('-',' ')
elif genere.lower() == 'serie':
item.action = 'episodios'
item.contentType = 'tvshow'
elif genere.lower() == 'anime':
blockAnime = scrapertoolsV2.find_single_match(dataBlock, r'<div id="container" class="container">(.+?)<div style="margin-left')
if 'stagione' in blockAnime.lower() or 'episodio' in blockAnime.lower() or 'saga' in blockAnime.lower():
anime = True
item.action = 'episodios'
item.contentType = 'tvshow'
else:
item.contentType = 'movie'
item.action = 'findvideos'
item.url = scrapertoolsV2.find_single_match(blockAnime, r'<span class="txt_dow">(?:.+?)?Streaming:(?:.+?)?</span>(.*?)<div style="margin-left:')
else:
item.action = 'select'
# Everything else!!
pass
return item
patronNext = r'<a class="next page-numbers".*?href="([^"]+)">'
#debug = True
return locals()
@support.scrape
def episodios(item): # Questa def. deve sempre essere nominata episodios
support.log()
def episodios(item):
if item.extra == 'serie' or item.contentType == 'tvshow':
if item.args == 'anime':
support.log("Anime :", item)
blacklist = ['Clipwatching', 'Verystream', 'Easybytez', 'Flix555', 'Cloudvideo']
patron = r'(Stagione (?P<season>\d+))?.+?<a target=(?P<url>[^>]+>(?P<title>.+?(?P<episode>\d+)))(?:[:]?.+?)(?:</a></p>|</a><br />)'
patronBlock = r'Stagione (?P<season>\d+)</span><br />(?P<block>.*?)(?:<div style="margin-left:|<span class="txt_dow">)'
item.contentType = 'tvshow'
item.contentSerieName = item.fulltitle
else:# item.extra == 'serie':
support.log("Serie :", item)
patron = r'(?P<episode>\d+(?:&#215;|×)?\d+\-\d+|\d+(?:&#215;|×)\d+)[;]?[ ]?(?:(?P<title>[^<]+)(?P<url>.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br />|</a></p>)'
patronBlock = r'<p><strong>(?P<block>(?:.+?[Ss]tagione.+?(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?(?:|.+?|</strong>)(/?:</span>)?</p>.*?</p>)'
item.contentType = 'tvshow'
item.contentSerieName = item.fulltitle
elif item.args == 'anime':
support.log("Anime :", item)
blacklist = ['Clipwatching', 'Verystream', 'Easybytez', 'Flix555']
#patron = r'(?:href="[ ]?(?P<url>[^"]+)"[^>]+>(?P<title>[^<]+))<|(?P<episode>\d+(?:&#215;|×)?\d+\-\d+|\d+(?:&#215;|×)\d+)[;]?(?:(\4[^<]+)(\2.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br />|</a></p>)'
#patron = r'<a target=.+?href="(?P<url>[^"]+)"[^>]+>(?P<title>(Epis|).+?(?P<episode>\d+)?)(?:\((?P<lang>Sub ITA)\))?</a>(?:<br />)?'
patron = r'<a target=(?P<url>.+?(?:rel="noopener noreferrer">(?P<title>[^<]+)))</a>.+?(?:</a></p>|</a><br />)'
patronBlock = r'Streaming.+?:(?P<block>.*?)</div>'
#patronBlock = r'(?:<p>)?(?P<block>.*?)(?:</a><br /> |</p><div)'
item.contentType = 'tvshow'
item.contentSerieName = item.fulltitle
else:
support.log('extra = else --- select = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
show=item.fulltitle,
contentType='movie'))
#debug = True
return locals()
@support.scrape
def genres(item):
support.log()
action='peliculas'
patron_block=r'<div id="bordobar" class="dropdown-menu(?P<block>.*?)</li>'
@@ -147,46 +147,6 @@ def genres(item):
return locals()
def select(item):
support.log()
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertoolsV2.find_single_match(data, r'Streaming\s?[\w]+?:(.*?)<\/div>')
if re.findall('rel="category tag">serie', data, re.IGNORECASE):
support.log('select = ### è una serie ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
contentSerieName = item.fulltitle,
url=item.url,
extra='serie',
contentType='episode'))
elif re.findall('rel="category tag">anime', data, re.IGNORECASE):
if re.findall('episodio', block, re.IGNORECASE) or re.findall('episodi streaming', block, re.IGNORECASE) or \
re.findall('numero stagioni', data, re.IGNORECASE):
support.log('select = ### è un anime ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
contentSerieName = item.fulltitle,
url=item.url,
args='anime',
contentType='episode'))
else:
support.log('select anime ELSE = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
contentType='movie'))
else:
support.log('select ELSE = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
contentType='movie'))
def search(item, texto):
support.log(item.url,texto)
item.url = host + "/?s=" + texto
@@ -209,29 +169,26 @@ def newest(categoria):
if categoria == 'peliculas':
item.url = host+'/category/film/'
item.contentType = 'movie'
## item.action = 'peliculas'
## itemlist = peliculas(item)
## elif categoria == 'series':
## item.contentType = 'tvshow'
## item.args = 'update'
## item.url = host+'/aggiornamenti-serie-tv/'
elif categoria == 'series' or categoria == 'anime':
item.args = 'update'
item.url = host+'/aggiornamenti-serie-tv/'
item.action = 'peliculas'
itemlist = peliculas(item)
if itemlist[-1].action == 'peliculas':
itemlist.pop()
## if itemlist[-1].action == 'peliculas':
## itemlist.pop()
# Continue the search on error
except:
import sys
for line in sys.exc_info():
support.log('newest log: ', {0}.format(line))
support.log('newest log: ', (line))
return []
return itemlist
def findvideos(item):
support.log('findvideos ->', item)
if item.contentType == 'movie':
if item.contentType == 'movie' and item.args != 'anime':
return support.server(item)
else:
return support.server(item, data= item.url)

View File

@@ -1,70 +0,0 @@
{
"id": "filmsenzalimiti",
"name": "Filmsenzalimiti",
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "filmsenzalimiti.png",
"banner": "filmsenzalimiti.png",
"categories": ["movie","tvshow"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "2", "5", "10" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
}

View File

@@ -1,222 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for Filmsenzalimiti
# ------------------------------------------------------------
"""
Only the defs for support.menu and support.scrape have been converted;
do not submit this for the test.
Test only once the conversion is complete.
"""
import re
from core import scrapertools, servertools, httptools, support
from core.item import Item
from platformcode import config
from platformcode import logger
from specials import autoplay
__channel__ = 'filmsenzalimiti'
host = config.get_channel_url(__channel__)
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'streamango', 'vidoza', 'okru']
list_quality = ['1080p', '720p', '480p', '360']
checklinks = config.get_setting('checklinks', 'filmsenzalimiti')
checklinks_number = config.get_setting('checklinks_number', 'filmsenzalimiti')
headers = [['Referer', host]]
def mainlist(item):
logger.info('[filmsenzalimiti.py] mainlist')
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [Item(channel=item.channel,
action='video',
title='Film',
contentType='movie',
url=host,
thumbnail= ''),
Item(channel=item.channel,
action='video',
title='Novità',
contentType='movie',
url=host + '/category/nuove-uscite',
thumbnail=''),
Item(channel=item.channel,
action='video',
title='In Sala',
contentType='movie',
url=host + '/category/in-sala',
thumbnail=''),
Item(channel=item.channel,
action='video',
title='Sottotitolati',
contentType='movie',
url=host + '/category/sub-ita',
thumbnail=''),
Item(channel=item.channel,
action='sottomenu',
title='[B]Categoria[/B]',
contentType='movie',
url=host,
thumbnail=''),
Item(channel=item.channel,
action='search',
extra='tvshow',
title='[B]Cerca...[/B]',
contentType='movie',
thumbnail='')]
autoplay.show_option(item.channel, itemlist)
return itemlist
def search(item, texto):
logger.info('[filmsenzalimiti.py] search')
item.url = host + '/?s=' + texto
try:
return cerca(item)
# Continue the search on error.
except:
import sys
for line in sys.exc_info():
logger.error('%s' % line)
return []
def sottomenu(item):
logger.info('[filmsenzalimiti.py] sottomenu')
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li class="cat-item.*?<a href="([^"]+)">(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=item.channel,
action='video',
title=scrapedtitle,
url=scrapedurl))
# Remove 'Film' from the submenu
itemlist.pop(0)
return itemlist
@support.scrape
def video(item):
logger.info('[filmsenzalimiti.py] video')
itemlist = []
patron = '<div class="col-mt-5 postsh">.*?<a href="(?P<url>[^"]+)" '\
'title="(?P<title>[^"]+)">.*?<span class="rating-number">(?P<rating>.*?)<.*?<img src="(?P<thumb>[^"]+)"'
patronNext = '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right"'
## support.scrape(item, itemlist, patron, ['url', 'title', 'rating', 'thumb'], patronNext=patronNext)
## return itemlist
return locals()
def cerca(item):
logger.info('[filmsenzalimiti.py] cerca')
itemlist = []
data = httptools.downloadpage(item.url).data.replace('\t','').replace('\n','')
logger.info('[filmsenzalimiti.py] cerca ' + data)
patron = '<div class="list-score">(.*?)<.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedrating, scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
scrapedrating = scrapertools.decodeHtmlentities(scrapedrating)
itemlist.append(
Item(channel=item.channel,
action='findvideos',
title=scrapedtitle + ' (' + scrapedrating + ')',
fulltitle=scrapedtitle,
url=scrapedurl,
show=scrapedtitle,
contentType=item.contentType,
thumbnail=scrapedthumbnail, tipo='movie'))
patron = '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right"'
next_page = scrapertools.find_single_match(data, patron)
if next_page != '':
itemlist.append(
Item(channel=item.channel,
action='video',
title='[COLOR lightgreen]' + config.get_localized_string(30992) + '[/COLOR]',
contentType=item.contentType,
url=next_page))
return itemlist
def findvideos(item):
logger.info('[filmsenzalimiti.py] findvideos')
itemlist = support.hdpass_get_servers(item)
# "Add to video library" link
if item.contentType == 'movie':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findservers':
itemlist.append(
Item(channel=item.channel, title='[COLOR lightblue][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findservers", contentTitle=item.contentTitle))
# Needed to filter the links
if checklinks:
itemlist = servertools.check_list_links(itemlist, checklinks_number)
# Needed for FilterTools
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Needed for AutoPlay
autoplay.start(itemlist, item)
return itemlist
def play(item):
itemlist = servertools.find_video_items(data=item.url)
return itemlist
def newest(categoria):
logger.info('[filmsenzalimiti.py] newest' + categoria)
itemlist = []
item = Item()
try:
## change the values 'peliculas, infantiles, series, anime, documentales' to the
# ones that apply here, in the .py and in the .json ###
if categoria == 'peliculas':
item.url = host
itemlist = video(item)
if 'Successivo>>' in itemlist[-1].title:
itemlist.pop()
# Continue the search on error
except:
import sys
for line in sys.exc_info():
logger.error('{0}'.format(line))
return []
return itemlist

View File

@@ -1,44 +0,0 @@
{
"id": "guardarefilm",
"name": "Guardarefilm",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/guardarefilm.png",
"bannermenu": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/guardarefilm.png",
"categories": ["tvshow","movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,313 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Thanks to the Icarus crew
# Channel for guardarefilm
# ----------------------------------------------------------
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger, config
__channel__ = 'guardarefilm'
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
def mainlist(item):
logger.info("kod.guardarefilm mainlist")
itemlist = [Item(channel=item.channel,
title="[COLOR azure]Novita'[/COLOR]",
action="peliculas",
url="%s/streaming-al-cinema/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR azure]HD[/COLOR]",
action="peliculas",
url="%s/film-streaming-hd/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR azure]Popolari[/COLOR]",
action="pelis_top100",
url="%s/top100.html" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR azure]Categorie[/COLOR]",
action="categorias",
url=host+'/streaming-film/',
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR azure]Animazione[/COLOR]",
action="peliculas",
url="%s/streaming-cartoni-animati/" % host,
thumbnail="http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"),
Item(channel=item.channel,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
extra="movie",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),
Item(channel=item.channel,
title="[COLOR azure]Serie TV[/COLOR]",
action="peliculas_tv",
extra="tvshow",
url="%s/serie-tv-streaming/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR yellow]Cerca Serie TV...[/COLOR]",
action="search",
extra="tvshow",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
def newest(categoria):
logger.info("kod.guardarefilm newest" + categoria)
itemlist = []
item = Item()
try:
if categoria == "film":
item.url = host + "/streaming-al-cinema/"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Continue the search on error
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def categorias(item):
logger.info("kod.guardarefilm categorias")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
# Narrow search by selecting only the combo
bloque = scrapertools.find_single_match(data, '<ul class="reset dropmenu">(.*?)</ul>')
# The categories are the options for the combo
patron = '<li><a href="([^"]+)">(.*?)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
scrapedurl = urlparse.urljoin(item.url, scrapedurl)
scrapedthumbnail = ""
scrapedplot = ""
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot))
return itemlist
def search(item, texto):
logger.info("[guardarefilm.py] " + item.url + " search " + texto)
section = ""
if item.extra == "tvshow":
section = "0"
elif item.extra == "movie":
section = "1"
item.url = '%s?do=search_advanced&q=%s&section=%s&director=&actor=&year_from=&year_to=' % (host, texto, section)
try:
if item.extra == "movie":
return peliculas(item)
if item.extra == "tvshow":
return peliculas_tv(item)
# Continue the search on error
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def peliculas(item):
logger.info("kod.guardarefilm peliculas")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data
# Extract the contents
patron = '<div class="poster"><a href="([^"]+)".*?><img src="([^"]+)".*?><span.*?</div>\s*'
patron += '<div.*?><a.*?>(.*?)</a></div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="episodios" if item.extra == "tvshow" else "findvideos",
contentType="movie",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=urlparse.urljoin(host, scrapedthumbnail),
plot=scrapedplot,
folder=True))
# Pagination
patronvideos = '<div class="pages".*?<span>.*?<a href="([^"]+)">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def peliculas_tv(item):
logger.info("kod.guardarefilm peliculas")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data
# Extract the contents
patron = '<div class="poster"><a href="([^"]+)".*?><img src="([^"]+)".*?><span.*?</div>\s*'
patron += '<div.*?><a.*?>(.*?)</a></div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="episodios" if item.extra == "tvshow" else "findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=urlparse.urljoin(host, scrapedthumbnail),
plot=scrapedplot,
folder=True))
# Pagination
patronvideos = '<div class="pages".*?<span>.*?<a href="([^"]+)">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=item.channel,
action="peliculas_tv",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def pelis_top100(item):
logger.info("kod.guardarefilm peliculas")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data
# Extract the contents
patron = r'<span class="top100_title"><a href="([^"]+)">(.*?\(\d+\))</a>'
matches = re.compile(patron).findall(data)
for scrapedurl, scrapedtitle in matches:
html = httptools.downloadpage(scrapedurl, headers=headers).data
start = html.find("<div class=\"textwrap\" itemprop=\"description\">")
end = html.find("</div>", start)
scrapedplot = html[start:end]
scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedthumbnail = scrapertools.find_single_match(html, r'class="poster-wrapp"><a href="([^"]+)"')
itemlist.append(
Item(channel=item.channel,
action="episodios" if item.extra == "tvshow" else "findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=urlparse.urljoin(host, scrapedthumbnail),
plot=scrapedplot,
folder=True,
fanart=host + scrapedthumbnail))
return itemlist
def episodios(item):
logger.info("kod.guardarefilm episodios")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
patron = r'<li id="serie-[^"]+" data-title="Stai guardando: ([^"]+)">'
patron += r'[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(.*?)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="episode",
title=scrapedtitle,
url=scrapedurl,
thumbnail=item.thumbnail,
extra=item.extra,
fulltitle=item.fulltitle,
show=item.show))
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
def findvideos(item):
logger.info("kod.guardarefilm findvideos")
# Load the page
data = item.url if item.contentType == "episode" else httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = item.channel
videoitem.contentType = item.contentType
return itemlist

View File

@@ -1,36 +0,0 @@
{
"id": "guardaseriecc",
"name": "Guardaserie.cc",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "https:\/\/raw.githubusercontent.com\/costaplus\/images\/master\/posters\/guardaseriecc.png",
"bannermenu": "https:\/\/raw.githubusercontent.com\/costaplus\/images\/master\/posters\/guardaseriecc.png",
"categories": ["tvshow"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,263 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Thanks to the Icarus crew
# Channel for guardaseriecc
#
# ----------------------------------------------------------
import re
from core import httptools, scrapertools, servertools
from core import tmdb
from core.item import Item
from lib import unshortenit
from platformcode import logger, config
__channel__ = 'guardaseriecc'
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
def mainlist(item):
logger.info("Alfa.leserietv mainlist")
itemlist = [Item(channel=item.channel,
action="lista_serie",
title="[COLOR azure]Tutte le serie[/COLOR]",
url="%s/serietv/" % host,
thumbnail=thumbnail_lista,
fanart=FilmFanart),
Item(channel=item.channel,
title="[COLOR azure]Categorie[/COLOR]",
action="categoria",
url=host,
thumbnail=thumbnail_categoria,
fanart=FilmFanart),
Item(channel=item.channel,
action="search",
title="[COLOR orange]Cerca...[/COLOR]",
thumbnail=thumbnail_cerca,
fanart=FilmFanart)]
return itemlist
def categoria(item):
logger.info("[Alfa].[guardareseriecc] [categoria]")
itemlist = []
patron = '<li class="cat-item cat-item.*?"><a href="(.*?)".*?>(.*?)</a>'
data = httptools.downloadpage(item.url, headers=headers).data
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(Item(channel=item.channel,
action="lista_serie",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=item.thumbnail,
fulltitle=scrapedtitle,
show=scrapedtitle, viewmode="movie"))
return itemlist
def lista_serie(item):
logger.info("[Alfa].[guardareseriecc] [lista_serie]")
itemlist = []
patron = '<div.*?class="poster">[^<]+<img.*?src="(.*?)".*?alt="(.*?)"[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<a.*?href="(.*?)">'
data = httptools.downloadpage(item.url, headers=headers).data
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
scrapedtitle = scrapedtitle.split("(")[0]
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
itemlist.append(
Item(channel=item.channel,
action="episodios",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle,
show=scrapedtitle, viewmode="movie"))
# Pagination
# ===========================================================
patron = 'class="current">.*?</span><a href=\'(.*?)\''
matches = scrapertools.find_single_match(data, patron)
logger.debug("pag " + matches)
# ===========================================================
if len(matches) > 0:
itemlist.append(
Item(channel=item.channel,
action="lista_serie",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=matches,
thumbnail=thumbnail_successivo,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def episodios(item):
logger.info("[Alfa].[guardareseriecc] [stagione]")
itemlist = []
patron = '<iframe.*class="metaframe rptss".*?src="(.*?)".*?frameborder=".*?".*?scrolling=".*?".*?allowfullscreen>.*?</iframe>'
data = httptools.downloadpage(item.url, headers=headers).data
elenco = scrapertools.find_single_match(data, patron)
patron = '</i>.*?Stagioni</a>.*?</ul>[^<]+<select.*?name="sea_select"'
data = httptools.downloadpage(elenco, headers=headers).data
select = scrapertools.find_single_match(data, patron)
patron = '<a.*?href="(.*?)".*?><i.*?<\/i>(.*?)</a></li>'
stagione = scrapertools.find_multiple_matches(select, patron)
scrapertools.printMatches(stagione)
for stagioneurl, stagionetitle in stagione:
patron = '</i>.*?Episodio</a>(.*?)<select name="ep_select"'
data = httptools.downloadpage(stagioneurl, headers=headers).data
elenco = scrapertools.find_single_match(data, patron, 0)
patron = '<a href="(.*?)" ><i class="fa.*?"></i>(.*?)</a></li>'
episodi = scrapertools.find_multiple_matches(elenco, patron)
for scrapedurl, scrapedtitle in episodi:
scrapedtitle = stagionetitle + "x" + scrapedtitle.replace(" ", "").zfill(2)
itemlist.append(Item(channel=item.channel,
action="findvideos",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=item.thumbnail,
fanart=item.fanart,
plot=item.plot,
fulltitle=scrapedtitle,
contentType="episode",
show=scrapedtitle, viewmode="movie"))
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentType="episode",
show=item.show))
return itemlist
def findvideos(item):
logger.info("[Alfa].[guardareseriecc] [findvideos]")
itemlist = []
listurl = set()
patron = r'<select.*?style="width:100px;" class="dynamic_select">(.*?)</select>'
data = httptools.downloadpage(item.url, headers=headers).data
elenco = scrapertools.find_single_match(data, patron, 0)
patron = '<a class="" href="(.*?)">(.*?)</a>'
elenco_link = scrapertools.find_multiple_matches(elenco, patron)
for scrapedurl, scrapedtitle in elenco_link:
data = httptools.downloadpage(scrapedurl, headers=headers).data
if 'protectlink' in data:
urls = scrapertools.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
for url in urls:
url = url.decode('base64')
# drop the last character, it does not belong
url = unshortenit.unwrap_30x_only(url[:-1])
listurl.add(url)
if listurl:
itemlist = servertools.find_video_items(data=str(listurl))
for videoitem in itemlist:
videoitem.title = item.title + '[COLOR orange][B]' + videoitem.title + '[/B][/COLOR]'
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = item.channel
videoitem.contentType = item.contentType
return itemlist
def search(item, texto):
logger.info("[Alfa].[guardareseriecc][search] " + texto)
item.url = host + "/?s=" + texto
try:
return ricerca(item)
# Continue the search on error.
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def ricerca(item):
logger.info("[Alfa].[guardareseriecc][ricerca] ")
itemlist = []
patron = '<div class="result-item">[^>]+>[^>]+>[^>]+>[^<]+<a href="(.*?)">[^<]+<img src="(.*?)" alt="(.*?)" '
data = httptools.downloadpage(item.url, headers=headers).data
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapedtitle.split("(")[0]
itemlist.append(
Item(channel=item.channel,
action="episodios",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle,
show=scrapedtitle, viewmode="movie"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
FilmFanart = "https://superrepo.org/static/images/fanart/original/script.artwork.downloader.jpg"
ThumbnailHome = "https://upload.wikimedia.org/wikipedia/commons/thumb/8/81/Dynamic-blue-up.svg/580px-Dynamic-blue-up.svg.png"
thumbnail_novita = "http://www.ilmioprofessionista.it/wp-content/uploads/2015/04/TVSeries3.png"
thumbnail_lista = "http://www.ilmioprofessionista.it/wp-content/uploads/2015/04/TVSeries3.png"
thumbnail_categoria = "https://farm8.staticflickr.com/7562/15516589868_13689936d0_o.png"
thumbnail_top = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
thumbnail_cerca = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
thumbnail_successivo = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
'''
def search(item, texto):
logger.info("[Alfa].[guardareseriecc][search] " + texto)
itemlist = []
item.url = host + "/?s=" + texto
patron = '<div class="result-item">[^>]+>[^>]+>[^>]+>[^<]+<a href="(.*?)">[^<]+<img src="(.*?)" alt="(.*?)" '
data = httptools.downloadpage(item.url, headers=headers).data
matches = scrapertools.find_multiple_matches(data,patron)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedtitle = scrapedtitle.split("(")[0]
itemlist.append(
Item(channel=item.channel,
action="stagione",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle,
show=scrapedtitle, viewmode="movie")))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
'''

View File

@@ -1,11 +0,0 @@
{
"id": "guardogratis",
"name": "GuardoGratis",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "guardogratis.png",
"bannermenu": "guardogratis.png",
"categories": ["movie","tvshow"],
"settings": []
}

View File

@@ -1,221 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Thanks to the Icarus crew
# Channel for guardogratis
#
# ----------------------------------------------------------
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import logger, config
__channel__ = "guardogratis"
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
def mainlist(item):
logger.info("[guardogratis.py] mainlist")
# Main options
itemlist = [Item(channel=item.channel,
action="list_titles",
title="[COLOR azure]Film[/COLOR]",
url="%s/movies/" % host,
extra="movie",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="list_titles",
title="[COLOR azure]Top Film[/COLOR]",
url="%s/top-imdb/" % host,
extra="movie",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="categorie",
title="[COLOR azure]Categorie[/COLOR]",
url="%s" % host,
extra="categorie",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="list_titles",
title="[COLOR azure]Serie Tv[/COLOR]",
url="%s/series/" % host,
extra="tvshow",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="search",
title="[COLOR yellow]Cerca Film[/COLOR]",
extra="movie",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),
Item(channel=item.channel,
action="search",
title="[COLOR yellow]Cerca SerieTV[/COLOR]",
extra="tvshow",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
def list_titles(item):
logger.info("[guardogratis.py] list_titles")
itemlist = []
tipo='movie'
if 'tvshow' in item.extra: tipo='tv'
if item.url == "":
item.url = host
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data
patronvideos = '<div data-movie-id=.*?href="([^"]+)".*?data-original="([^"]+)".*?<h2>([^<]+)<\/h2>.*?[I,T]MDb:\s*([^<]+)<\/div>'
matches = re.compile(patronvideos, re.DOTALL).finditer(data)
for match in matches:
scrapedurl = urlparse.urljoin(item.url, match.group(1))
scrapedthumbnail = urlparse.urljoin(item.url, match.group(2))
scrapedthumbnail = scrapedthumbnail.replace(" ", "%20")
rate=' IMDb: [[COLOR orange]%s[/COLOR]]' % match.group(4) if match.group(4)!='N/A'else ''
scrapedtitle = scrapertools.unescape(match.group(3))
#scrapedtitle = scrapertools.unescape(match.group(3))+rate
itemlist.append(
Item(channel=item.channel,
action="findvideos" if not 'tvshow' in item.extra else 'serietv',
contentType="movie" if not 'tvshow' in item.extra else 'serie',
fulltitle=scrapedtitle,
show=scrapedtitle,
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
extra=item.extra,
viewmode="movie_with_plot"))
nextpage_regex=''
if item.extra in "movies,tvshow":
nextpage_regex='<div id="pagination" style="margin: 0;">.*?active.*?href=\'([^\']+)\'.*?</div>'
elif item.extra=="categorie":
nextpage_regex='<li class=\'active\'>.*?href=\'([^\']+)\'.*?</a></li>'
if nextpage_regex:
next_page = scrapertools.find_single_match(data, nextpage_regex)
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="list_titles",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url="%s" % next_page,
extra=item.extra,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def search(item, texto):
logger.info("[guardogratis.py] search")
item.url = host + "/?s=" + texto
try:
if item.extra == "movie":
return list_titles(item)
if item.extra == "tvshow":
return list_titles(item)
# Continue the search on error
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorie(item):
logger.info("[guardogratis.py] categorie")
itemlist = []
if item.url == "":
item.url = host
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data
patronvideos = '<li id="menu-item-.*?category.*?href="([^"]+)">([^"]+)</a>'
matches = re.compile(patronvideos, re.DOTALL).finditer(data)
for match in matches:
scrapedurl = urlparse.urljoin(item.url, match.group(1))
scrapedtitle = match.group(2)
itemlist.append(
Item(channel=item.channel,
action="list_titles",
title=scrapedtitle,
url=scrapedurl,
extra=item.extra,
folder=True))
return itemlist
def serietv(item):
logger.info("[guardogratis.py] serietv")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = '<a href="([^"]+)">Episode[^<]+</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = scrapedurl
scrapedtitle = scrapedtitle.replace(host, "")
scrapedtitle = scrapedtitle.replace("episode/", "")
scrapedtitle = scrapedtitle.replace("/", "")
scrapedtitle = scrapedtitle.replace("-", " ")
scrapedtitle = scrapedtitle.title()
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True))
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="serietv",
show=item.show))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info("[guardogratis.py] findvideos")
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data
itemlist = servertools.find_video_items(data=data)
for i in itemlist:
tab=re.compile('<div\s*id="(tab[^"]+)"[^>]+>[^>]+>[^>]+src="http[s]*:%s[^"]+"'%i.url.replace('http:','').replace('https:',''), re.DOTALL).findall(data)
qual=''
if tab:
qual=re.compile('<a\s*href="#%s">([^<]+)<'%tab[0], re.DOTALL).findall(data)[0].replace("'","")
qual="[COLOR orange]%s[/COLOR] - "%qual
i.title='%s[COLOR green][B]%s[/B][/COLOR] - %s'%(qual,i.title[2:],item.title)
i.channel=__channel__
i.fulltitle=item.title
return itemlist

View File

@@ -1,36 +0,0 @@
{
"id": "itastreaming",
"name": "ItaStreaming",
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/itastreaming.png",
"bannermenu": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/itastreaming.png",
"categories": ["movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,295 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Thanks to the Icarus crew
# Channel for itastreaming
# ----------------------------------------------------------
import re
from core import scrapertools, httptools, tmdb, support
from core.item import Item
from platformcode import logger, config
__channel__ = 'itastreaming'
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
def mainlist(item):
logger.info("[itastreaming.py] mainlist")
itemlist = [
Item(channel=item.channel,
title="[COLOR azure]Home[/COLOR]",
action="fichas",
url=host,
thumbnail=""),
Item(channel=item.channel,
title="[COLOR azure]Nuove uscite[/COLOR]",
action="fichas",
url=host + "/nuove-uscite/",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR azure]Film per Genere[/COLOR]",
action="genere",
url=host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR azure]Film per Qualita'[/COLOR]",
action="quality",
url=host,
thumbnail="http://files.softicons.com/download/computer-icons/disks-icons-by-wil-nichols/png/256x256/Blu-Ray.png"),
Item(channel=item.channel,
title="[COLOR azure]Film A-Z[/COLOR]",
action="atoz",
url=host + "/tag/a/",
thumbnail="http://i.imgur.com/IjCmx5r.png"),
Item(channel=item.channel,
title="[COLOR orange]Cerca...[/COLOR]",
action="search",
extra="movie",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
def newest(categoria):
logger.info("[itastreaming.py] newest" + categoria)
itemlist = []
item = Item()
try:
if categoria == "film":
item.url = host + "/nuove-uscite/"
item.action = "fichas"
itemlist = fichas(item)
if itemlist[-1].action == "fichas":
itemlist.pop()
# Continue the search even on error
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def search(item, texto):
logger.info("[itastreaming.py] " + item.url + " search " + texto)
item.url = host + "/?s=" + texto
try:
return searchfilm(item)
# Continue the search even on error
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def searchfilm(item):
logger.info("[itastreaming.py] fichas")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data
# fix - quality
data = re.sub(
r'<div class="wrapperImage"[^<]+<a',
'<div class="wrapperImage"><fix>SD</fix><a',
data
)
# fix - IMDB
data = re.sub(
r'<h5> </div>',
'<fix>IMDB: 0.0</fix>',
data
)
patron = '<li class="s-item">.*?'
patron += 'src="([^"]+)".*?'
patron += 'alt="([^"]+)".*?'
patron += 'href="([^"]+)".*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
# ------------------------------------------------
scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
# ------------------------------------------------
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=scrapedtitle,
contentType="movie",
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle,
show=scrapedtitle))
# Pagination
next_page = scrapertools.find_single_match(data, "href='([^']+)'>Seguente &rsaquo;")
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="searchfilm",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=next_page,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def genere(item):
logger.info("[itastreaming.py] genere")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = '<ul class="sub-menu">(.+?)</ul>'
data = scrapertools.find_single_match(data, patron)
patron = '<li[^>]+><a href="([^"]+)">(.*?)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapedtitle.replace('&amp;', '-')
itemlist.append(
Item(channel=item.channel,
action="fichas",
title=scrapedtitle,
url=scrapedurl,
folder=True))
return itemlist
def atoz(item):
logger.info("[itastreaming.py] genere")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = '<div class="generos">(.+?)</ul>'
data = scrapertools.find_single_match(data, patron)
patron = '<li>.*?'
patron += 'href="([^"]+)".*?'
patron += '>([^"]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapedtitle.replace('&amp;', '-')
itemlist.append(
Item(channel=item.channel,
action="fichas",
title=scrapedtitle,
url=scrapedurl,
folder=True))
return itemlist
def quality(item):
logger.info("[itastreaming.py] genere")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = '<a>Qualità</a>(.+?)</ul>'
data = scrapertools.find_single_match(data, patron)
patron = '<li id=".*?'
patron += 'href="([^"]+)".*?'
patron += '>([^"]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapedtitle.replace('&amp;', '-')
itemlist.append(
Item(channel=item.channel,
action="fichas",
title=scrapedtitle,
url=scrapedurl,
folder=True))
return itemlist
def fichas(item):
logger.info("[itastreaming.py] fichas")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data
# fix - quality
data = re.sub(
r'<div class="wrapperImage"[^<]+<a',
'<div class="wrapperImage"><fix>SD</fix><a',
data
)
# fix - IMDB
data = re.sub(
r'<h5> </div>',
'<fix>IMDB: 0.0</fix>',
data
)
patron = '<div class="item">.*?'
patron += 'href="([^"]+)".*?'
patron += 'title="([^"]+)".*?'
patron += '<img src="([^"]+)".*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
# ------------------------------------------------
scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
# ------------------------------------------------
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle,
show=scrapedtitle))
# Pagination
next_page = scrapertools.find_single_match(data, "href='([^']+)'>Seguente &rsaquo;")
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="fichas",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=next_page,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info("[italiafilmvideohd.py] findvideos")
itemlist = support.hdpass_get_servers(item)
return itemlist


@@ -41,7 +41,6 @@ def search(item, text):
action = 'findvideos'
else:
action = 'episodios'
item.contentType = 'tvshow'
try:
search = text
data = support.match(item, headers=headers)[1]

channels/netfreex.json Normal file

@@ -0,0 +1,12 @@
{
"id": "netfreex",
"name": "Netfreex",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "netfreex.png",
"banner": "netfreex.png",
"categories": ["tvshow", "movie", "anime"],
"not_active": ["include_in_newest_peliculas", "include_in_newest_anime", "include_in_newest_italiano", "include_in_newest_series"],
"settings": []
}

channels/netfreex.py Normal file

@@ -0,0 +1,70 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per netfreex
# ------------------------------------------------------------
from core import support
from core.item import Item
from platformcode import logger, config
__channel__ = "netfreex"
host = config.get_channel_url(__channel__)
headers = ""
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['wstream']
list_quality = ['1080p', 'HD', 'SD', 'CAM']
@support.menu
def mainlist(item):
film = ['/film',
('Generi', ['', 'menu', 'genres']),
('Anno', ['', 'menu', 'releases']),
]
tvshow = ['/serietv']
anime = ['/genere/anime']
return locals()
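# @support.menu builds the channel menu from the names defined above: each of
# film / tvshow / anime is a list whose first element is the section path and whose
# tuples are (label, [path, action, args]) sub-entries (convention inferred from the
# channels in this commit, e.g. tantifilm further below).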
def search(item, text):
logger.info("[vedohd.py] " + item.url + " search " + text)
item.url = item.url + "/?s=" + text
return support.dooplay_search(item)
def peliculas(item):
if 'anime' in item.url:
return support.dooplay_peliculas(item, True)
else:
return support.dooplay_peliculas(item, False)
def episodios(item):
return support.dooplay_get_episodes(item)
def findvideos(item):
itemlist = []
for link in support.dooplay_get_links(item, host):
if link['title'] != 'Guarda il trailer':
logger.info(link['title'])
itemlist.append(
Item(channel=item.channel,
action="play",
url=link['url'],
fulltitle=item.fulltitle,
thumbnail=item.thumbnail,
show=item.show,
quality=link['title'],
contentType=item.contentType,
folder=False))
return support.server(item, itemlist=itemlist)
@support.scrape
def menu(item):
return support.dooplay_menu(item, item.args)


@@ -227,7 +227,7 @@ def findvideos(item):
if urls:
for url in urls:
url, c = unshortenit.unshorten(url)
data += url + '\n'
data += url.encode('utf8') + '\n'
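# Note: unshorten() can return a unicode URL; on Python 2, adding it to the byte
# string `data` would force an implicit ascii decode, hence the explicit encode('utf8').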
itemlist += servertools.find_video_items(data=data)


@@ -10,4 +10,4 @@
"not_active":[],
"default_off":["include_in_newest"],
"settings": []
}
}


@@ -2,52 +2,10 @@
"id": "streamtime",
"name": "StreamTime",
"language": ["ita"],
"active": false,
"active": true,
"adult": false,
"thumbnail": "",
"thumbnail": "streamtime.png",
"banner": "streamtime.png",
"categories": ["tvshow", "movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero di link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
"settings": []
}


@@ -3,6 +3,12 @@ from core import support, httptools, scrapertoolsV2
from core.item import Item
from platformcode import config, logger
"""
Note for testers: this is not a 'traditional' channel. It is a Telegram channel, whose contents (films/series) are mixed together (and sometimes there are sponsored messages),
so the page listing will not be 'uniform' at all (it depends on how each preview is presented)
"""
__channel__ = "streamtime"
host = config.get_channel_url(__channel__)
headers = [['Referer', 'org.telegram.messenger']]
@@ -20,47 +26,45 @@ def mainlist(item):
@support.scrape
def peliculas(item):
patron = """tgme_widget_message_photo_wrap blured.*?image:url\("(?P<thumbnail>[^"]+).*?(?:🎥|🎬)(?P<title>.*?)(?:🎥|🎬).*?(?:Audio(?:</b>)?: (?P<lang>.*?<br>))?.*?Anno(?:</b>)?: (?P<year>[0-9]{4}).*?(?:<b>Stream</b>|Risoluzione|<b>Tipo</b>): (?P<quality>[^<]+).*?tgme_widget_message_inline_button url_button" href="(?P<url>[^"]+)"""
patron = """tgme_widget_message_photo_wrap.*?image:url\("(?P<thumbnail>[^"]+).*?//telegram\.org/img/emoji/40/(?:F09F8EAC|F09F8EA5)\.png"\)">.*?</i>\s?(?:<b>)?(?P<title>[^<]+).*?(?:Audio(?:</b>)?: (?P<lang>.*?<br>))?.*?Anno(?:</b>)?: (?P<year>[0-9]{4}).*?(?:<b>Stream</b>|Risoluzione|<b>Tipo</b>|Tipo|Stream): (?P<quality>[^<]+).*?tgme_widget_message_inline_button url_button" href="(?P<url>[^"]+)"""
def itemlistHook(itemlist):
retItemlist = []
# filter by type
for i in itemlist:
if item.contentType == 'movie':
if '/Film/' in i.url or 'Stream-' in i.url:
retItemlist.append(i)
else:
if '/SerieTv/' in i.url:
retItemlist.append(i)
if '/Film/' in i.url or 'Stream-' in i.url:
i.contentType = 'movie'
if '/SerieTv/' in i.url:
i.contentType = 'tvshow'
i.action = 'episodios'
if item.contentType == i.contentType or item.contentType == 'list': # list = global search when there is only one result type
retItemlist.append(i)
# drop duplicate entries
if item.contentType != 'movie':
if item.contentType != 'movie' and not item.cercaSerie:
nonDupl = []
# support.dbg()
for i in retItemlist:
for nd in nonDupl:
if i.title == nd.title:
if i.fulltitle == nd.fulltitle:
break
else:
daAgg = i
spl1 = i.url.split('-')
stagione1 = spl1[-2]
nEp1 = int(spl1[-1])
for i2 in retItemlist[1:]:
if i.title == i2.title:
spl2 = i2.url.split('-')
stagione2 = spl2[-2]
nEp2 = int(spl2[-1])
if stagione1 == stagione2 and nEp2 > nEp1:
daAgg = i2
nonDupl.append(daAgg)
nonDupl.append(i)
retItemlist = nonDupl
return retItemlist[::-1]
# debug = True
patronNext = 'tgme_widget_message_photo_wrap blured" href="([^"]+)'
def fullItemlistHook(itemlist):
msgId = int(itemlist[-1].url.split('/')[-1])
itemlist[-1].url = host + '?before=' + str(msgId) + '&after=' + str(msgId-20)
return itemlist
# in search results we pretend there are no "next pages": handling them would be messy (and they are fairly unlikely)
if item.action != 'search' and item.action:
patronNext = 'tgme_widget_message_photo_wrap blured" href="([^"]+)'
# grab the id of the last message and make the next-page link fetch the 20 preceding messages
def fullItemlistHook(itemlist):
msgId = int(itemlist[-1].url.split('/')[-1])
itemlist[-1].url = host + '?before=' + str(msgId) + '&after=' + str(msgId-20)
return itemlist
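# e.g. if the last message shown has id 1234, the next page URL becomes
# <host>?before=1234&after=1214, i.e. the preview of the 20 preceding messages.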
# needed to strip assorted html from the title (done on fulltitle only)
# def itemHook(item):
# item.contentTitle = item.fulltitle
# item.show = item.fulltitle
# return item
if item.contentType == 'tvshow':
action = 'episodios'
@@ -79,29 +83,64 @@ def search(item, texto):
return []
def newest(categoria):
item = Item()
if categoria == "series":
item.contentType = 'tvshow'
item.url = host + '?q=%23SerieTv'
else:
item.contentType = 'movie'
item.url = host + '?q=%23Film'
return peliculas(item)
# search for the title, so that every message containing single episodes or whole seasons shows up
def episodios(item):
domain, id, season, episode = scrapertoolsV2.find_single_match(item.url, r'(https?://[a-z0-9.-]+).*?/([^-/]+)-S([0-9]+)-([0-9]+)$')
url = item.url
item.cercaSerie = True
itemlist = search(item, item.fulltitle.replace("'", ""))
stagioni = {}
for i in itemlist[:-1]:
spl1 = i.url.split('-')
if len(spl1) > 3:
st = spl1[1] + '-' + spl1[2]
else:
st = spl1[-2]
nEp = int(spl1[-1])
if st not in stagioni.keys():
stagioni[st] = nEp
elif nEp > stagioni[st]:
stagioni[st] = nEp
itemlist = []
for n in range(1, int(episode)):
url = domain + '/play_s.php?s=' + id + '-S' + season + '&e=' + str(n)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=str(int(season)) + 'x' + str(n) + support.typo(item.quality, '-- [] color kod'),
url=url,
fulltitle=item.fulltitle,
thumbnail=item.thumbnail,
show=item.show,
quality=item.quality,
contentType=item.contentType,
folder=False,
args={'id': id, 'season': season, 'episode': episode}))
domain, id = scrapertoolsV2.find_single_match(url, r'(https?://[a-z0-9.-]+)/[^/]+/([^-/]+)')
for st in sorted(stagioni.keys()):
season = st[1:]
episode = stagioni[st]
for n in range(1, int(episode)):
url = domain + '/play_s.php?s=' + id + '-S' + season + '&e=' + str(n)
if '-' in season: # see https://stpgs.ml/SerieTv/Atypical-S01-8-8.html
season = season.split('-')[0]
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=str(int(season)) + 'x' + str(n) + support.typo(item.quality, '-- [] color kod'),
url=url,
fulltitle=item.fulltitle,
thumbnail=item.thumbnail,
show=item.show,
quality=item.quality,
contentType='episode',
folder=True,
args={'id': id, 'season': season, 'episode': episode}))
support.videolibrary(itemlist, item)
return itemlist
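# Example of the aggregation above (URL scheme assumed from the code, not verified):
#   .../SerieTv/Atypical-S01-8  -> stagioni = {'S01': 8}   (season S01, newest episode 8)
# episodios() then emits one findvideos item per season/episode pair derived from the
# newest message seen for that season.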
def findvideos(item):
# support.dbg()
domain = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+')
if item.contentType == 'movie':
id = item.url.split('/')[-1]
@@ -109,33 +148,29 @@ def findvideos(item):
else:
url = item.url
id = item.args['id']
season = item.args['season']
episode = item.args['episode']
res = support.match(item, 'src="([^"]+)">.*?</video>', url=url, headers=[['Referer', domain]])
season = str(item.args['season'])
episode = str(item.args['episode'])
res = support.match(item, 'src="([^"]+)"[^>]*></video>', url=url, headers=[['Referer', domain]])
itemlist = []
support.dbg()
if res[0]:
itemlist.append(
Item(channel=item.channel,
action="play",
title='stpgs.ml' + support.typo(item.quality, '-- [] color kod'),
title='contentful',
url=res[0][0],
server='directo',
fulltitle=item.fulltitle,
thumbnail=item.thumbnail,
show=item.show,
quality=item.quality,
contentType=item.contentType,
folder=False))
download = itemlist[0].clone()
if item.contentType == 'movie':
download.url = downPrefix + id
else:
download.url = downPrefix + id + 'S' + season + '-' + episode
itemlist.append(download)
contentType=item.contentType))
# download = itemlist[0].clone()
# if item.contentType == 'movie':
# download.url = downPrefix + id
# else:
# download.url = downPrefix + id + '-S' + season + '-' + episode
# itemlist.append(download)
else:
# google drive...
pass
support.videolibrary(itemlist, item)
return support.controls(itemlist, item, True, True)
return support.server(item, itemlist=itemlist)


@@ -2,72 +2,227 @@
# ------------------------------------------------------------
# Canale per Tantifilm
# ------------------------------------------------------------
"""
Only the defs for support.menu and support.scrape have been converted;
do not submit this for testing.
Test only once the conversion is complete
"""
import re
import urlparse
from core import scrapertoolsV2, httptools, tmdb, support
from core import scrapertoolsV2, httptools, tmdb, support,servertools
from core.item import Item
from core.support import menu, log, aplay
from core.support import menu, log
from platformcode import logger
from specials import autorenumber
from platformcode import config
from platformcode import config, unify
from lib.unshortenit import unshorten_only
from lib import unshortenit
host = ''
headers = ''
def findhost():
global host, headers
permUrl = httptools.downloadpage('https://www.tantifilm.info/', follow_redirects=False).data
host = scrapertoolsV2.find_single_match(permUrl, r'<h2 style="text-align: center;"><a href="([^"]+)">Il nuovo indirizzo di Tantifilm è:</a></h2>')
if host.endswith('/'):
host = host[:-1]
headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'streamango', 'vidlox', 'youtube']
list_quality = ['default']
__channel__ = "tantifilm"
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
@support.menu
def mainlist(item):
log()
film = ['/film/',
('Al Cinema', ['/watch-genre/al-cinema/', 'peliculas', 'movie']),
('HD',['/watch-genre/altadefinizione/', 'peliculas', 'movie']),
('Categorie', ['', 'category', 'movie'])
findhost()
#top = [(support.typo('Novità Film/Serie/Anime/Altro', 'bold'),['/film/'])]
top = [('Novità Film/Serie/Anime/Altro', ['/film/', 'peliculas', 'all'])]
film = ['/watch-genre/film-aggiornati/',
('Al Cinema', ['/watch-genre/al-cinema/']),
('HD', ['/watch-genre/altadefinizione/']),
('Sub-ITA', ['/watch-genre/sub-ita/'])
]
tvshow = ['/watch-genre/serie-tv/',
('HD', ['/watch-genre/serie-altadefinizione/', 'peliculas']),
('Miniserie', ['/watch-genre/miniserie/', 'peliculas', 'serie']),
('Programmi TV', ['/watch-genre/programmi-tv/', 'peliculas'])
]
tvshow = ['/serie-tv/',
('HD', ['/watch-genre/serie-altadefinizione/']),
('Miniserie', ['/watch-genre/miniserie-1/']),
('Programmi TV', ['/watch-genre/programmi-tv/']),
#('LIVE', ['/watch-genre/live/'])
]
pinco = [('Anime', ['/watch-genre/anime/', 'peliculas', 'anime'])]
anime = ['/watch-genre/anime/'
]
search = ''
return locals()
##def mainlist(item):
@support.scrape
def peliculas(item):
log()
findhost()
if item.args == 'search':
patron = r'<a href="(?P<url>[^"]+)" title="Permalink to\s(?P<title>[^"]+) \((?P<year>[^<]+)\).*?".*?<img[^s]+src="(?P<thumb>[^"]+)".*?<div class="calitate">\s*<p>(?P<quality>[^<]+)<\/p>'
# support.regexDbg(item, patron, headers)
else:
patronNext = r'<a class="nextpostslink" rel="next" href="([^"]+)">'
patron = r'<div class="mediaWrap mediaWrapAlt">\s?<a href="(?P<url>[^"]+)"(?:[^>]+>|)>?\s?<img[^s]+src="([^"]+)"[^>]+>\s?<\/a>[^>]+>[^>]+>[^>]+>(?P<title>.+?)(?:[ ]<lang>[sSuUbB\-iItTaA]+)?(?:[ ]?\((?P<year>[\-\d+]+)\)).[^<]+[^>]+><\/a>.+?<p>\s*(?P<quality>[a-zA-Z-0-9\.]+)\s*<\/p>[^>]+>'
patronBlock = r'<div id="main_col">(?P<block>.*?)<!\-\- main_col \-\->'
if item.args != 'all' and item.args != 'search':
action = 'findvideos' if item.extra == 'movie' else 'episodios'
item.contentType = 'movie' if item.extra == 'movie' else 'tvshow'
else:
def itemHook(item):
item.action = 'episodios'
item.contentType = 'tvshow'
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub('\n|\t', ' ', data)
data = re.sub(r'>\s+<', '> <', data)
check = scrapertoolsV2.find_single_match(data, r'<div class="category-film">\s+<h3>\s+(.*?)\s+</h3>\s+</div>')
if 'sub' in check.lower():
item.contentLanguage = 'Sub-ITA'
item.title += support.typo('Sub-ITA', '_ [] color kod')
support.log("CHECK : ", check)
if 'anime' in check.lower():
support.log('select = ### it is an anime ###')
item.action = 'episodios'
anime = True
args='anime'
elif 'serie' in check.lower():
pass
else:
support.log('select ELSE = ### it is a movie ###')
item.action = 'findvideos'
item.contentType='movie'
return item
#debug = True
return locals()
@support.scrape
def episodios(item):
log()
findhost()
data_check = httptools.downloadpage(item.url, headers=headers).data
data_check = re.sub('\n|\t', ' ', data_check)
data_check = re.sub(r'>\s+<', '> <', data_check)
patron_check = r'<iframe src="([^"]+)" scrolling="no" frameborder="0" width="626" height="550" allowfullscreen="true" webkitallowfullscreen="true" mozallowfullscreen="true">'
item.url = scrapertoolsV2.find_single_match(data_check, patron_check)
patronBlock = r'Episodio<\/a>.*?<ul class="nav navbar-nav">(?P<block>.*?)<\/ul>'
patron = r'<a href="(?P<url>[^"]+)"\s*>\s*<i[^>]+><\/i>\s*(?P<episode>\d+)<\/a>'
def itemHook(item):
item.contentType = 'tvshow'
url_season = item.url.rpartition('/')
support.log("ITEM URL: ", url_season[0])
seasons = support.match(item, r'<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>', r'Stagioni<\/a>.*?<ul class="nav navbar-nav">(.*?)<\/ul>', headers=headers, url=url_season[0])[0]
for season_url, season in seasons:
support.log("ITEM URL2: ", url_season[0],' - ', item.url)
if season_url[0] in item.url:
item.title = support.typo(season+'x'+unify.remove_format(item.title), 'bold')
## item.infoLabels['title'] = item.fulltitle if item.infoLabels['title'] == '' else item.infoLabels['title']
## item.infoLabels['tvshowtitle'] = item.fulltitle if item.infoLabels['tvshowtitle'] == '' else item.infoLabels['tvshowtitle']
break
return item
#debug = True
return locals()
def player_or_not(item):
return item
def category(item):
log()
findhost()
blacklist = ['Serie TV Altadefinizione', 'HD AltaDefinizione', 'Al Cinema', 'Serie TV', 'Miniserie', 'Programmi Tv', 'Live', 'Trailers', 'Serie TV Aggiornate', 'Aggiornamenti', 'Featured']
itemlist = support.scrape(item, '<li><a href="([^"]+)"><span></span>([^<]+)</a></li>', ['url', 'title'], headers, blacklist, patron_block='<ul class="table-list">(.*?)</ul>', action='peliculas')
return support.thumb(itemlist)
def anime(item):
log()
itemlist = []
seasons = support.match(item, r'<div class="sp-body[^"]+">(.*?)<\/div>')[0]
for season in seasons:
episodes = scrapertoolsV2.find_multiple_matches(season, r'<a.*?href="([^"]+)"[^>]+>([^<]+)<\/a>(.*?)<(:?br|\/p)')
for url, title, urls, none in episodes:
urls = scrapertoolsV2.find_multiple_matches(urls, '<a.*?href="([^"]+)"[^>]+>')
for url2 in urls:
url += url2 + '\n'
#log('EP URL',url)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType=item.contentType,
title=support.typo(title + ' - ' + item.fulltitle,'bold'),
url=url,
fulltitle=title + ' - ' + item.show,
show=item.show,
thumbnail=item.thumbnail,
args=item.args))
autorenumber.renumber(itemlist, item,'bold')
support.videolibrary(itemlist, item, 'color kod bold')
return itemlist
def search(item, texto):
log(texto)
findhost()
item.url = host + "/?s=" + texto
try:
item.args = 'search'
return peliculas(item)
# Continue the search even on error
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
##def search_peliculas(item):
## log()
## itemlist = []
##
## menu(itemlist, 'Film', 'peliculas', host + '/film/', 'movie', args='movie')
## menu(itemlist, 'Film Al Cinema submenu', 'peliculas', host + '/watch-genre/al-cinema/', 'movie')
## menu(itemlist, 'Film HD submenu', 'peliculas', host + '/watch-genre/altadefinizione/', 'movie')
## menu(itemlist, 'Film Per Categoria submenu', 'category', host, 'movie')
## menu(itemlist, 'Cerca film... submenu color kod', 'search', contentType='movie', args='findvideos')
## menu(itemlist, 'Serie TV', 'peliculas', host + '/watch-genre/serie-tv/', contentType='episode')
## menu(itemlist, 'Serie TV HD submenu', 'peliculas', host + '/watch-genre/serie-altadefinizione/', contentType='episode')
## menu(itemlist, 'Miniserie submenu', 'peliculas', host + '/watch-genre/miniserie/', contentType='episode', args='serie')
## menu(itemlist, 'Programmi TV submenu', 'peliculas', host + '/watch-genre/programmi-tv/', contentType='episode')
## menu(itemlist, 'Anime submenu', 'peliculas', host + '/watch-genre/anime/', contentType='episode', args='anime')
## menu(itemlist, 'Cerca Serie TV... submenu color kod', 'search', contentType='episode', args='episodios')
## aplay(item, itemlist, list_servers, list_quality)
## action = 'findvideos' if item.extra == 'movie' else 'episodios'
##
## data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data.replace('\t','').replace('\n','')
## log(data)
## patron = r'<a href="([^"]+)" title="Permalink to\s([^"]+) \(([^<]+)\).*?".*?<img[^s]+src="([^"]+)".*?<div class="calitate">\s*<p>([^<]+)<\/p>'
## matches = re.compile(patron, re.MULTILINE).findall(data)
##
## for url, title, year, thumb, quality in matches:
## infoLabels = {}
## infoLabels['year'] = year
## title = scrapertoolsV2.decodeHtmlentities(title)
## quality = scrapertoolsV2.decodeHtmlentities(quality)
## longtitle = title + support.typo(quality,'_ [] color kod')
## itemlist.append(
## Item(channel=item.channel,
## action=action,
## contentType=item.contentType,
## fulltitle=title,
## show=title,
## title=longtitle,
## url=url,
## thumbnail=thumb,
## infoLabels=infoLabels,
## args=item.args))
##
## tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
##
## return itemlist
@@ -97,192 +252,93 @@ def newest(categoria):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def search(item, texto):
log(texto)
item.url = host + "/?s=" + texto
try:
return search_peliculas(item)
# Continue the search even on error
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def search_peliculas(item):
log()
itemlist = []
action = 'findvideos' if item.extra == 'movie' else 'episodios'
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data.replace('\t','').replace('\n','')
log(data)
patron = r'<a href="([^"]+)" title="Permalink to\s([^"]+) \(([^<]+)\).*?".*?<img[^s]+src="([^"]+)".*?<div class="calitate">\s*<p>([^<]+)<\/p>'
matches = re.compile(patron, re.MULTILINE).findall(data)
for url, title, year, thumb, quality in matches:
infoLabels = {}
infoLabels['year'] = year
title = scrapertoolsV2.decodeHtmlentities(title)
quality = scrapertoolsV2.decodeHtmlentities(quality)
longtitle = title + support.typo(quality,'_ [] color kod')
itemlist.append(
Item(channel=item.channel,
action=action,
contentType=item.contentType,
fulltitle=title,
show=title,
title=longtitle,
url=url,
thumbnail=thumb,
infoLabels=infoLabels,
args=item.args))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@support.scrape
def category(item):
log()
action = 'peliculas'
blacklist = ['Serie TV Altadefinizione', 'HD AltaDefinizione', 'Al Cinema',
'Serie TV', 'Miniserie', 'Programmi Tv', 'Live', 'Trailers',
'Serie TV Aggiornate', 'Aggiornamenti', 'Featured', 'Ultimi Film Aggiornati']
patron = r'<li><a href="(?P<url>[^"]+)"><span></span>(?P<title>[^<]+)</a></li>'
patronBlock = r'<ul class="table-list">(?P<block>.*?)</ul>'
return locals()
## itemlist = support.scrape(item, '<li><a href="([^"]+)"><span></span>([^<]+)</a></li>', ['url', 'title'], headers, blacklist, patron_block='<ul class="table-list">(.*?)</ul>', action='peliculas')
## return support.thumb(itemlist)
@support.scrape
def peliculas(item):
log()
action = 'findvideos' if item.extra == 'movie' else 'episodios'
if item.args == 'movie' or item.extra == 'movie':
patron = r'<div class="mediaWrap mediaWrapAlt">[^<]+<a href="(?P<url>[^"]+)" title="Permalink to\s(?P<title>[^"]+) \((?P<year>[^<]+)\).*?"[^>]+>[^<]+<img[^s]+src="(?P<thumb>[^"]+)"[^>]+>[^<]+<\/a>.*?<p>\s*(?P<quality>[a-zA-Z-0-9]+)\s*<\/p>'
patronBlock = '<div id="main_col">(?P<block>.*?)main_col'
## itemlist = support.scrape(item, patron, ['url', 'title', 'year', 'thumb', 'quality'], headers, action=action, patron_block='<div id="main_col">(.*?)main_col', patronNext='<a class="nextpostslink" rel="next" href="([^"]+)">')
patronNext = '<a class="nextpostslink" rel="next" href="([^"]+)">'
else:
patron = r'<div class="media3">[^>]+><a href="(?P<url>[^"]+)"><img[^s]+src="(?P<thumb>[^"]+)"[^>]+><\/a><[^>]+><a[^<]+><p>(?P<title>[^<]+) \((?P<year>[^\)]+)[^<]+<\/p>.*?<p>\s*(?P<quality>[a-zA-Z-0-9]+)\s*<\/p>'
patronNext='<a class="nextpostslink" rel="next" href="([^"]+)">'
action = action
# itemlist = support.scrape(item, patron, ['url', 'thumb', 'title', 'year', 'quality'], headers, action=action, patronNext='<a class="nextpostslink" rel="next" href="([^"]+)">')
if item.args == 'anime': autorenumber.renumber(itemlist)
## return itemlist
return locals()
def episodios(item):
log()
itemlist = []
if item.args == 'anime': return anime(item)
data = httptools.downloadpage(item.url).data
# Check whether it is a series
check = scrapertoolsV2.find_single_match(data.replace('\t','').replace('\n',''), r'<div class="category-film"><h3>([^<]+)<\/h3>')
if 'serie tv' not in check.lower(): return findvideos(item)
elif 'anime' in check.lower(): return findvideos(item)
patron = r'<iframe src="([^"]+)" scrolling="no" frameborder="0" width="626" height="550" allowfullscreen="true" webkitallowfullscreen="true" mozallowfullscreen="true">'
url = scrapertoolsV2.find_single_match(data, patron)
log('URL =', url)
seasons = support.match(item, r'<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>', r'Stagioni<\/a>.*?<ul class="nav navbar-nav">(.*?)<\/ul>', headers=headers, url=url)[0]
for season_url, season in seasons:
season_url = urlparse.urljoin(url, season_url)
episodes = support.match(item, r'<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>', r'Episodio<\/a>.*?<ul class="nav navbar-nav">(.*?)<\/ul>', headers=headers, url=season_url)[0]
for episode_url, episode in episodes:
episode_url = urlparse.urljoin(url, episode_url)
title = season + "x" + episode.zfill(2)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType=item.contentType,
title=support.typo(title + ' - ' + item.fulltitle,'bold'),
url=episode_url,
fulltitle=title + ' - ' + item.show,
show=item.show,
thumbnail=item.thumbnail))
support.videolibrary(itemlist, item, 'color kod bold')
return itemlist
def anime(item):
log()
itemlist = []
seasons = support.match(item, r'<div class="sp-body[^"]+">(.*?)<\/div>')[0]
for season in seasons:
episodes = scrapertoolsV2.find_multiple_matches(season, r'<a.*?href="([^"]+)"[^>]+>([^<]+)<\/a>(.*?)<(:?br|\/p)')
for url, title, urls, none in episodes:
urls = scrapertoolsV2.find_multiple_matches(urls, '<a.*?href="([^"]+)"[^>]+>')
for url2 in urls:
url += url2 + '\n'
log('EP URL',url)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType=item.contentType,
title=support.typo(title + ' - ' + item.fulltitle,'bold'),
url=url,
fulltitle=title + ' - ' + item.show,
show=item.show,
thumbnail=item.thumbnail,
args=item.args))
autorenumber.renumber(itemlist, item,'bold')
support.videolibrary(itemlist, item, 'color kod bold')
return itemlist
def findvideos(item):
log()
log()
listurl = set()
# itemlist = []
support.log("ITEMLIST: ", item)
## if item.args == 'anime':
## data = item.url
## else:
## data = httptools.downloadpage(item.url, headers=headers).data
data = item.url if item.contentType == "episode" else httptools.downloadpage(item.url, headers=headers).data
if item.args == 'anime':
data = item.url
else:
data = httptools.downloadpage(item.url, headers=headers).data
if 'protectlink' in data:
urls = scrapertoolsV2.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
support.log("SONO QUI: ", urls)
for url in urls:
url = url.decode('base64')
# drop the last character, it does not belong there
url, c = unshorten_only(url)
data += '\t' + url
support.log("SONO QUI: ", url)
if 'nodmca' in data:
page = httptools.downloadpage(url, headers=headers).data
listurl.add(scrapertoolsV2.find_single_match(page,'<meta name="og:url" content="([^=]+)">'))
# Check whether it is a series
check = scrapertoolsV2.find_single_match(data.replace('\t','').replace('\n',''), r'<div class="category-film"><h3>([^<]+)<\/h3>')
if 'serie tv' in check.lower(): return episodios(item)
elif 'anime' in check.lower(): return anime(item)
if 'protectlink' in data:
urls = scrapertoolsV2.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
for url in urls:
url= url.decode('base64')
if '\t' in url:
url = url[:-1]
data += '\t' + url
if 'nodmca' in data:
page = httptools.downloadpage(url, headers=headers).data
data += '\t' + scrapertoolsV2.find_single_match(page,'<meta name="og:url" content="([^=]+)">')
return support.server(item, data, headers=headers)
return support.server(item, data=listurl)#, headers=headers)
# return itemlist
##def findvideos(item):
## log()
##
## # Load the page
## data = item.url if item.contentType == "episode" else httptools.downloadpage(item.url, headers=headers).data
##
## if 'protectlink' in data:
## urls = scrapertoolsV2.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
## for url in urls:
## url = url.decode('base64')
## data += '\t' + url
## url, c = unshorten_only(url)
## data += '\t' + url
##
## itemlist = servertools.find_video_items(data=data)
##
## for videoitem in itemlist:
## videoitem.title = item.title + videoitem.title
## videoitem.fulltitle = item.fulltitle
## videoitem.thumbnail = item.thumbnail
## videoitem.show = item.show
## videoitem.plot = item.plot
## videoitem.channel = item.channel
## videoitem.contentType = item.contentType
#### videoitem.language = IDIOMAS['Italiano']
##
#### # Required for link filtering
####
#### if __comprueba_enlaces__:
#### itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
####
#### # Required for FilterTools
####
#### itemlist = filtertools.get_links(itemlist, item, list_language)
####
#### # Required for AutoPlay
####
#### autoplay.start(itemlist, item)
##
## if item.contentType != 'episode':
## if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
## itemlist.append(
## Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
## action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
##
## # Extract the contents
## patron = r'\{"file":"([^"]+)","type":"[^"]+","label":"([^"]+)"\}'
## matches = re.compile(patron, re.DOTALL).findall(data)
## for scrapedurl, scrapedtitle in matches:
## title = item.title + " " + scrapedtitle + " quality"
## itemlist.append(
## Item(channel=item.channel,
## action="play",
## title=title,
## url=scrapedurl.replace(r'\/', '/').replace('%3B', ';'),
## thumbnail=item.thumbnail,
## fulltitle=item.title,
## show=item.title,
## server='',
## contentType=item.contentType,
## folder=False))
##
## return itemlist


@@ -73,11 +73,7 @@ def findvideos(item):
@support.scrape
def menu(item):
patron = '<a href="(?P<url>[^"#]+)"(?: title="[^"]+")?>(?P<title>[a-zA-Z0-9]+)'
patronBlock = '<nav class="' + item.args + '">(?P<block>.*?)</nav>'
action = 'peliculas'
return locals()
return support.dooplay_menu(item, item.args)
def play(item):


@@ -364,7 +364,7 @@ def set_channel_setting(name, value, channel):
file_settings = os.path.join(config.get_data_path(), "settings_channels", channel + "_data.json")
dict_settings = {}
def_settings = get_default_settings(channel)
if channel not in ['trakt']: def_settings = get_default_settings(channel)
dict_file = None


@@ -367,9 +367,6 @@ def proxy_post_processing(url, proxy_data, response, opt):
def downloadpage(url, **opt):
logger.info()
"""
Open a url and return the data obtained
@@ -441,7 +438,7 @@ def downloadpage(url, **opt):
opt['proxy_retries_counter'] += 1
session = cloudscraper.create_scraper()
session.verify = False
# session.verify = False
if opt.get('cookies', True):
session.cookies = cj
session.headers.update(req_headers)
@@ -620,4 +617,4 @@ def fill_fields_post(info_dict, req, response, req_headers, inicio):
import traceback
logger.error(traceback.format_exc(1))
return info_dict, response
return info_dict, response


@@ -107,35 +107,36 @@ def search(channel, item, texto):
def dbg():
import web_pdb;
if not web_pdb.WebPdb.active_instance:
import webbrowser
webbrowser.open('http://127.0.0.1:5555')
web_pdb.set_trace()
if config.dev_mode():
import web_pdb;
if not web_pdb.WebPdb.active_instance:
import webbrowser
webbrowser.open('http://127.0.0.1:5555')
web_pdb.set_trace()
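# Usage: call support.dbg() anywhere in a channel; when config.dev_mode() is on it
# starts web-pdb and opens http://127.0.0.1:5555 in the default browser.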
def regexDbg(item, patron, headers, data=''):
import json, urllib2, webbrowser
url = 'https://regex101.com'
if config.dev_mode():
import json, urllib2, webbrowser
url = 'https://regex101.com'
if not data:
html = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data.replace("'", '"')
html = re.sub('\n|\t', ' ', html)
else:
html = data
headers = {'content-type': 'application/json'}
data = {
'regex': patron,
'flags': 'gm',
'testString': html,
'delimiter': '"""',
'flavor': 'python'
}
r = urllib2.Request(url + '/api/regex', json.dumps(data, encoding='latin1'), headers=headers)
r = urllib2.urlopen(r).read()
permaLink = json.loads(r)['permalinkFragment']
webbrowser.open(url + "/r/" + permaLink)
if not data:
html = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data.replace("'", '"')
html = re.sub('\n|\t', ' ', html)
else:
html = data
headers = {'content-type': 'application/json'}
data = {
'regex': patron,
'flags': 'gm',
'testString': html,
'delimiter': '"""',
'flavor': 'python'
}
r = urllib2.Request(url + '/api/regex', json.dumps(data, encoding='latin1'), headers=headers)
r = urllib2.urlopen(r).read()
permaLink = json.loads(r)['permalinkFragment']
webbrowser.open(url + "/r/" + permaLink)
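# Usage sketch: support.regexDbg(item, patron, headers) posts the downloaded page and
# the pattern to regex101.com and opens the resulting permalink for interactive tuning.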
def scrape2(item, patron = '', listGroups = [], headers="", blacklist="", data="", patronBlock="",
@@ -437,7 +438,7 @@ def scrape(func):
if config.get_setting('downloadenabled') and (function == 'episodios' or function == 'findvideos'):
download(itemlist, item, function=function)
if 'patronMenu' in args:
if 'patronMenu' in args and itemlist:
itemlist = thumb(itemlist, genre=True)
if 'fullItemlistHook' in args:
@@ -530,6 +531,15 @@ def dooplay_search_vars(item, blacklist):
return locals()
def dooplay_menu(item, type):
patron = '<a href="(?P<url>[^"#]+)"(?: title="[^"]+")?>(?P<title>[a-zA-Z0-9]+)'
patronBlock = '<nav class="' + item.args + '">(?P<block>.*?)</nav>'
action = 'peliculas'
return locals()
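# For orientation, a minimal sketch of the convention the @support.scrape and
# @support.menu decorators rely on (hypothetical reimplementation; the real decorator
# does far more): the wrapped function only defines names such as patron /
# patronBlock / action and hands them back via `return locals()`.
def scrape_sketch(func):
    def wrapper(item, *args, **kwargs):
        cfg = func(item, *args, **kwargs)         # the dict built by `return locals()`
        patron = cfg.get('patron', '')            # regex applied to the page
        block = cfg.get('patronBlock', '')        # optional regex narrowing the page
        action = cfg.get('action', 'findvideos')  # action given to the produced Items
        itemlist = []
        # ... download item.url, apply block/patron, append Items with `action` ...
        return itemlist
    return wrapper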
def swzz_get_url(item):
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:59.0) Gecko/20100101 Firefox/59.0'}
@@ -703,42 +713,42 @@ def typo(string, typography=''):
# If there are no attributes, it applies the default ones
attribute = ['[]','()','{}','submenu','color','bold','italic','_','--','[B]','[I]','[COLOR]']
movie_word_list = ['film', 'serie', 'tv', 'anime', 'cinema', 'sala']
search_word_list = ['cerca']
categories_word_list = ['genere', 'categoria', 'categorie', 'ordine', 'lettera', 'anno', 'alfabetico', 'a-z', 'menu']
# movie_word_list = ['film', 'serie', 'tv', 'anime', 'cinema', 'sala']
# search_word_list = ['cerca']
# categories_word_list = ['genere', 'categoria', 'categorie', 'ordine', 'lettera', 'anno', 'alfabetico', 'a-z', 'menu']
if not any(word in string for word in attribute):
if any(word in string.lower() for word in search_word_list):
string = '[COLOR '+ kod_color +']' + string + '[/COLOR]'
elif any(word in string.lower() for word in categories_word_list):
string = ' > ' + string
elif any(word in string.lower() for word in movie_word_list):
string = '[B]' + string + '[/B]'
# if not any(word in string for word in attribute):
# if any(word in string.lower() for word in search_word_list):
# string = '[COLOR '+ kod_color +']' + string + '[/COLOR]'
# elif any(word in string.lower() for word in categories_word_list):
# string = ' > ' + string
# elif any(word in string.lower() for word in movie_word_list):
# string = '[B]' + string + '[/B]'
# Otherwise it uses the typographical attributes of the string
else:
if '[]' in string:
string = '[' + re.sub(r'\s\[\]','',string) + ']'
if '()' in string:
string = '(' + re.sub(r'\s\(\)','',string) + ')'
if '{}' in string:
string = '{' + re.sub(r'\s\{\}','',string) + '}'
if 'submenu' in string:
string = u"\u2022\u2022 ".encode('utf-8') + re.sub(r'\ssubmenu','',string)
if 'color' in string:
color = scrapertoolsV2.find_single_match(string,'color ([a-z]+)')
if color == 'kod' or '': color = kod_color
string = '[COLOR '+ color +']' + re.sub(r'\scolor\s([a-z]+)','',string) + '[/COLOR]'
if 'bold' in string:
string = '[B]' + re.sub(r'\sbold','',string) + '[/B]'
if 'italic' in string:
string = '[I]' + re.sub(r'\sitalic','',string) + '[/I]'
if '_' in string:
string = ' ' + re.sub(r'\s_','',string)
if '--' in string:
string = ' - ' + re.sub(r'\s--','',string)
if 'bullet' in string:
string = '[B]' + u"\u2022".encode('utf-8') + '[/B] ' + re.sub(r'\sbullet','',string)
# else:
if '[]' in string:
string = '[' + re.sub(r'\s\[\]','',string) + ']'
if '()' in string:
string = '(' + re.sub(r'\s\(\)','',string) + ')'
if '{}' in string:
string = '{' + re.sub(r'\s\{\}','',string) + '}'
if 'submenu' in string:
string = u"\u2022\u2022 ".encode('utf-8') + re.sub(r'\ssubmenu','',string)
if 'color' in string:
color = scrapertoolsV2.find_single_match(string,'color ([a-z]+)')
if color == 'kod' or '': color = kod_color
string = '[COLOR '+ color +']' + re.sub(r'\scolor\s([a-z]+)','',string) + '[/COLOR]'
if 'bold' in string:
string = '[B]' + re.sub(r'\sbold','',string) + '[/B]'
if 'italic' in string:
string = '[I]' + re.sub(r'\sitalic','',string) + '[/I]'
if '_' in string:
string = ' ' + re.sub(r'\s_','',string)
if '--' in string:
string = ' - ' + re.sub(r'\s--','',string)
if 'bullet' in string:
string = '[B]' + u"\u2022".encode('utf-8') + '[/B] ' + re.sub(r'\sbullet','',string)
return string
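# Usage sketch, inferred from the branches above (assumption: the unshown head of
# typo() joins `typography` onto `string` before these checks run):
#   typo('Sub-ITA', '_ [] color kod')  ->  ' [COLOR <kod_color>][Sub-ITA][/COLOR]'
# i.e. a leading space ('_'), square brackets ('[]') and the default KOD colour.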
@@ -915,6 +925,7 @@ def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=Tru
verifiedItemlist = []
for videoitem in itemlist:
if not videoitem.server:
videoitem.url = unshortenit.unshorten(videoitem.url)[0]
findS = servertools.findvideos(videoitem.url)
if findS:
findS = findS[0]


@@ -3,19 +3,14 @@
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*
import os
import os, xbmc
from core import httptools, jsontools
from core.item import Item
from platformcode import config, logger
from threading import Thread
import xbmc
from core import httptools
from core import jsontools
from core.item import Item
from platformcode import config
from platformcode import logger
client_id = "c40ba210716aee87f6a9ddcafafc56246909e5377b623b72c15909024448e89d"
client_secret = "999164f25832341f0214453bb11c915adb18e9490d6b5e9a707963a5a1bee43e"
client_id = "404dac2771a60701f1b4e2ec0e9515ece058ec47a66cffce1480aaa022e0aee9"
client_secret = " ad7c56c19f8a633441b3011a7b228c74977deb2eb2749fb5b022f2dc0c96473a"
def auth_trakt():
@@ -29,7 +24,7 @@ def auth_trakt():
post = jsontools.dump(post)
# Request the url and verification code needed to grant the app permission
url = "http://api-v2launch.trakt.tv/oauth/device/code"
data = httptools.downloadpage(url, post=post, headers=headers, replace_headers=True).data
data = httptools.downloadpage(url, post=post, headers=headers).data
data = jsontools.load(data)
item.verify_url = data["verification_url"]
item.user_code = data["user_code"]
@@ -63,12 +58,12 @@ def token_trakt(item):
post = {'refresh_token': refresh, 'client_id': client_id, 'client_secret': client_secret,
'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob', 'grant_type': 'refresh_token'}
post = jsontools.dump(post)
data = httptools.downloadpage(url, post, headers, replace_headers=True).data
data = httptools.downloadpage(url, post=post, headers=headers).data
data = jsontools.load(data)
elif item.action == "token_trakt":
url = "http://api-v2launch.trakt.tv/oauth/device/token"
post = "code=%s&client_id=%s&client_secret=%s" % (item.device_code, client_id, client_secret)
data = httptools.downloadpage(url, post, headers, replace_headers=True).data
data = httptools.downloadpage(url, post=post, headers=headers).data
data = jsontools.load(data)
else:
import time
@@ -89,7 +84,7 @@ def token_trakt(item):
url = "http://api-v2launch.trakt.tv/oauth/device/token"
post = {'code': item.device_code, 'client_id': client_id, 'client_secret': client_secret}
post = jsontools.dump(post)
data = httptools.downloadpage(url, post, headers, replace_headers=True).data
data = httptools.downloadpage(url, post=post, headers=headers).data
data = jsontools.load(data)
if "access_token" in data:
# Code entered, exit the loop
@@ -169,7 +164,7 @@ def get_trakt_watched(id_type, mediatype, update=False):
if token_auth:
headers.append(['Authorization', "Bearer %s" % token_auth])
url = "https://api.trakt.tv/sync/watched/%s" % mediatype
data = httptools.downloadpage(url, headers=headers, replace_headers=True).data
data = httptools.downloadpage(url, headers=headers).data
watched_dict = jsontools.load(data)
if mediatype == 'shows':
@@ -214,7 +209,7 @@ def trakt_check(itemlist):
if not synced:
get_sync_from_file()
synced = True
mediatype = 'movies'
id_type = 'tmdb'

lib/cloudscraper/__init__.py Normal file → Executable file

@@ -4,13 +4,17 @@ import sys
import ssl
import requests
try:
import copyreg
except ImportError:
import copy_reg as copyreg
from copy import deepcopy
from time import sleep
from collections import OrderedDict
from requests.sessions import Session
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.ssl_ import create_urllib3_context
from .interpreters import JavaScriptInterpreter
from .reCaptcha import reCaptcha
@@ -31,225 +35,431 @@ try:
except ImportError:
from urllib.parse import urlparse
##########################################################################################################################################################
# ------------------------------------------------------------------------------- #
__version__ = '1.1.24'
__version__ = '1.2.14'
BUG_REPORT = 'Cloudflare may have changed their technique, or there may be a bug in the script.'
##########################################################################################################################################################
# ------------------------------------------------------------------------------- #
# class CipherSuiteAdapter(HTTPAdapter):
#
# def __init__(self, cipherSuite=None, **kwargs):
# self.cipherSuite = cipherSuite
#
# self.ssl_context = create_urllib3_context(
# ssl_version=ssl.PROTOCOL_TLS,
# ciphers=self.cipherSuite
# )
#
# super(CipherSuiteAdapter, self).__init__(**kwargs)
class CipherSuiteAdapter(HTTPAdapter):
def __init__(self, cipherSuite=None, **kwargs):
self.cipherSuite = cipherSuite
__attrs__ = [
'ssl_context',
'max_retries',
'config',
'_pool_connections',
'_pool_maxsize',
'_pool_block'
]
if hasattr(ssl, 'PROTOCOL_TLS'):
self.ssl_context = create_urllib3_context(
ssl_version=getattr(ssl, 'PROTOCOL_TLSv1_3', ssl.PROTOCOL_TLSv1_2),
ciphers=self.cipherSuite
)
else:
self.ssl_context = create_urllib3_context(ssl_version=ssl.PROTOCOL_TLSv1)
def __init__(self, *args, **kwargs):
self.ssl_context = kwargs.pop('ssl_context', None)
self.cipherSuite = kwargs.pop('cipherSuite', None)
if not self.ssl_context:
self.ssl_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
self.ssl_context.set_ciphers(self.cipherSuite)
self.ssl_context.options |= (ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1)
super(CipherSuiteAdapter, self).__init__(**kwargs)
##########################################################################################################################################################
# ------------------------------------------------------------------------------- #
def init_poolmanager(self, *args, **kwargs):
kwargs['ssl_context'] = self.ssl_context
return super(CipherSuiteAdapter, self).init_poolmanager(*args, **kwargs)
##########################################################################################################################################################
# ------------------------------------------------------------------------------- #
def proxy_manager_for(self, *args, **kwargs):
kwargs['ssl_context'] = self.ssl_context
return super(CipherSuiteAdapter, self).proxy_manager_for(*args, **kwargs)
##########################################################################################################################################################
# ------------------------------------------------------------------------------- #
class CloudScraper(Session):
def __init__(self, *args, **kwargs):
self.allow_brotli = kwargs.pop('allow_brotli', True if 'brotli' in sys.modules.keys() else False)
self.debug = kwargs.pop('debug', False)
self.delay = kwargs.pop('delay', None)
self.cipherSuite = kwargs.pop('cipherSuite', None)
self.interpreter = kwargs.pop('interpreter', 'js2py')
self.recaptcha = kwargs.pop('recaptcha', {})
self.allow_brotli = kwargs.pop(
'allow_brotli',
True if 'brotli' in sys.modules.keys() else False
)
self.user_agent = User_Agent(
allow_brotli=self.allow_brotli,
browser=kwargs.pop('browser', None)
)
self.cipherSuite = None
self._solveDepthCnt = 0
self.solveDepth = kwargs.pop('solveDepth', 3)
super(CloudScraper, self).__init__(*args, **kwargs)
# pylint: disable=E0203
if 'requests' in self.headers['User-Agent']:
# ------------------------------------------------------------------------------- #
# Set a random User-Agent if no custom User-Agent has been set
self.headers = User_Agent(allow_brotli=self.allow_brotli).headers
# ------------------------------------------------------------------------------- #
self.headers = self.user_agent.headers
self.mount('https://', CipherSuiteAdapter(self.loadCipherSuite()))
self.mount(
'https://',
CipherSuiteAdapter(
cipherSuite=self.loadCipherSuite() if not self.cipherSuite else self.cipherSuite
)
)
##########################################################################################################################################################
# purely to allow us to pickle dump
copyreg.pickle(ssl.SSLContext, lambda obj: (obj.__class__, (obj.protocol,)))
# ------------------------------------------------------------------------------- #
# Allow us to pickle our session back with all variables
# ------------------------------------------------------------------------------- #
def __getstate__(self):
return self.__dict__
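# Usage sketch for the pickling support above (illustrative, not from the library docs):
#   import pickle
#   scraper = create_scraper()
#   blob = pickle.dumps(scraper)     # possible thanks to the copyreg hook for SSLContext
#   scraper = pickle.loads(blob)     # cookies, headers and UA survive the round trip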
# ------------------------------------------------------------------------------- #
# debug the request via the response
# ------------------------------------------------------------------------------- #
@staticmethod
def debugRequest(req):
try:
print(dump.dump_all(req).decode('utf-8'))
except: # noqa
pass
except ValueError as e:
print("Debug Error: {}".format(getattr(e, 'message', e)))
##########################################################################################################################################################
# ------------------------------------------------------------------------------- #
# Decode Brotli on older versions of urllib3 manually
# ------------------------------------------------------------------------------- #
def decodeBrotli(self, resp):
if requests.packages.urllib3.__version__ < '1.25.1' and resp.headers.get('Content-Encoding') == 'br':
if self.allow_brotli and resp._content:
resp._content = brotli.decompress(resp.content)
else:
logging.warning(
'You\'re running urllib3 {}, Brotli content detected, '
'Which requires manual decompression, '
'But option allow_brotli is set to False, '
'We will not continue to decompress.'.format(requests.packages.urllib3.__version__)
)
return resp
# ------------------------------------------------------------------------------- #
# construct a cipher suite of ciphers the system actually supports
# ------------------------------------------------------------------------------- #
def loadCipherSuite(self):
if self.cipherSuite:
return self.cipherSuite
self.cipherSuite = ''
if hasattr(ssl, 'PROTOCOL_TLS'):
ciphers = [
'TLS13-AES-128-GCM-SHA256',
'TLS13-AES-256-GCM-SHA384',
'TLS13-CHACHA20-POLY1305-SHA256',
'ECDHE-ECDSA-CHACHA20-POLY1305',
'ECDHE-ECDSA-AES128-GCM-SHA256',
'ECDHE-ECDSA-AES128-SHA',
'ECDHE-ECDSA-AES128-SHA256',
'ECDHE-ECDSA-AES256-GCM-SHA384',
'ECDHE-ECDSA-AES256-SHA',
'ECDHE-ECDSA-AES256-SHA384',
# Slip in some additional intermediate-compatibility ciphers; this should help users on non-Cloudflare-based sites.
'ECDHE-RSA-AES128-SHA256',
'ECDHE-RSA-AES256-SHA384',
'ECDHE-RSA-AES256-GCM-SHA384',
'DHE-RSA-AES128-GCM-SHA256',
'DHE-RSA-AES256-GCM-SHA384'
]
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
for cipher in ciphers:
if hasattr(ssl, 'Purpose') and hasattr(ssl.Purpose, 'SERVER_AUTH'):
for cipher in self.user_agent.cipherSuite[:]:
try:
ctx.set_ciphers(cipher)
self.cipherSuite = '{}:{}'.format(self.cipherSuite, cipher).rstrip(':').lstrip(':')
except ssl.SSLError:
pass
context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
context.set_ciphers(cipher)
except:
self.user_agent.cipherSuite.remove(cipher)
return self.cipherSuite
if self.user_agent.cipherSuite:
self.cipherSuite = ':'.join(self.user_agent.cipherSuite)
return self.cipherSuite
##########################################################################################################################################################
sys.tracebacklimit = 0
raise RuntimeError("The OpenSSL on this system does not meet the minimum cipher requirements.")
# ------------------------------------------------------------------------------- #
# Our hijacker request function
# ------------------------------------------------------------------------------- #
def request(self, method, url, *args, **kwargs):
ourSuper = super(CloudScraper, self)
resp = ourSuper.request(method, url, *args, **kwargs)
# pylint: disable=E0203
if kwargs.get('proxies') and kwargs.get('proxies') != self.proxies:
self.proxies = kwargs.get('proxies')
if requests.packages.urllib3.__version__ < '1.25.1' and resp.headers.get('Content-Encoding') == 'br':
if self.allow_brotli and resp._content:
resp._content = brotli.decompress(resp.content)
else:
logging.warning('Brotli content detected, But option is disabled, we will not continue.')
return resp
resp = self.decodeBrotli(
super(CloudScraper, self).request(method, url, *args, **kwargs)
)
# ------------------------------------------------------------------------------- #
# Debug request
# ------------------------------------------------------------------------------- #
if self.debug:
self.debugRequest(resp)
# Check if Cloudflare anti-bot is on
if self.isChallengeRequest(resp):
if resp.request.method != 'GET':
# Work around if the initial request is not a GET,
# Supersede with a GET then re-request the original METHOD.
self.request('GET', resp.url)
resp = ourSuper.request(method, url, *args, **kwargs)
else:
# Solve Challenge
resp = self.sendChallengeResponse(resp, **kwargs)
if self.is_Challenge_Request(resp):
# ------------------------------------------------------------------------------- #
# Try to solve the challenge and send it back
# ------------------------------------------------------------------------------- #
if self._solveDepthCnt >= self.solveDepth:
sys.tracebacklimit = 0
_ = self._solveDepthCnt
self._solveDepthCnt = 0
raise RuntimeError("!!Loop Protection!! We have tried to solve {} time(s) in a row.".format(_))
self._solveDepthCnt += 1
resp = self.Challenge_Response(resp, **kwargs)
else:
if resp.status_code not in [302, 429, 503]:
self._solveDepthCnt = 0
return resp
##########################################################################################################################################################
# ------------------------------------------------------------------------------- #
# check if the response contains a valid Cloudflare challenge
# ------------------------------------------------------------------------------- #
    @staticmethod
    def is_IUAM_Challenge(resp):
        try:
            return (
                resp.headers.get('Server', '').startswith('cloudflare')
                and resp.status_code in [429, 503]
                and re.search(
                    r'action="/.*?__cf_chl_jschl_tk__=\S+".*?name="jschl_vc"\svalue=.*?',
                    resp.text,
                    re.M | re.DOTALL
                )
            )
        except AttributeError:
            pass

        return False
    # ------------------------------------------------------------------------------- #
    # check if the response contains a valid Cloudflare reCaptcha challenge
    # ------------------------------------------------------------------------------- #

    @staticmethod
    def is_reCaptcha_Challenge(resp):
        try:
            return (
                resp.headers.get('Server', '').startswith('cloudflare')
                and resp.status_code == 403
                and re.search(
                    r'action="/.*?__cf_chl_captcha_tk__=\S+".*?data\-sitekey=.*?',
                    resp.text,
                    re.M | re.DOTALL
                )
            )
        except AttributeError:
            pass

        return False

    # ------------------------------------------------------------------------------- #
    # Wrapper for is_reCaptcha_Challenge and is_IUAM_Challenge
    # ------------------------------------------------------------------------------- #

    def is_Challenge_Request(self, resp):
        if self.is_reCaptcha_Challenge(resp) or self.is_IUAM_Challenge(resp):
            return True

        return False
# ------------------------------------------------------------------------------- #
# Try to solve the Cloudflare JavaScript challenge.
# ------------------------------------------------------------------------------- #
@staticmethod
def IUAM_Challenge_Response(body, url, interpreter):
try:
challengeUUID = re.search(
r'id="challenge-form" action="(?P<challengeUUID>\S+)"',
body, re.M | re.DOTALL
).groupdict().get('challengeUUID', '')
payload = OrderedDict(re.findall(r'name="(r|jschl_vc|pass)"\svalue="(.*?)"', body))
except AttributeError:
sys.tracebacklimit = 0
raise RuntimeError(
"Cloudflare IUAM detected, unfortunately we can't extract the parameters correctly."
)
hostParsed = urlparse(url)
try:
payload['jschl_answer'] = JavaScriptInterpreter.dynamicImport(
interpreter
).solveChallenge(body, hostParsed.netloc)
except Exception as e:
raise RuntimeError(
'Unable to parse Cloudflare anti-bots page: {}'.format(
getattr(e, 'message', e)
)
)
return {
'url': '{}://{}{}'.format(
hostParsed.scheme,
hostParsed.netloc,
challengeUUID
),
'data': payload
}
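    # Illustrative sketch (not part of this commit) of the payload extraction
    # above, run against a fabricated challenge-form snippet; every value below
    # is a placeholder:
    #
    #   body = ('<form id="challenge-form" action="/?__cf_chl_jschl_tk__=tok">'
    #           '<input name="r" value="r0"/> <input name="jschl_vc" value="v1"/>'
    #           ' <input name="pass" value="p2"/></form>')
    #   OrderedDict(re.findall(r'name="(r|jschl_vc|pass)"\svalue="(.*?)"', body))
    #   # -> OrderedDict([('r', 'r0'), ('jschl_vc', 'v1'), ('pass', 'p2')])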
# ------------------------------------------------------------------------------- #
# Try to solve the reCaptcha challenge via 3rd party.
# ------------------------------------------------------------------------------- #
@staticmethod
def reCaptcha_Challenge_Response(provider, provider_params, body, url):
try:
payload = re.search(
r'(name="r"\svalue="(?P<r>\S+)"|).*?challenge-form" action="(?P<challengeUUID>\S+)".*?'
r'data-ray="(?P<data_ray>\S+)".*?data-sitekey="(?P<site_key>\S+)"',
body, re.M | re.DOTALL
).groupdict()
        except AttributeError:
sys.tracebacklimit = 0
raise RuntimeError(
"Cloudflare reCaptcha detected, unfortunately we can't extract the parameters correctly."
)
hostParsed = urlparse(url)
return {
'url': '{}://{}{}'.format(
hostParsed.scheme,
hostParsed.netloc,
payload.get('challengeUUID', '')
),
'data': OrderedDict([
('r', payload.get('r', '')),
('id', payload.get('data_ray')),
(
'g-recaptcha-response',
reCaptcha.dynamicImport(
provider.lower()
).solveCaptcha(url, payload.get('site_key'), provider_params)
)
])
}
# ------------------------------------------------------------------------------- #
# Attempt to handle and send the challenge response back to cloudflare
# ------------------------------------------------------------------------------- #
    def Challenge_Response(self, resp, **kwargs):
        if self.is_reCaptcha_Challenge(resp):
            # ------------------------------------------------------------------------------- #
            # double down on the request, as some websites only check
            # whether cfuid is populated before issuing reCaptcha.
            # ------------------------------------------------------------------------------- #

            resp = self.decodeBrotli(
                super(CloudScraper, self).request(resp.request.method, resp.url, **kwargs)
            )

            if not self.is_reCaptcha_Challenge(resp):
                return resp

            # ------------------------------------------------------------------------------- #
            # if no reCaptcha provider is configured, raise a runtime error.
            # ------------------------------------------------------------------------------- #

            if not self.recaptcha or not isinstance(self.recaptcha, dict) or not self.recaptcha.get('provider'):
                sys.tracebacklimit = 0
                raise RuntimeError(
                    "Cloudflare reCaptcha detected, unfortunately you haven't loaded an anti reCaptcha provider "
                    "correctly via the 'recaptcha' parameter."
                )

            # ------------------------------------------------------------------------------- #
            # if the provider is return_response, return the response without doing anything.
            # ------------------------------------------------------------------------------- #

            if self.recaptcha.get('provider') == 'return_response':
                return resp

            self.recaptcha['proxies'] = self.proxies
            submit_url = self.reCaptcha_Challenge_Response(
                self.recaptcha.get('provider'),
                self.recaptcha,
                resp.text,
                resp.url
            )
        else:
            # ------------------------------------------------------------------------------- #
            # Cloudflare requires a delay before solving the challenge
            # ------------------------------------------------------------------------------- #

            if not self.delay:
                try:
                    delay = float(
                        re.search(
                            r'submit\(\);\r?\n\s*},\s*([0-9]+)',
                            resp.text
                        ).group(1)
                    ) / float(1000)
                    if isinstance(delay, (int, float)):
                        self.delay = delay
                except (AttributeError, ValueError):
                    sys.tracebacklimit = 0
                    raise RuntimeError("Cloudflare IUAM page possibly malformed, issue extracting the delay value.")

            sleep(self.delay)

            submit_url = self.IUAM_Challenge_Response(
                resp.text,
                resp.url,
                self.interpreter
            )

        # ------------------------------------------------------------------------------- #
        # Send the Challenge Response back to Cloudflare
        # ------------------------------------------------------------------------------- #

        if submit_url:
            def updateAttr(obj, name, newValue):
                try:
                    obj[name].update(newValue)
                    return obj[name]
                except (AttributeError, KeyError):
                    obj[name] = {}
                    obj[name].update(newValue)
                    return obj[name]

            cloudflare_kwargs = deepcopy(kwargs)
            cloudflare_kwargs['allow_redirects'] = False
            cloudflare_kwargs['data'] = updateAttr(
                cloudflare_kwargs,
                'data',
                submit_url['data']
            )
            cloudflare_kwargs['headers'] = updateAttr(
                cloudflare_kwargs,
                'headers',
                {
                    'Referer': resp.url
                }
            )

            return self.request(
                'POST',
                submit_url['url'],
                **cloudflare_kwargs
            )

        # ------------------------------------------------------------------------------- #
        # We shouldn't be here... Re-request the original query and process again.
        # ------------------------------------------------------------------------------- #

        return self.request(resp.request.method, resp.url, **kwargs)
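    # Usage sketch (not part of this commit; assumes the package is importable
    # as 'cloudscraper'): wiring a third-party solver into the challenge flow
    # above via the 'recaptcha' parameter. The provider name matches the solver
    # modules under lib/cloudscraper/reCaptcha/; the key and URL are placeholders:
    #
    #   scraper = cloudscraper.create_scraper(
    #       recaptcha={
    #           'provider': '2captcha',
    #           'api_key': 'YOUR_2CAPTCHA_KEY'
    #       }
    #   )
    #   html = scraper.get('https://example.com').text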
# ------------------------------------------------------------------------------- #
@classmethod
def create_scraper(cls, sess=None, **kwargs):
@@ -259,25 +469,30 @@ class CloudScraper(Session):
scraper = cls(**kwargs)
if sess:
            for attr in ['auth', 'cert', 'cookies', 'headers', 'hooks', 'params', 'proxies', 'data']:
val = getattr(sess, attr, None)
if val:
setattr(scraper, attr, val)
return scraper
# ------------------------------------------------------------------------------- #
# Functions for integrating cloudscraper with other applications and scripts
# ------------------------------------------------------------------------------- #
@classmethod
def get_tokens(cls, url, **kwargs):
scraper = cls.create_scraper(
            **{
                field: kwargs.pop(field, None) for field in [
                    'allow_brotli',
                    'browser',
                    'debug',
                    'delay',
                    'interpreter',
                    'recaptcha'
                ] if field in kwargs
            }
)
try:
@@ -296,7 +511,11 @@ class CloudScraper(Session):
cookie_domain = d
break
else:
            sys.tracebacklimit = 0
            raise RuntimeError(
                "Unable to find Cloudflare cookies. Does the site actually "
                "have Cloudflare IUAM (I'm Under Attack Mode) enabled?"
            )
return (
{
@@ -306,7 +525,7 @@ class CloudScraper(Session):
scraper.headers['User-Agent']
)
# ------------------------------------------------------------------------------- #
@classmethod
def get_cookie_string(cls, url, **kwargs):
@@ -317,7 +536,7 @@ class CloudScraper(Session):
return '; '.join('='.join(pair) for pair in tokens.items()), user_agent
# ------------------------------------------------------------------------------- #
create_scraper = CloudScraper.create_scraper
get_tokens = CloudScraper.get_tokens
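# Usage sketch (not part of this commit; assumes the package is importable as
# 'cloudscraper', and the URL is a placeholder): the module-level aliases above
# let other scripts grab the clearance cookies plus the matching User-Agent.
import cloudscraper

tokens, user_agent = cloudscraper.get_tokens('https://example.com')
cookie_header, user_agent = cloudscraper.CloudScraper.get_cookie_string('https://example.com')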

lib/cloudscraper/interpreters/__init__.py Normal file → Executable file

lib/cloudscraper/interpreters/chakracore.py Normal file → Executable file

lib/cloudscraper/interpreters/js2py.py Normal file → Executable file

lib/cloudscraper/interpreters/jsunfuck.py Normal file → Executable file

lib/cloudscraper/interpreters/nodejs.py Normal file → Executable file

@@ -1,14 +1,10 @@
import base64
import logging
import subprocess
import sys
from . import JavaScriptInterpreter
# ------------------------------------------------------------------------------- #
class ChallengeInterpreter(JavaScriptInterpreter):
@@ -37,10 +33,10 @@ class ChallengeInterpreter(JavaScriptInterpreter):
)
raise
except Exception:
            sys.tracebacklimit = 0
            raise RuntimeError('Error executing Cloudflare IUAM Javascript in nodejs')
# ------------------------------------------------------------------------------- #
ChallengeInterpreter()

lib/cloudscraper/interpreters/v8.py Normal file → Executable file

@@ -10,17 +10,23 @@ except ImportError:
from . import JavaScriptInterpreter
# ------------------------------------------------------------------------------- #
class ChallengeInterpreter(JavaScriptInterpreter):
def __init__(self):
super(ChallengeInterpreter, self).__init__('v8')
# ------------------------------------------------------------------------------- #
def eval(self, jsEnv, js):
try:
return v8eval.V8().eval('{}{}'.format(jsEnv, js))
        except (TypeError, v8eval.V8Error):
            raise RuntimeError('We encountered an error running the V8 Engine.')
# ------------------------------------------------------------------------------- #
ChallengeInterpreter()

lib/cloudscraper/reCaptcha/2captcha.py Normal file

@@ -0,0 +1,206 @@
from __future__ import absolute_import
import requests
try:
import polling
except ImportError:
import sys
sys.tracebacklimit = 0
raise RuntimeError("Please install the python module 'polling' via pip or download it from https://github.com/justiniso/polling/")
from . import reCaptcha
class captchaSolver(reCaptcha):
def __init__(self):
super(captchaSolver, self).__init__('2captcha')
self.host = 'https://2captcha.com'
self.session = requests.Session()
# ------------------------------------------------------------------------------- #
@staticmethod
def checkErrorStatus(response, request_type):
if response.status_code in [500, 502]:
raise RuntimeError('2Captcha: Server Side Error {}'.format(response.status_code))
errors = {
'in.php': {
"ERROR_WRONG_USER_KEY": "You've provided api_key parameter value is in incorrect format, it should contain 32 symbols.",
"ERROR_KEY_DOES_NOT_EXIST": "The api_key you've provided does not exists.",
"ERROR_ZERO_BALANCE": "You don't have sufficient funds on your account.",
"ERROR_PAGEURL": "pageurl parameter is missing in your request.",
"ERROR_NO_SLOT_AVAILABLE":
"No Slots Available.\nYou can receive this error in two cases:\n"
"1. If you solve ReCaptcha: the queue of your captchas that are not distributed to workers is too long. "
"Queue limit changes dynamically and depends on total amount of captchas awaiting solution and usually it's between 50 and 100 captchas.\n"
"2. If you solve Normal Captcha: your maximum rate for normal captchas is lower than current rate on the server."
"You can change your maximum rate in your account's settings.",
"ERROR_IP_NOT_ALLOWED": "The request is sent from the IP that is not on the list of your allowed IPs.",
"IP_BANNED": "Your IP address is banned due to many frequent attempts to access the server using wrong authorization keys.",
"ERROR_BAD_TOKEN_OR_PAGEURL":
"You can get this error code when sending ReCaptcha V2. "
"That happens if your request contains invalid pair of googlekey and pageurl. "
"The common reason for that is that ReCaptcha is loaded inside an iframe hosted on another domain/subdomain.",
"ERROR_GOOGLEKEY":
"You can get this error code when sending ReCaptcha V2. "
"That means that sitekey value provided in your request is incorrect: it's blank or malformed.",
"MAX_USER_TURN": "You made more than 60 requests within 3 seconds.Your account is banned for 10 seconds. Ban will be lifted automatically."
},
'res.php': {
"ERROR_CAPTCHA_UNSOLVABLE":
"We are unable to solve your captcha - three of our workers were unable solve it "
"or we didn't get an answer within 90 seconds (300 seconds for ReCaptcha V2). "
"We will not charge you for that request.",
"ERROR_WRONG_USER_KEY": "You've provided api_key parameter value in incorrect format, it should contain 32 symbols.",
"ERROR_KEY_DOES_NOT_EXIST": "The api_key you've provided does not exists.",
"ERROR_WRONG_ID_FORMAT": "You've provided captcha ID in wrong format. The ID can contain numbers only.",
"ERROR_WRONG_CAPTCHA_ID": "You've provided incorrect captcha ID.",
"ERROR_BAD_DUPLICATES":
"Error is returned when 100% accuracy feature is enabled. "
"The error means that max numbers of tries is reached but min number of matches not found.",
"REPORT_NOT_RECORDED": "Error is returned to your complain request if you already complained lots of correctly solved captchas.",
"ERROR_IP_ADDRES":
"You can receive this error code when registering a pingback (callback) IP or domain."
"That happes if your request is coming from an IP address that doesn't match the IP address of your pingback IP or domain.",
"ERROR_TOKEN_EXPIRED": "You can receive this error code when sending GeeTest. That error means that challenge value you provided is expired.",
"ERROR_EMPTY_ACTION": "Action parameter is missing or no value is provided for action parameter."
}
}
if response.json().get('status') is False and response.json().get('request') in errors.get(request_type):
raise RuntimeError('{} {}'.format(response.json().get('request'), errors.get(request_type).get(response.json().get('request'))))
# ------------------------------------------------------------------------------- #
def reportJob(self, jobID):
if not jobID:
raise RuntimeError("2Captcha: Error bad job id to request reCaptcha.")
def _checkRequest(response):
if response.status_code in [200, 303] and response.json().get('status') == 1:
return response
self.checkErrorStatus(response, 'res.php')
return None
response = polling.poll(
lambda: self.session.get(
'{}/res.php'.format(self.host),
params={
'key': self.api_key,
'action': 'reportbad',
'id': jobID,
'json': '1'
}
),
check_success=_checkRequest,
step=5,
timeout=180
)
if response:
return True
else:
raise RuntimeError("2Captcha: Error - Failed to report bad reCaptcha solve.")
# ------------------------------------------------------------------------------- #
def requestJob(self, jobID):
if not jobID:
raise RuntimeError("2Captcha: Error bad job id to request reCaptcha.")
def _checkRequest(response):
if response.status_code in [200, 303] and response.json().get('status') == 1:
return response
self.checkErrorStatus(response, 'res.php')
return None
response = polling.poll(
lambda: self.session.get(
'{}/res.php'.format(self.host),
params={
'key': self.api_key,
'action': 'get',
'id': jobID,
'json': '1'
}
),
check_success=_checkRequest,
step=5,
timeout=180
)
if response:
return response.json().get('request')
else:
raise RuntimeError("2Captcha: Error failed to solve reCaptcha.")
# ------------------------------------------------------------------------------- #
def requestSolve(self, site_url, site_key):
def _checkRequest(response):
if response.status_code in [200, 303] and response.json().get("status") == 1 and response.json().get('request'):
return response
self.checkErrorStatus(response, 'in.php')
return None
response = polling.poll(
lambda: self.session.post(
'{}/in.php'.format(self.host),
data={
'key': self.api_key,
'method': 'userrecaptcha',
'googlekey': site_key,
'pageurl': site_url,
'json': '1',
'soft_id': '5507698'
},
allow_redirects=False
),
check_success=_checkRequest,
step=5,
timeout=180
)
if response:
return response.json().get('request')
else:
raise RuntimeError('2Captcha: Error no job id was returned.')
# ------------------------------------------------------------------------------- #
def getCaptchaAnswer(self, site_url, site_key, reCaptchaParams):
jobID = None
if not reCaptchaParams.get('api_key'):
raise ValueError("2Captcha: Missing api_key parameter.")
self.api_key = reCaptchaParams.get('api_key')
if reCaptchaParams.get('proxy'):
self.session.proxies = reCaptchaParams.get('proxies')
try:
jobID = self.requestSolve(site_url, site_key)
return self.requestJob(jobID)
except polling.TimeoutException:
try:
if jobID:
self.reportJob(jobID)
except polling.TimeoutException:
raise RuntimeError("2Captcha: reCaptcha solve took to long and also failed reporting the job.")
raise RuntimeError("2Captcha: reCaptcha solve took to long to execute, aborting.")
# ------------------------------------------------------------------------------- #
captchaSolver()
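# Usage sketch (not part of this commit): roughly how the scraper picks up and
# invokes the solver registered above; the site key and API key are placeholders.
from . import reCaptcha  # same relative import the solver modules use

answer = reCaptcha.dynamicImport('2captcha').solveCaptcha(
    'https://example.com',             # site_url of the challenged page
    'SITE_KEY_PLACEHOLDER',            # the page's data-sitekey
    {'api_key': 'YOUR_2CAPTCHA_KEY'}   # solver credentials
)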

lib/cloudscraper/reCaptcha/__init__.py Normal file → Executable file

@@ -1,4 +1,3 @@
import sys
import logging
import abc
@@ -8,20 +7,20 @@ if sys.version_info >= (3, 4):
else:
ABC = abc.ABCMeta('ABC', (), {})
# ------------------------------------------------------------------------------- #
captchaSolvers = {}
# ------------------------------------------------------------------------------- #
class reCaptcha(ABC):
@abc.abstractmethod
def __init__(self, name):
captchaSolvers[name] = self
# ------------------------------------------------------------------------------- #
@classmethod
def dynamicImport(cls, name):
if name not in captchaSolvers:
@@ -35,14 +34,13 @@ class reCaptcha(ABC):
return captchaSolvers[name]
# ------------------------------------------------------------------------------- #
@abc.abstractmethod
def getCaptchaAnswer(self, site_url, site_key, reCaptchaParams):
pass
    # ------------------------------------------------------------------------------- #

    def solveCaptcha(self, site_url, site_key, reCaptchaParams):
        return self.getCaptchaAnswer(site_url, site_key, reCaptchaParams)

lib/cloudscraper/reCaptcha/anticaptcha.py Normal file → Executable file

@@ -3,10 +3,10 @@ from __future__ import absolute_import
import sys
try:
from python_anticaptcha import AnticaptchaClient, NoCaptchaTaskProxylessTask
except ImportError:
sys.tracebacklimit = 0
raise RuntimeError("Please install the python module 'python_anticaptcha' via pip or download it https://github.com/ad-m/python-anticaptcha")
raise RuntimeError("Please install the python module 'python_anticaptcha' via pip or download it from https://github.com/ad-m/python-anticaptcha")
from . import reCaptcha
@@ -22,20 +22,16 @@ class captchaSolver(reCaptcha):
client = AnticaptchaClient(reCaptchaParams.get('api_key'))
        if reCaptchaParams.get('proxy'):
client.session.proxies = reCaptchaParams.get('proxies')
task = NoCaptchaTaskProxylessTask(site_url, site_key)
if not hasattr(client, 'createTaskSmee'):
sys.tracebacklimit = 0
raise RuntimeError("Please upgrade 'python_anticaptcha' via pip or download it from https://github.com/ad-m/python-anticaptcha")
job = client.createTaskSmee(task)
return job.get_solution_response()

lib/cloudscraper/reCaptcha/deathbycaptcha.py Normal file → Executable file

@@ -20,9 +20,10 @@ class captchaSolver(reCaptcha):
self.host = 'http://api.dbcapi.me/api'
self.session = requests.Session()
# ------------------------------------------------------------------------------- #
    @staticmethod
    def checkErrorStatus(response):
errors = dict(
[
(400, "DeathByCaptcha: 400 Bad Request"),
@@ -35,7 +36,7 @@ class captchaSolver(reCaptcha):
if response.status_code in errors:
raise RuntimeError(errors.get(response.status_code))
# ------------------------------------------------------------------------------- #
def login(self, username, password):
self.username = username
@@ -71,7 +72,7 @@ class captchaSolver(reCaptcha):
self.debugRequest(response)
# ------------------------------------------------------------------------------- #
def reportJob(self, jobID):
if not jobID:
@@ -104,7 +105,7 @@ class captchaSolver(reCaptcha):
else:
raise RuntimeError("DeathByCaptcha: Error report failed reCaptcha.")
# ------------------------------------------------------------------------------- #
def requestJob(self, jobID):
if not jobID:
@@ -133,7 +134,7 @@ class captchaSolver(reCaptcha):
else:
raise RuntimeError("DeathByCaptcha: Error failed to solve reCaptcha.")
# ------------------------------------------------------------------------------- #
def requestSolve(self, site_url, site_key):
def _checkRequest(response):
@@ -169,7 +170,7 @@ class captchaSolver(reCaptcha):
else:
raise RuntimeError('DeathByCaptcha: Error no job id was returned.')
# ------------------------------------------------------------------------------- #
def getCaptchaAnswer(self, site_url, site_key, reCaptchaParams):
jobID = None
@@ -195,4 +196,6 @@ class captchaSolver(reCaptcha):
raise RuntimeError("DeathByCaptcha: reCaptcha solve took to long to execute, aborting.")
# ------------------------------------------------------------------------------- #
captchaSolver()

lib/cloudscraper/user_agent/__init__.py Normal file → Executable file

@@ -1,46 +1,82 @@
import json
import os
import random
import logging
import sys
from collections import OrderedDict
# ------------------------------------------------------------------------------- #
class User_Agent():
# ------------------------------------------------------------------------------- #
def __init__(self, *args, **kwargs):
self.headers = None
self.cipherSuite = []
self.loadUserAgent(*args, **kwargs)
# ------------------------------------------------------------------------------- #
def loadHeaders(self, user_agents, user_agent_version):
if user_agents.get(self.browser).get('releases').get(user_agent_version).get('headers'):
self.headers = user_agents.get(self.browser).get('releases').get(user_agent_version).get('headers')
else:
self.headers = user_agents.get(self.browser).get('default_headers')
# ------------------------------------------------------------------------------- #
def filterAgents(self, releases):
filtered = {}
for release in releases:
if self.mobile and releases[release]['User-Agent']['mobile']:
filtered[release] = filtered.get(release, []) + releases[release]['User-Agent']['mobile']
if self.desktop and releases[release]['User-Agent']['desktop']:
filtered[release] = filtered.get(release, []) + releases[release]['User-Agent']['desktop']
return filtered
# ------------------------------------------------------------------------------- #
def loadUserAgent(self, *args, **kwargs):
        self.browser = kwargs.pop('browser', None)

        if isinstance(self.browser, dict):
            self.desktop = self.browser.get('desktop', True)
            self.mobile = self.browser.get('mobile', True)
            self.browser = self.browser.get('browser', None)
        else:
            self.desktop = kwargs.pop('desktop', True)
            self.mobile = kwargs.pop('mobile', True)

        if not self.desktop and not self.mobile:
            sys.tracebacklimit = 0
            raise RuntimeError("Sorry you can't have mobile and desktop disabled at the same time.")

        user_agents = json.load(
            open(os.path.join(os.path.dirname(__file__), 'browsers.json'), 'r'),
            object_pairs_hook=OrderedDict
        )

        if self.browser and not user_agents.get(self.browser):
            sys.tracebacklimit = 0
            raise RuntimeError('Sorry "{}" browser User-Agent was not found.'.format(self.browser))

        if not self.browser:
            self.browser = random.SystemRandom().choice(list(user_agents))

        self.cipherSuite = user_agents.get(self.browser).get('cipherSuite', [])

        filteredAgents = self.filterAgents(user_agents.get(self.browser).get('releases'))

        user_agent_version = random.SystemRandom().choice(list(filteredAgents))

        self.loadHeaders(user_agents, user_agent_version)

        self.headers['User-Agent'] = random.SystemRandom().choice(filteredAgents[user_agent_version])
if not kwargs.get('allow_brotli', False):
if 'br' in self.headers['Accept-Encoding']:
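        # Usage sketch (not part of this commit; assumes the package is
        # importable as 'cloudscraper' and that 'chrome' is a browser key
        # present in browsers.json): the desktop/mobile filtering above can be
        # driven from create_scraper like so:
        #
        #   scraper = cloudscraper.create_scraper(
        #       browser={
        #           'browser': 'chrome',
        #           'desktop': True,
        #           'mobile': False
        #       }
        #   )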

lib/cloudscraper/user_agent/browsers.json Normal file → Executable file

File diff suppressed because it is too large

lib/dns/__init__.py Normal file

@@ -0,0 +1,56 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009, 2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""dnspython DNS toolkit"""
__all__ = [
'dnssec',
'e164',
'edns',
'entropy',
'exception',
'flags',
'hash',
'inet',
'ipv4',
'ipv6',
'message',
'name',
'namedict',
'node',
'opcode',
'query',
'rcode',
'rdata',
'rdataclass',
'rdataset',
'rdatatype',
'renderer',
'resolver',
'reversename',
'rrset',
'set',
'tokenizer',
'tsig',
'tsigkeyring',
'ttl',
'rdtypes',
'update',
'version',
'wiredata',
'zone',
]

lib/dns/_compat.py Normal file

@@ -0,0 +1,59 @@
import sys
import decimal
from decimal import Context
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
if PY3:
long = int
xrange = range
else:
long = long # pylint: disable=long-builtin
xrange = xrange # pylint: disable=xrange-builtin
# unicode / binary types
if PY3:
text_type = str
binary_type = bytes
string_types = (str,)
unichr = chr
def maybe_decode(x):
return x.decode()
def maybe_encode(x):
return x.encode()
def maybe_chr(x):
return x
def maybe_ord(x):
return x
else:
text_type = unicode # pylint: disable=unicode-builtin, undefined-variable
binary_type = str
string_types = (
basestring, # pylint: disable=basestring-builtin, undefined-variable
)
unichr = unichr # pylint: disable=unichr-builtin
def maybe_decode(x):
return x
def maybe_encode(x):
return x
def maybe_chr(x):
return chr(x)
def maybe_ord(x):
return ord(x)
def round_py2_compat(what):
"""
Python 2 and Python 3 use different rounding strategies in round(). This
function ensures that results are python2/3 compatible and backward
compatible with previous py2 releases
:param what: float
:return: rounded long
"""
d = Context(
prec=len(str(long(what))), # round to integer with max precision
rounding=decimal.ROUND_HALF_UP
).create_decimal(str(what)) # str(): python 2.6 compat
return long(d)
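# Worked example (not part of this commit): Python 3's round() uses banker's
# rounding, so round(2.5) == 2 there, while this helper keeps the Python 2
# behaviour of rounding halves up.
assert round_py2_compat(2.5) == 3
assert round_py2_compat(3.5) == 4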

lib/dns/dnssec.py Normal file

@@ -0,0 +1,519 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Common DNSSEC-related functions and constants."""
from io import BytesIO
import struct
import time
import dns.exception
import dns.name
import dns.node
import dns.rdataset
import dns.rdata
import dns.rdatatype
import dns.rdataclass
from ._compat import string_types
class UnsupportedAlgorithm(dns.exception.DNSException):
"""The DNSSEC algorithm is not supported."""
class ValidationFailure(dns.exception.DNSException):
"""The DNSSEC signature is invalid."""
#: RSAMD5
RSAMD5 = 1
#: DH
DH = 2
#: DSA
DSA = 3
#: ECC
ECC = 4
#: RSASHA1
RSASHA1 = 5
#: DSANSEC3SHA1
DSANSEC3SHA1 = 6
#: RSASHA1NSEC3SHA1
RSASHA1NSEC3SHA1 = 7
#: RSASHA256
RSASHA256 = 8
#: RSASHA512
RSASHA512 = 10
#: ECDSAP256SHA256
ECDSAP256SHA256 = 13
#: ECDSAP384SHA384
ECDSAP384SHA384 = 14
#: INDIRECT
INDIRECT = 252
#: PRIVATEDNS
PRIVATEDNS = 253
#: PRIVATEOID
PRIVATEOID = 254
_algorithm_by_text = {
'RSAMD5': RSAMD5,
'DH': DH,
'DSA': DSA,
'ECC': ECC,
'RSASHA1': RSASHA1,
'DSANSEC3SHA1': DSANSEC3SHA1,
'RSASHA1NSEC3SHA1': RSASHA1NSEC3SHA1,
'RSASHA256': RSASHA256,
'RSASHA512': RSASHA512,
'INDIRECT': INDIRECT,
'ECDSAP256SHA256': ECDSAP256SHA256,
'ECDSAP384SHA384': ECDSAP384SHA384,
'PRIVATEDNS': PRIVATEDNS,
'PRIVATEOID': PRIVATEOID,
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
_algorithm_by_value = {y: x for x, y in _algorithm_by_text.items()}
def algorithm_from_text(text):
"""Convert text into a DNSSEC algorithm value.
Returns an ``int``.
"""
value = _algorithm_by_text.get(text.upper())
if value is None:
value = int(text)
return value
def algorithm_to_text(value):
"""Convert a DNSSEC algorithm value to text
Returns a ``str``.
"""
text = _algorithm_by_value.get(value)
if text is None:
text = str(value)
return text
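# Worked example (not part of this commit), using the tables above:
assert algorithm_from_text('RSASHA256') == 8
assert algorithm_from_text('8') == 8  # unknown mnemonics fall back to int()
assert algorithm_to_text(13) == 'ECDSAP256SHA256'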
def _to_rdata(record, origin):
s = BytesIO()
record.to_wire(s, origin=origin)
return s.getvalue()
def key_id(key, origin=None):
"""Return the key id (a 16-bit number) for the specified key.
Note the *origin* parameter of this function is historical and
is not needed.
Returns an ``int`` between 0 and 65535.
"""
rdata = _to_rdata(key, origin)
rdata = bytearray(rdata)
if key.algorithm == RSAMD5:
return (rdata[-3] << 8) + rdata[-2]
else:
total = 0
for i in range(len(rdata) // 2):
total += (rdata[2 * i] << 8) + \
rdata[2 * i + 1]
if len(rdata) % 2 != 0:
total += rdata[len(rdata) - 1] << 8
total += ((total >> 16) & 0xffff)
return total & 0xffff
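# Standalone sketch (not part of this commit) of the same RFC 4034 Appendix B
# fold used above for non-RSAMD5 keys, written over a plain byte string:
def rfc4034_key_tag(rdata):
    total = 0
    for i, byte in enumerate(bytearray(rdata)):
        # even offsets contribute the high byte, odd offsets the low byte
        total += byte << 8 if i % 2 == 0 else byte
    total += (total >> 16) & 0xffff
    return total & 0xffff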
def make_ds(name, key, algorithm, origin=None):
"""Create a DS record for a DNSSEC key.
*name* is the owner name of the DS record.
*key* is a ``dns.rdtypes.ANY.DNSKEY``.
*algorithm* is a string describing which hash algorithm to use. The
currently supported hashes are "SHA1" and "SHA256". Case does not
matter for these strings.
*origin* is a ``dns.name.Name`` and will be used as the origin
if *key* is a relative name.
Returns a ``dns.rdtypes.ANY.DS``.
"""
if algorithm.upper() == 'SHA1':
dsalg = 1
hash = SHA1.new()
elif algorithm.upper() == 'SHA256':
dsalg = 2
hash = SHA256.new()
else:
raise UnsupportedAlgorithm('unsupported algorithm "%s"' % algorithm)
if isinstance(name, string_types):
name = dns.name.from_text(name, origin)
hash.update(name.canonicalize().to_wire())
hash.update(_to_rdata(key, origin))
digest = hash.digest()
dsrdata = struct.pack("!HBB", key_id(key), key.algorithm, dsalg) + digest
return dns.rdata.from_wire(dns.rdataclass.IN, dns.rdatatype.DS, dsrdata, 0,
len(dsrdata))
def _find_candidate_keys(keys, rrsig):
candidate_keys = []
value = keys.get(rrsig.signer)
if value is None:
return None
if isinstance(value, dns.node.Node):
try:
rdataset = value.find_rdataset(dns.rdataclass.IN,
dns.rdatatype.DNSKEY)
except KeyError:
return None
else:
rdataset = value
for rdata in rdataset:
if rdata.algorithm == rrsig.algorithm and \
key_id(rdata) == rrsig.key_tag:
candidate_keys.append(rdata)
return candidate_keys
def _is_rsa(algorithm):
return algorithm in (RSAMD5, RSASHA1,
RSASHA1NSEC3SHA1, RSASHA256,
RSASHA512)
def _is_dsa(algorithm):
return algorithm in (DSA, DSANSEC3SHA1)
def _is_ecdsa(algorithm):
return _have_ecdsa and (algorithm in (ECDSAP256SHA256, ECDSAP384SHA384))
def _is_md5(algorithm):
return algorithm == RSAMD5
def _is_sha1(algorithm):
return algorithm in (DSA, RSASHA1,
DSANSEC3SHA1, RSASHA1NSEC3SHA1)
def _is_sha256(algorithm):
return algorithm in (RSASHA256, ECDSAP256SHA256)
def _is_sha384(algorithm):
return algorithm == ECDSAP384SHA384
def _is_sha512(algorithm):
return algorithm == RSASHA512
def _make_hash(algorithm):
if _is_md5(algorithm):
return MD5.new()
if _is_sha1(algorithm):
return SHA1.new()
if _is_sha256(algorithm):
return SHA256.new()
if _is_sha384(algorithm):
return SHA384.new()
if _is_sha512(algorithm):
return SHA512.new()
raise ValidationFailure('unknown hash for algorithm %u' % algorithm)
def _make_algorithm_id(algorithm):
if _is_md5(algorithm):
oid = [0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05]
elif _is_sha1(algorithm):
oid = [0x2b, 0x0e, 0x03, 0x02, 0x1a]
elif _is_sha256(algorithm):
oid = [0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01]
elif _is_sha512(algorithm):
oid = [0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03]
else:
raise ValidationFailure('unknown algorithm %u' % algorithm)
olen = len(oid)
dlen = _make_hash(algorithm).digest_size
idbytes = [0x30] + [8 + olen + dlen] + \
[0x30, olen + 4] + [0x06, olen] + oid + \
[0x05, 0x00] + [0x04, dlen]
return struct.pack('!%dB' % len(idbytes), *idbytes)
def _validate_rrsig(rrset, rrsig, keys, origin=None, now=None):
"""Validate an RRset against a single signature rdata
The owner name of *rrsig* is assumed to be the same as the owner name
of *rrset*.
*rrset* is the RRset to validate. It can be a ``dns.rrset.RRset`` or
a ``(dns.name.Name, dns.rdataset.Rdataset)`` tuple.
*rrsig* is a ``dns.rdata.Rdata``, the signature to validate.
*keys* is the key dictionary, used to find the DNSKEY associated with
a given name. The dictionary is keyed by a ``dns.name.Name``, and has
``dns.node.Node`` or ``dns.rdataset.Rdataset`` values.
*origin* is a ``dns.name.Name``, the origin to use for relative names.
*now* is an ``int``, the time to use when validating the signatures,
in seconds since the UNIX epoch. The default is the current time.
"""
if isinstance(origin, string_types):
origin = dns.name.from_text(origin, dns.name.root)
candidate_keys = _find_candidate_keys(keys, rrsig)
if candidate_keys is None:
raise ValidationFailure('unknown key')
for candidate_key in candidate_keys:
# For convenience, allow the rrset to be specified as a (name,
# rdataset) tuple as well as a proper rrset
if isinstance(rrset, tuple):
rrname = rrset[0]
rdataset = rrset[1]
else:
rrname = rrset.name
rdataset = rrset
if now is None:
now = time.time()
if rrsig.expiration < now:
raise ValidationFailure('expired')
if rrsig.inception > now:
raise ValidationFailure('not yet valid')
hash = _make_hash(rrsig.algorithm)
if _is_rsa(rrsig.algorithm):
keyptr = candidate_key.key
(bytes_,) = struct.unpack('!B', keyptr[0:1])
keyptr = keyptr[1:]
if bytes_ == 0:
(bytes_,) = struct.unpack('!H', keyptr[0:2])
keyptr = keyptr[2:]
rsa_e = keyptr[0:bytes_]
rsa_n = keyptr[bytes_:]
try:
pubkey = CryptoRSA.construct(
(number.bytes_to_long(rsa_n),
number.bytes_to_long(rsa_e)))
except ValueError:
raise ValidationFailure('invalid public key')
sig = rrsig.signature
elif _is_dsa(rrsig.algorithm):
keyptr = candidate_key.key
(t,) = struct.unpack('!B', keyptr[0:1])
keyptr = keyptr[1:]
octets = 64 + t * 8
dsa_q = keyptr[0:20]
keyptr = keyptr[20:]
dsa_p = keyptr[0:octets]
keyptr = keyptr[octets:]
dsa_g = keyptr[0:octets]
keyptr = keyptr[octets:]
dsa_y = keyptr[0:octets]
pubkey = CryptoDSA.construct(
(number.bytes_to_long(dsa_y),
number.bytes_to_long(dsa_g),
number.bytes_to_long(dsa_p),
number.bytes_to_long(dsa_q)))
sig = rrsig.signature[1:]
elif _is_ecdsa(rrsig.algorithm):
# use ecdsa for NIST-384p -- not currently supported by pycryptodome
keyptr = candidate_key.key
if rrsig.algorithm == ECDSAP256SHA256:
curve = ecdsa.curves.NIST256p
key_len = 32
elif rrsig.algorithm == ECDSAP384SHA384:
curve = ecdsa.curves.NIST384p
key_len = 48
x = number.bytes_to_long(keyptr[0:key_len])
y = number.bytes_to_long(keyptr[key_len:key_len * 2])
if not ecdsa.ecdsa.point_is_valid(curve.generator, x, y):
raise ValidationFailure('invalid ECDSA key')
point = ecdsa.ellipticcurve.Point(curve.curve, x, y, curve.order)
verifying_key = ecdsa.keys.VerifyingKey.from_public_point(point,
curve)
pubkey = ECKeyWrapper(verifying_key, key_len)
r = rrsig.signature[:key_len]
s = rrsig.signature[key_len:]
sig = ecdsa.ecdsa.Signature(number.bytes_to_long(r),
number.bytes_to_long(s))
else:
raise ValidationFailure('unknown algorithm %u' % rrsig.algorithm)
hash.update(_to_rdata(rrsig, origin)[:18])
hash.update(rrsig.signer.to_digestable(origin))
if rrsig.labels < len(rrname) - 1:
suffix = rrname.split(rrsig.labels + 1)[1]
rrname = dns.name.from_text('*', suffix)
rrnamebuf = rrname.to_digestable(origin)
rrfixed = struct.pack('!HHI', rdataset.rdtype, rdataset.rdclass,
rrsig.original_ttl)
rrlist = sorted(rdataset)
for rr in rrlist:
hash.update(rrnamebuf)
hash.update(rrfixed)
rrdata = rr.to_digestable(origin)
rrlen = struct.pack('!H', len(rrdata))
hash.update(rrlen)
hash.update(rrdata)
try:
if _is_rsa(rrsig.algorithm):
verifier = pkcs1_15.new(pubkey)
# will raise ValueError if verify fails:
verifier.verify(hash, sig)
elif _is_dsa(rrsig.algorithm):
verifier = DSS.new(pubkey, 'fips-186-3')
verifier.verify(hash, sig)
elif _is_ecdsa(rrsig.algorithm):
digest = hash.digest()
if not pubkey.verify(digest, sig):
raise ValueError
else:
# Raise here for code clarity; this won't actually ever happen
# since if the algorithm is really unknown we'd already have
# raised an exception above
raise ValidationFailure('unknown algorithm %u' % rrsig.algorithm)
# If we got here, we successfully verified so we can return without error
return
except ValueError:
# this happens on an individual validation failure
continue
# nothing verified -- raise failure:
raise ValidationFailure('verify failure')
def _validate(rrset, rrsigset, keys, origin=None, now=None):
"""Validate an RRset.
*rrset* is the RRset to validate. It can be a ``dns.rrset.RRset`` or
a ``(dns.name.Name, dns.rdataset.Rdataset)`` tuple.
*rrsigset* is the signature RRset to be validated. It can be a
``dns.rrset.RRset`` or a ``(dns.name.Name, dns.rdataset.Rdataset)`` tuple.
*keys* is the key dictionary, used to find the DNSKEY associated with
a given name. The dictionary is keyed by a ``dns.name.Name``, and has
``dns.node.Node`` or ``dns.rdataset.Rdataset`` values.
*origin* is a ``dns.name.Name``, the origin to use for relative names.
*now* is an ``int``, the time to use when validating the signatures,
in seconds since the UNIX epoch. The default is the current time.
"""
if isinstance(origin, string_types):
origin = dns.name.from_text(origin, dns.name.root)
if isinstance(rrset, tuple):
rrname = rrset[0]
else:
rrname = rrset.name
if isinstance(rrsigset, tuple):
rrsigname = rrsigset[0]
rrsigrdataset = rrsigset[1]
else:
rrsigname = rrsigset.name
rrsigrdataset = rrsigset
rrname = rrname.choose_relativity(origin)
rrsigname = rrsigname.choose_relativity(origin)
if rrname != rrsigname:
raise ValidationFailure("owner names do not match")
for rrsig in rrsigrdataset:
try:
_validate_rrsig(rrset, rrsig, keys, origin, now)
return
except ValidationFailure:
pass
raise ValidationFailure("no RRSIGs validated")
def _need_pycrypto(*args, **kwargs):
raise NotImplementedError("DNSSEC validation requires pycryptodome/pycryptodomex")
try:
try:
# test we're using pycryptodome, not pycrypto (which misses SHA1 for example)
from Crypto.Hash import MD5, SHA1, SHA256, SHA384, SHA512
from Crypto.PublicKey import RSA as CryptoRSA, DSA as CryptoDSA
from Crypto.Signature import pkcs1_15, DSS
from Crypto.Util import number
except ImportError:
from Cryptodome.Hash import MD5, SHA1, SHA256, SHA384, SHA512
from Cryptodome.PublicKey import RSA as CryptoRSA, DSA as CryptoDSA
from Cryptodome.Signature import pkcs1_15, DSS
from Cryptodome.Util import number
except ImportError:
validate = _need_pycrypto
validate_rrsig = _need_pycrypto
_have_pycrypto = False
_have_ecdsa = False
else:
validate = _validate
validate_rrsig = _validate_rrsig
_have_pycrypto = True
try:
import ecdsa
import ecdsa.ecdsa
import ecdsa.ellipticcurve
import ecdsa.keys
except ImportError:
_have_ecdsa = False
else:
_have_ecdsa = True
class ECKeyWrapper(object):
def __init__(self, key, key_len):
self.key = key
self.key_len = key_len
def verify(self, digest, sig):
diglong = number.bytes_to_long(digest)
return self.key.pubkey.verifies(diglong, sig)

lib/dns/dnssec.pyi Normal file

@@ -0,0 +1,19 @@
from typing import Union, Dict, Tuple, Optional
from . import rdataset, rrset, exception, name, rdtypes, rdata, node
import dns.rdtypes.ANY.DS as DS
import dns.rdtypes.ANY.DNSKEY as DNSKEY
_have_ecdsa : bool
_have_pycrypto : bool
def validate_rrsig(rrset : Union[Tuple[name.Name, rdataset.Rdataset], rrset.RRset], rrsig : rdata.Rdata, keys : Dict[name.Name, Union[node.Node, rdataset.Rdataset]], origin : Optional[name.Name] = None, now : Optional[int] = None) -> None:
...
def validate(rrset: Union[Tuple[name.Name, rdataset.Rdataset], rrset.RRset], rrsigset : Union[Tuple[name.Name, rdataset.Rdataset], rrset.RRset], keys : Dict[name.Name, Union[node.Node, rdataset.Rdataset]], origin=None, now=None) -> None:
...
class ValidationFailure(exception.DNSException):
...
def make_ds(name : name.Name, key : DNSKEY.DNSKEY, algorithm : str, origin : Optional[name.Name] = None) -> DS.DS:
...

lib/dns/e164.py Normal file

@@ -0,0 +1,105 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2006-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS E.164 helpers."""
import dns.exception
import dns.name
import dns.resolver
from ._compat import string_types, maybe_decode
#: The public E.164 domain.
public_enum_domain = dns.name.from_text('e164.arpa.')
def from_e164(text, origin=public_enum_domain):
"""Convert an E.164 number in textual form into a Name object whose
value is the ENUM domain name for that number.
Non-digits in the text are ignored, i.e. "16505551212",
"+1.650.555.1212" and "1 (650) 555-1212" are all the same.
*text*, a ``text``, is an E.164 number in textual form.
*origin*, a ``dns.name.Name``, the domain in which the number
should be constructed. The default is ``e164.arpa.``.
Returns a ``dns.name.Name``.
"""
parts = [d for d in text if d.isdigit()]
parts.reverse()
return dns.name.from_text('.'.join(parts), origin=origin)
def to_e164(name, origin=public_enum_domain, want_plus_prefix=True):
"""Convert an ENUM domain name into an E.164 number.
Note that dnspython does not have any information about preferred
number formats within national numbering plans, so all numbers are
emitted as a simple string of digits, prefixed by a '+' (unless
*want_plus_prefix* is ``False``).
*name* is a ``dns.name.Name``, the ENUM domain name.
*origin* is a ``dns.name.Name``, a domain containing the ENUM
domain name. The name is relativized to this domain before being
converted to text. If ``None``, no relativization is done.
*want_plus_prefix* is a ``bool``. If True, add a '+' to the beginning of
the returned number.
Returns a ``text``.
"""
if origin is not None:
name = name.relativize(origin)
dlabels = [d for d in name.labels if d.isdigit() and len(d) == 1]
if len(dlabels) != len(name.labels):
raise dns.exception.SyntaxError('non-digit labels in ENUM domain name')
dlabels.reverse()
text = b''.join(dlabels)
if want_plus_prefix:
text = b'+' + text
return maybe_decode(text)
def query(number, domains, resolver=None):
"""Look for NAPTR RRs for the specified number in the specified domains.
e.g. lookup('16505551212', ['e164.dnspython.org.', 'e164.arpa.'])
*number*, a ``text`` is the number to look for.
*domains* is an iterable containing ``dns.name.Name`` values.
*resolver*, a ``dns.resolver.Resolver``, is the resolver to use. If
``None``, the default resolver is used.
"""
if resolver is None:
resolver = dns.resolver.get_default_resolver()
e_nx = dns.resolver.NXDOMAIN()
for domain in domains:
if isinstance(domain, string_types):
domain = dns.name.from_text(domain)
qname = dns.e164.from_e164(number, domain)
try:
return resolver.query(qname, 'NAPTR')
except dns.resolver.NXDOMAIN as e:
e_nx += e
raise e_nx
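# Round-trip example (not part of this commit; assumes this tree is importable
# as the usual 'dns' package):
#
#   n = dns.e164.from_e164('+1 650 555 1212')
#   str(n)               -> '2.1.2.1.5.5.5.0.5.6.1.e164.arpa.'
#   dns.e164.to_e164(n)  -> '+16505551212'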

lib/dns/e164.pyi Normal file

@@ -0,0 +1,10 @@
from typing import Optional, Iterable
from . import name, resolver
def from_e164(text : str, origin=name.Name(".")) -> name.Name:
...
def to_e164(name : name.Name, origin : Optional[name.Name] = None, want_plus_prefix=True) -> str:
...
def query(number : str, domains : Iterable[str], resolver : Optional[resolver.Resolver] = None) -> resolver.Answer:
...

lib/dns/edns.py Normal file

@@ -0,0 +1,269 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2009-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""EDNS Options"""
from __future__ import absolute_import
import math
import struct
import dns.inet
#: NSID
NSID = 3
#: DAU
DAU = 5
#: DHU
DHU = 6
#: N3U
N3U = 7
#: ECS (client-subnet)
ECS = 8
#: EXPIRE
EXPIRE = 9
#: COOKIE
COOKIE = 10
#: KEEPALIVE
KEEPALIVE = 11
#: PADDING
PADDING = 12
#: CHAIN
CHAIN = 13
class Option(object):
"""Base class for all EDNS option types."""
def __init__(self, otype):
"""Initialize an option.
*otype*, an ``int``, is the option type.
"""
self.otype = otype
def to_wire(self, file):
"""Convert an option to wire format.
"""
raise NotImplementedError
@classmethod
def from_wire(cls, otype, wire, current, olen):
"""Build an EDNS option object from wire format.
*otype*, an ``int``, is the option type.
*wire*, a ``binary``, is the wire-format message.
*current*, an ``int``, is the offset in *wire* of the beginning
of the rdata.
*olen*, an ``int``, is the length of the wire-format option data
Returns a ``dns.edns.Option``.
"""
raise NotImplementedError
def _cmp(self, other):
"""Compare an EDNS option with another option of the same type.
Returns < 0 if < *other*, 0 if == *other*, and > 0 if > *other*.
"""
raise NotImplementedError
def __eq__(self, other):
if not isinstance(other, Option):
return False
if self.otype != other.otype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Option):
return False
if self.otype != other.otype:
return False
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) > 0
class GenericOption(Option):
"""Generic Option Class
This class is used for EDNS option types for which we have no better
implementation.
"""
def __init__(self, otype, data):
super(GenericOption, self).__init__(otype)
self.data = data
def to_wire(self, file):
file.write(self.data)
def to_text(self):
return "Generic %d" % self.otype
@classmethod
def from_wire(cls, otype, wire, current, olen):
return cls(otype, wire[current: current + olen])
def _cmp(self, other):
if self.data == other.data:
return 0
if self.data > other.data:
return 1
return -1
class ECSOption(Option):
"""EDNS Client Subnet (ECS, RFC7871)"""
def __init__(self, address, srclen=None, scopelen=0):
"""*address*, a ``text``, is the client address information.
*srclen*, an ``int``, the source prefix length, which is the
leftmost number of bits of the address to be used for the
lookup. The default is 24 for IPv4 and 56 for IPv6.
*scopelen*, an ``int``, the scope prefix length. This value
must be 0 in queries, and should be set in responses.
"""
super(ECSOption, self).__init__(ECS)
af = dns.inet.af_for_address(address)
if af == dns.inet.AF_INET6:
self.family = 2
if srclen is None:
srclen = 56
elif af == dns.inet.AF_INET:
self.family = 1
if srclen is None:
srclen = 24
else:
raise ValueError('Bad ip family')
self.address = address
self.srclen = srclen
self.scopelen = scopelen
addrdata = dns.inet.inet_pton(af, address)
nbytes = int(math.ceil(srclen/8.0))
# Truncate to srclen and pad to the end of the last octet needed
# See RFC section 6
self.addrdata = addrdata[:nbytes]
nbits = srclen % 8
if nbits != 0:
# keep only the top srclen % 8 bits of the final octet, zeroing the rest
last = struct.pack('B', ord(self.addrdata[-1:]) & (0xff << (8 - nbits)))
self.addrdata = self.addrdata[:-1] + last
def to_text(self):
return "ECS {}/{} scope/{}".format(self.address, self.srclen,
self.scopelen)
def to_wire(self, file):
file.write(struct.pack('!H', self.family))
file.write(struct.pack('!BB', self.srclen, self.scopelen))
file.write(self.addrdata)
@classmethod
def from_wire(cls, otype, wire, cur, olen):
family, src, scope = struct.unpack('!HBB', wire[cur:cur+4])
cur += 4
addrlen = int(math.ceil(src/8.0))
if family == 1:
af = dns.inet.AF_INET
pad = 4 - addrlen
elif family == 2:
af = dns.inet.AF_INET6
pad = 16 - addrlen
else:
raise ValueError('unsupported family')
addr = dns.inet.inet_ntop(af, wire[cur:cur+addrlen] + b'\x00' * pad)
return cls(addr, src, scope)
def _cmp(self, other):
if self.addrdata == other.addrdata:
return 0
if self.addrdata > other.addrdata:
return 1
return -1
_type_to_class = {
ECS: ECSOption
}
def get_option_class(otype):
"""Return the class for the specified option type.
The GenericOption class is used if a more specific class is not
known.
"""
cls = _type_to_class.get(otype)
if cls is None:
cls = GenericOption
return cls
def option_from_wire(otype, wire, current, olen):
"""Build an EDNS option object from wire format.
*otype*, an ``int``, is the option type.
*wire*, a ``binary``, is the wire-format message.
*current*, an ``int``, is the offset in *wire* of the beginning
of the rdata.
*olen*, an ``int``, is the length of the wire-format option data.
Returns an instance of a subclass of ``dns.edns.Option``.
"""
cls = get_option_class(otype)
return cls.from_wire(otype, wire, current, olen)
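For reference, a minimal usage sketch of the ECS option defined above (the 192.0.2.0/24 prefix and the BytesIO sink are illustrative; the import assumes the vendored package is importable as dns):

from io import BytesIO
import dns.edns

# A client-subnet option for 192.0.2.0/24; scopelen stays 0 in queries.
ecs = dns.edns.ECSOption('192.0.2.0', srclen=24)
print(ecs.to_text())            # -> "ECS 192.0.2.0/24 scope/0"

buf = BytesIO()
ecs.to_wire(buf)                # family (2 bytes) + srclen + scopelen + 3 address bytes
assert len(buf.getvalue()) == 7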

148
lib/dns/entropy.py Normal file

@@ -0,0 +1,148 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2009-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import os
import random
import time
from ._compat import long, binary_type
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
class EntropyPool(object):
# This is an entropy pool for Python implementations that do not
# have a working SystemRandom. I'm not sure there are any, but
# leaving this code doesn't hurt anything as the library code
# is used if present.
def __init__(self, seed=None):
self.pool_index = 0
self.digest = None
self.next_byte = 0
self.lock = _threading.Lock()
try:
import hashlib
self.hash = hashlib.sha1()
self.hash_len = 20
except ImportError:
try:
import sha
self.hash = sha.new()
self.hash_len = 20
except ImportError:
import md5 # pylint: disable=import-error
self.hash = md5.new()
self.hash_len = 16
self.pool = bytearray(b'\0' * self.hash_len)
if seed is not None:
self.stir(bytearray(seed))
self.seeded = True
self.seed_pid = os.getpid()
else:
self.seeded = False
self.seed_pid = 0
def stir(self, entropy, already_locked=False):
if not already_locked:
self.lock.acquire()
try:
for c in entropy:
if self.pool_index == self.hash_len:
self.pool_index = 0
b = c & 0xff
self.pool[self.pool_index] ^= b
self.pool_index += 1
finally:
if not already_locked:
self.lock.release()
def _maybe_seed(self):
if not self.seeded or self.seed_pid != os.getpid():
try:
seed = os.urandom(16)
except Exception:
try:
r = open('/dev/urandom', 'rb', 0)
try:
seed = r.read(16)
finally:
r.close()
except Exception:
seed = str(time.time())
self.seeded = True
self.seed_pid = os.getpid()
self.digest = None
seed = bytearray(seed)
self.stir(seed, True)
def random_8(self):
self.lock.acquire()
try:
self._maybe_seed()
if self.digest is None or self.next_byte == self.hash_len:
self.hash.update(binary_type(self.pool))
self.digest = bytearray(self.hash.digest())
self.stir(self.digest, True)
self.next_byte = 0
value = self.digest[self.next_byte]
self.next_byte += 1
finally:
self.lock.release()
return value
def random_16(self):
return self.random_8() * 256 + self.random_8()
def random_32(self):
return self.random_16() * 65536 + self.random_16()
def random_between(self, first, last):
size = last - first + 1
if size > long(4294967296):
raise ValueError('too big')
if size > 65536:
rand = self.random_32
max = long(4294967295)
elif size > 256:
rand = self.random_16
max = 65535
else:
rand = self.random_8
max = 255
return first + size * rand() // (max + 1)
pool = EntropyPool()
try:
system_random = random.SystemRandom()
except Exception:
system_random = None
def random_16():
if system_random is not None:
return system_random.randrange(0, 65536)
else:
return pool.random_16()
def between(first, last):
if system_random is not None:
return system_random.randrange(first, last + 1)
else:
return pool.random_between(first, last)
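A short sketch of the module-level helpers above, which use random.SystemRandom when available and fall back to the EntropyPool otherwise (the bounds are arbitrary):

import dns.entropy

qid = dns.entropy.random_16()            # e.g. a DNS query id in [0, 65535]
assert 0 <= qid <= 65535
port = dns.entropy.between(1024, 65535)  # bounds are inclusive
assert 1024 <= port <= 65535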

10
lib/dns/entropy.pyi Normal file

@@ -0,0 +1,10 @@
from typing import Optional
from random import SystemRandom
system_random : Optional[SystemRandom]
def random_16() -> int:
pass
def between(first: int, last: int) -> int:
pass

128
lib/dns/exception.py Normal file

@@ -0,0 +1,128 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Common DNS Exceptions.
Dnspython modules may also define their own exceptions, which will
always be subclasses of ``DNSException``.
"""
class DNSException(Exception):
"""Abstract base class shared by all dnspython exceptions.
It supports two basic modes of operation:
a) Old/compatible mode is used if ``__init__`` was called with
empty *kwargs*. In compatible mode all *args* are passed
to the standard Python Exception class as before and all *args* are
printed by the standard ``__str__`` implementation. Class variable
``msg`` (or doc string if ``msg`` is ``None``) is returned from ``str()``
if *args* is empty.
b) New/parametrized mode is used if ``__init__`` was called with
non-empty *kwargs*.
In the new mode *args* must be empty and all kwargs must match
those set in class variable ``supp_kwargs``. All kwargs are stored inside
``self.kwargs`` and used in a new ``__str__`` implementation to construct
a formatted message based on the ``fmt`` class variable, a ``string``.
In the simplest case it is enough to override the ``supp_kwargs``
and ``fmt`` class variables to get nice parametrized messages.
"""
msg = None # non-parametrized message
supp_kwargs = set() # accepted parameters for _fmt_kwargs (sanity check)
fmt = None # message parametrized with results from _fmt_kwargs
def __init__(self, *args, **kwargs):
self._check_params(*args, **kwargs)
if kwargs:
self.kwargs = self._check_kwargs(**kwargs)
self.msg = str(self)
else:
self.kwargs = dict() # defined but empty for old mode exceptions
if self.msg is None:
# doc string is better implicit message than empty string
self.msg = self.__doc__
if args:
super(DNSException, self).__init__(*args)
else:
super(DNSException, self).__init__(self.msg)
def _check_params(self, *args, **kwargs):
"""Old exceptions supported only args and not kwargs.
For sanity we do not allow to mix old and new behavior."""
if args or kwargs:
assert bool(args) != bool(kwargs), \
'keyword arguments are mutually exclusive with positional args'
def _check_kwargs(self, **kwargs):
if kwargs:
assert set(kwargs.keys()) == self.supp_kwargs, \
'following set of keyword args is required: %s' % (
self.supp_kwargs)
return kwargs
def _fmt_kwargs(self, **kwargs):
"""Format kwargs before printing them.
Resulting dictionary has to have keys necessary for str.format call
on fmt class variable.
"""
fmtargs = {}
for kw, data in kwargs.items():
if isinstance(data, (list, set)):
# convert list of <someobj> to list of str(<someobj>)
fmtargs[kw] = list(map(str, data))
if len(fmtargs[kw]) == 1:
# remove list brackets [] from single-item lists
fmtargs[kw] = fmtargs[kw].pop()
else:
fmtargs[kw] = data
return fmtargs
def __str__(self):
if self.kwargs and self.fmt:
# provide custom message constructed from keyword arguments
fmtargs = self._fmt_kwargs(**self.kwargs)
return self.fmt.format(**fmtargs)
else:
# print *args directly in the same way as old DNSException
return super(DNSException, self).__str__()
class FormError(DNSException):
"""DNS message is malformed."""
class SyntaxError(DNSException):
"""Text input is malformed."""
class UnexpectedEnd(SyntaxError):
"""Text input ended unexpectedly."""
class TooBig(DNSException):
"""The DNS message is too big."""
class Timeout(DNSException):
"""The DNS operation timed out."""
supp_kwargs = {'timeout'}
fmt = "The DNS operation timed out after {timeout} seconds"

9
lib/dns/exception.pyi Normal file

@@ -0,0 +1,9 @@
from typing import Set, Optional, Dict
class DNSException(Exception):
supp_kwargs : Set[str]
kwargs : Optional[Dict]
class SyntaxError(DNSException): ...
class FormError(DNSException): ...
class Timeout(DNSException): ...

130
lib/dns/flags.py Normal file

@@ -0,0 +1,130 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2001-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Message Flags."""
# Standard DNS flags
#: Query Response
QR = 0x8000
#: Authoritative Answer
AA = 0x0400
#: Truncated Response
TC = 0x0200
#: Recursion Desired
RD = 0x0100
#: Recursion Available
RA = 0x0080
#: Authentic Data
AD = 0x0020
#: Checking Disabled
CD = 0x0010
# EDNS flags
#: DNSSEC answer OK
DO = 0x8000
_by_text = {
'QR': QR,
'AA': AA,
'TC': TC,
'RD': RD,
'RA': RA,
'AD': AD,
'CD': CD
}
_edns_by_text = {
'DO': DO
}
# We construct the inverse mappings programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mappings not to be true inverses.
_by_value = {y: x for x, y in _by_text.items()}
_edns_by_value = {y: x for x, y in _edns_by_text.items()}
def _order_flags(table):
order = list(table.items())
order.sort()
order.reverse()
return order
_flags_order = _order_flags(_by_value)
_edns_flags_order = _order_flags(_edns_by_value)
def _from_text(text, table):
flags = 0
tokens = text.split()
for t in tokens:
flags = flags | table[t.upper()]
return flags
def _to_text(flags, table, order):
text_flags = []
for k, v in order:
if flags & k != 0:
text_flags.append(v)
return ' '.join(text_flags)
def from_text(text):
"""Convert a space-separated list of flag text values into a flags
value.
Returns an ``int``
"""
return _from_text(text, _by_text)
def to_text(flags):
"""Convert a flags value into a space-separated list of flag text
values.
Returns a ``text``.
"""
return _to_text(flags, _by_value, _flags_order)
def edns_from_text(text):
"""Convert a space-separated list of EDNS flag text values into a EDNS
flags value.
Returns an ``int``
"""
return _from_text(text, _edns_by_text)
def edns_to_text(flags):
"""Convert an EDNS flags value into a space-separated list of EDNS flag
text values.
Returns a ``text``.
"""
return _to_text(flags, _edns_by_value, _edns_flags_order)
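A minimal sketch of the text/value round trip these helpers provide:

import dns.flags

f = dns.flags.from_text('QR AA RD')
assert f == 0x8000 | 0x0400 | 0x0100
print(dns.flags.to_text(f | dns.flags.RA))   # -> "QR AA RD RA"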

69
lib/dns/grange.py Normal file

@@ -0,0 +1,69 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2012-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS GENERATE range conversion."""
import dns.exception
def from_text(text):
"""Convert the text form of a range in a ``$GENERATE`` statement to an
integer.
*text*, a ``str``, the textual range in ``$GENERATE`` form.
Returns a tuple of three ``int`` values ``(start, stop, step)``.
"""
# TODO, figure out the bounds on start, stop and step.
step = 1
cur = ''
state = 0
# state 0 1 2 3 4
# x - y / z
if text and text[0] == '-':
raise dns.exception.SyntaxError("Start cannot be a negative number")
for c in text:
if c == '-' and state == 0:
start = int(cur)
cur = ''
state = 2
elif c == '/':
stop = int(cur)
cur = ''
state = 4
elif c.isdigit():
cur += c
else:
raise dns.exception.SyntaxError("Could not parse %s" % (c))
if state in (1, 3):
raise dns.exception.SyntaxError()
if state == 2:
stop = int(cur)
if state == 4:
step = int(cur)
assert step >= 1
assert start >= 0
assert start <= stop
# TODO, can start == stop?
return (start, stop, step)
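A short sketch of the $GENERATE range parser (the ranges are illustrative):

import dns.grange

assert dns.grange.from_text('1-100') == (1, 100, 1)     # step defaults to 1
assert dns.grange.from_text('32-64/8') == (32, 64, 8)   # explicit step after "/"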

37
lib/dns/hash.py Normal file

@@ -0,0 +1,37 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Hashing backwards compatibility wrapper"""
import hashlib
import warnings
warnings.warn(
"dns.hash module will be removed in future versions. Please use hashlib instead.",
DeprecationWarning)
hashes = {}
hashes['MD5'] = hashlib.md5
hashes['SHA1'] = hashlib.sha1
hashes['SHA224'] = hashlib.sha224
hashes['SHA256'] = hashlib.sha256
hashes['SHA384'] = hashlib.sha384
hashes['SHA512'] = hashlib.sha512
def get(algorithm):
return hashes[algorithm.upper()]
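A brief sketch of the deprecated wrapper (the b'example' payload is arbitrary):

import hashlib
import dns.hash   # importing it emits the DeprecationWarning above

sha256 = dns.hash.get('sha256')   # lookup is case-insensitive
assert sha256 is hashlib.sha256
print(sha256(b'example').hexdigest())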

124
lib/dns/inet.py Normal file

@@ -0,0 +1,124 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Generic Internet address helper functions."""
import socket
import dns.ipv4
import dns.ipv6
from ._compat import maybe_ord
# We assume that AF_INET is always defined.
AF_INET = socket.AF_INET
# AF_INET6 might not be defined in the socket module, but we need it.
# We'll try to use the socket module's value, and if it doesn't work,
# we'll use our own value.
try:
AF_INET6 = socket.AF_INET6
except AttributeError:
AF_INET6 = 9999
def inet_pton(family, text):
"""Convert the textual form of a network address into its binary form.
*family* is an ``int``, the address family.
*text* is a ``text``, the textual address.
Raises ``NotImplementedError`` if the address family specified is not
implemented.
Returns a ``binary``.
"""
if family == AF_INET:
return dns.ipv4.inet_aton(text)
elif family == AF_INET6:
return dns.ipv6.inet_aton(text)
else:
raise NotImplementedError
def inet_ntop(family, address):
"""Convert the binary form of a network address into its textual form.
*family* is an ``int``, the address family.
*address* is a ``binary``, the network address in binary form.
Raises ``NotImplementedError`` if the address family specified is not
implemented.
Returns a ``text``.
"""
if family == AF_INET:
return dns.ipv4.inet_ntoa(address)
elif family == AF_INET6:
return dns.ipv6.inet_ntoa(address)
else:
raise NotImplementedError
def af_for_address(text):
"""Determine the address family of a textual-form network address.
*text*, a ``text``, the textual address.
Raises ``ValueError`` if the address family cannot be determined
from the input.
Returns an ``int``.
"""
try:
dns.ipv4.inet_aton(text)
return AF_INET
except Exception:
try:
dns.ipv6.inet_aton(text)
return AF_INET6
except Exception:
raise ValueError
def is_multicast(text):
"""Is the textual-form network address a multicast address?
*text*, a ``text``, the textual address.
Raises ``ValueError`` if the address family cannot be determined
from the input.
Returns a ``bool``.
"""
try:
first = maybe_ord(dns.ipv4.inet_aton(text)[0])
return first >= 224 and first <= 239
except Exception:
try:
first = maybe_ord(dns.ipv6.inet_aton(text)[0])
return first == 255
except Exception:
raise ValueError
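A minimal sketch of these family-agnostic helpers (the addresses are documentation examples, not from the commit):

import dns.inet

assert dns.inet.af_for_address('2001:db8::1') == dns.inet.AF_INET6
assert dns.inet.inet_pton(dns.inet.AF_INET, '192.0.2.1') == b'\xc0\x00\x02\x01'
assert dns.inet.is_multicast('224.0.0.251')   # IPv4 multicast: first octet 224-239
assert dns.inet.is_multicast('ff02::fb')      # IPv6 multicast: first octet 0xff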

4
lib/dns/inet.pyi Normal file

@@ -0,0 +1,4 @@
from typing import Union
from socket import AddressFamily
AF_INET6 : Union[int, AddressFamily]

63
lib/dns/ipv4.py Normal file

@@ -0,0 +1,63 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""IPv4 helper functions."""
import struct
import dns.exception
from ._compat import binary_type
def inet_ntoa(address):
"""Convert an IPv4 address in binary form to text form.
*address*, a ``binary``, the IPv4 address in binary form.
Returns a ``text``.
"""
if len(address) != 4:
raise dns.exception.SyntaxError
if not isinstance(address, bytearray):
address = bytearray(address)
return ('%u.%u.%u.%u' % (address[0], address[1],
address[2], address[3]))
def inet_aton(text):
"""Convert an IPv4 address in text form to binary form.
*text*, a ``text``, the IPv4 address in textual form.
Returns a ``binary``.
"""
if not isinstance(text, binary_type):
text = text.encode()
parts = text.split(b'.')
if len(parts) != 4:
raise dns.exception.SyntaxError
for part in parts:
if not part.isdigit():
raise dns.exception.SyntaxError
if len(part) > 1 and part[0:1] == b'0':
# No leading zeros
raise dns.exception.SyntaxError
try:
bytes = [int(part) for part in parts]
return struct.pack('BBBB', *bytes)
except Exception:
raise dns.exception.SyntaxError
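A short round-trip sketch (192.0.2.1 is an illustrative documentation address):

import dns.ipv4

packed = dns.ipv4.inet_aton('192.0.2.1')
assert packed == b'\xc0\x00\x02\x01'
assert dns.ipv4.inet_ntoa(packed) == '192.0.2.1'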

181
lib/dns/ipv6.py Normal file

@@ -0,0 +1,181 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""IPv6 helper functions."""
import re
import binascii
import dns.exception
import dns.ipv4
from ._compat import xrange, binary_type, maybe_decode
_leading_zero = re.compile(r'0+([0-9a-f]+)')
def inet_ntoa(address):
"""Convert an IPv6 address in binary form to text form.
*address*, a ``binary``, the IPv6 address in binary form.
Raises ``ValueError`` if the address isn't 16 bytes long.
Returns a ``text``.
"""
if len(address) != 16:
raise ValueError("IPv6 addresses are 16 bytes long")
hex = binascii.hexlify(address)
chunks = []
i = 0
l = len(hex)
while i < l:
chunk = maybe_decode(hex[i : i + 4])
# strip leading zeros. we do this with an re instead of
# with lstrip() because lstrip() didn't support chars until
# python 2.2.2
m = _leading_zero.match(chunk)
if not m is None:
chunk = m.group(1)
chunks.append(chunk)
i += 4
#
# Compress the longest subsequence of 0-value chunks to ::
#
best_start = 0
best_len = 0
start = -1
last_was_zero = False
for i in xrange(8):
if chunks[i] != '0':
if last_was_zero:
end = i
current_len = end - start
if current_len > best_len:
best_start = start
best_len = current_len
last_was_zero = False
elif not last_was_zero:
start = i
last_was_zero = True
if last_was_zero:
end = 8
current_len = end - start
if current_len > best_len:
best_start = start
best_len = current_len
if best_len > 1:
if best_start == 0 and \
(best_len == 6 or
best_len == 5 and chunks[5] == 'ffff'):
# We have an embedded IPv4 address
if best_len == 6:
prefix = '::'
else:
prefix = '::ffff:'
hex = prefix + dns.ipv4.inet_ntoa(address[12:])
else:
hex = ':'.join(chunks[:best_start]) + '::' + \
':'.join(chunks[best_start + best_len:])
else:
hex = ':'.join(chunks)
return hex
_v4_ending = re.compile(br'(.*):(\d+\.\d+\.\d+\.\d+)$')
_colon_colon_start = re.compile(br'::.*')
_colon_colon_end = re.compile(br'.*::$')
def inet_aton(text):
"""Convert an IPv6 address in text form to binary form.
*text*, a ``text``, the IPv6 address in textual form.
Returns a ``binary``.
"""
#
# Our aim here is not something fast; we just want something that works.
#
if not isinstance(text, binary_type):
text = text.encode()
if text == b'::':
text = b'0::'
#
# Get rid of the icky dot-quad syntax if we have it.
#
m = _v4_ending.match(text)
if not m is None:
b = bytearray(dns.ipv4.inet_aton(m.group(2)))
text = (u"{}:{:02x}{:02x}:{:02x}{:02x}".format(m.group(1).decode(),
b[0], b[1], b[2],
b[3])).encode()
#
# Try to turn '::<whatever>' into ':<whatever>'; if no match try to
# turn '<whatever>::' into '<whatever>:'
#
m = _colon_colon_start.match(text)
if not m is None:
text = text[1:]
else:
m = _colon_colon_end.match(text)
if not m is None:
text = text[:-1]
#
# Now canonicalize into 8 chunks of 4 hex digits each
#
chunks = text.split(b':')
l = len(chunks)
if l > 8:
raise dns.exception.SyntaxError
seen_empty = False
canonical = []
for c in chunks:
if c == b'':
if seen_empty:
raise dns.exception.SyntaxError
seen_empty = True
for i in xrange(0, 8 - l + 1):
canonical.append(b'0000')
else:
lc = len(c)
if lc > 4:
raise dns.exception.SyntaxError
if lc != 4:
c = (b'0' * (4 - lc)) + c
canonical.append(c)
if l < 8 and not seen_empty:
raise dns.exception.SyntaxError
text = b''.join(canonical)
#
# Finally we can go to binary.
#
try:
return binascii.unhexlify(text)
except (binascii.Error, TypeError):
raise dns.exception.SyntaxError
_mapped_prefix = b'\x00' * 10 + b'\xff\xff'
def is_mapped(address):
"""Is the specified address a mapped IPv4 address?
*address*, a ``binary`` is an IPv6 address in binary form.
Returns a ``bool``.
"""
return address.startswith(_mapped_prefix)
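A short sketch of the IPv6 round trip, including "::" compression and the mapped-address check (addresses are illustrative):

import dns.ipv6

packed = dns.ipv6.inet_aton('2001:db8::1')
assert len(packed) == 16
assert dns.ipv6.inet_ntoa(packed) == '2001:db8::1'   # longest zero run becomes "::"
assert dns.ipv6.is_mapped(dns.ipv6.inet_aton('::ffff:192.0.2.1'))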

1175
lib/dns/message.py Normal file

File diff suppressed because it is too large

55
lib/dns/message.pyi Normal file

@@ -0,0 +1,55 @@
from typing import Optional, Dict, List, Tuple, Union
from . import name, rrset, tsig, rdatatype, entropy, edns, rdataclass
import hmac
class Message:
def to_wire(self, origin : Optional[name.Name]=None, max_size=0, **kw) -> bytes:
...
def find_rrset(self, section : List[rrset.RRset], name : name.Name, rdclass : int, rdtype : int,
covers=rdatatype.NONE, deleting : Optional[int]=None, create=False,
force_unique=False) -> rrset.RRset:
...
def __init__(self, id : Optional[int] =None) -> None:
self.id : int
self.flags = 0
self.question : List[rrset.RRset] = []
self.answer : List[rrset.RRset] = []
self.authority : List[rrset.RRset] = []
self.additional : List[rrset.RRset] = []
self.edns = -1
self.ednsflags = 0
self.payload = 0
self.options : List[edns.Option] = []
self.request_payload = 0
self.keyring = None
self.keyname = None
self.keyalgorithm = tsig.default_algorithm
self.request_mac = b''
self.other_data = b''
self.tsig_error = 0
self.fudge = 300
self.original_id = self.id
self.mac = b''
self.xfr = False
self.origin = None
self.tsig_ctx = None
self.had_tsig = False
self.multi = False
self.first = True
self.index : Dict[Tuple[rrset.RRset, name.Name, int, int, Union[int,str], int], rrset.RRset] = {}
def from_text(a : str) -> Message:
...
def from_wire(wire, keyring : Optional[Dict[name.Name,bytes]] = None, request_mac = b'', xfr=False, origin=None,
tsig_ctx : Optional[hmac.HMAC] = None, multi=False, first=True,
question_only=False, one_rr_per_rrset=False,
ignore_trailing=False) -> Message:
...
def make_response(query : Message, recursion_available=False, our_payload=8192,
fudge=300) -> Message:
...
def make_query(qname : Union[name.Name,str], rdtype : Union[str,int], rdclass : Union[int,str] =rdataclass.IN, use_edns : Optional[bool] = None,
want_dnssec=False, ednsflags : Optional[int] = None, payload : Optional[int] = None,
request_payload : Optional[int] = None, options : Optional[List[edns.Option]] = None) -> Message:
...

994
lib/dns/name.py Normal file

@@ -0,0 +1,994 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2001-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Names.
"""
from io import BytesIO
import struct
import sys
import copy
import encodings.idna
try:
import idna
have_idna_2008 = True
except ImportError:
have_idna_2008 = False
import dns.exception
import dns.wiredata
from ._compat import long, binary_type, text_type, unichr, maybe_decode
try:
maxint = sys.maxint # pylint: disable=sys-max-int
except AttributeError:
maxint = (1 << (8 * struct.calcsize("P"))) // 2 - 1
# fullcompare() result values
#: The compared names have no relationship to each other.
NAMERELN_NONE = 0
#: the first name is a superdomain of the second.
NAMERELN_SUPERDOMAIN = 1
#: The first name is a subdomain of the second.
NAMERELN_SUBDOMAIN = 2
#: The compared names are equal.
NAMERELN_EQUAL = 3
#: The compared names have a common ancestor.
NAMERELN_COMMONANCESTOR = 4
class EmptyLabel(dns.exception.SyntaxError):
"""A DNS label is empty."""
class BadEscape(dns.exception.SyntaxError):
"""An escaped code in a text format of DNS name is invalid."""
class BadPointer(dns.exception.FormError):
"""A DNS compression pointer points forward instead of backward."""
class BadLabelType(dns.exception.FormError):
"""The label type in DNS name wire format is unknown."""
class NeedAbsoluteNameOrOrigin(dns.exception.DNSException):
"""An attempt was made to convert a non-absolute name to
wire when there was also a non-absolute (or missing) origin."""
class NameTooLong(dns.exception.FormError):
"""A DNS name is > 255 octets long."""
class LabelTooLong(dns.exception.SyntaxError):
"""A DNS label is > 63 octets long."""
class AbsoluteConcatenation(dns.exception.DNSException):
"""An attempt was made to append anything other than the
empty name to an absolute DNS name."""
class NoParent(dns.exception.DNSException):
"""An attempt was made to get the parent of the root name
or the empty name."""
class NoIDNA2008(dns.exception.DNSException):
"""IDNA 2008 processing was requested but the idna module is not
available."""
class IDNAException(dns.exception.DNSException):
"""IDNA processing raised an exception."""
supp_kwargs = {'idna_exception'}
fmt = "IDNA processing exception: {idna_exception}"
class IDNACodec(object):
"""Abstract base class for IDNA encoder/decoders."""
def __init__(self):
pass
def encode(self, label):
raise NotImplementedError
def decode(self, label):
# We do not apply any IDNA policy on decode; we just decode any punycode labels we find.
downcased = label.lower()
if downcased.startswith(b'xn--'):
try:
label = downcased[4:].decode('punycode')
except Exception as e:
raise IDNAException(idna_exception=e)
else:
label = maybe_decode(label)
return _escapify(label, True)
class IDNA2003Codec(IDNACodec):
"""IDNA 2003 encoder/decoder."""
def __init__(self, strict_decode=False):
"""Initialize the IDNA 2003 encoder/decoder.
*strict_decode* is a ``bool``. If `True`, then IDNA2003 checking
is done when decoding. This can cause failures if the name
was encoded with IDNA2008. The default is `False`.
"""
super(IDNA2003Codec, self).__init__()
self.strict_decode = strict_decode
def encode(self, label):
"""Encode *label*."""
if label == '':
return b''
try:
return encodings.idna.ToASCII(label)
except UnicodeError:
raise LabelTooLong
def decode(self, label):
"""Decode *label*."""
if not self.strict_decode:
return super(IDNA2003Codec, self).decode(label)
if label == b'':
return u''
try:
return _escapify(encodings.idna.ToUnicode(label), True)
except Exception as e:
raise IDNAException(idna_exception=e)
class IDNA2008Codec(IDNACodec):
"""IDNA 2008 encoder/decoder.
*uts_46* is a ``bool``. If True, apply Unicode IDNA
compatibility processing as described in Unicode Technical
Standard #46 (http://unicode.org/reports/tr46/).
If False, do not apply the mapping. The default is False.
*transitional* is a ``bool``: If True, use the
"transitional" mode described in Unicode Technical Standard
#46. The default is False.
*allow_pure_ascii* is a ``bool``. If True, then a label which
consists of only ASCII characters is allowed. This is less
strict than regular IDNA 2008, but is also necessary for mixed
names, e.g. a name starting with "_sip._tcp." and ending
in an IDN suffix which would otherwise be disallowed. The
default is False.
*strict_decode* is a ``bool``: If True, then IDNA2008 checking
is done when decoding. This can cause failures if the name
was encoded with IDNA2003. The default is False.
"""
def __init__(self, uts_46=False, transitional=False,
allow_pure_ascii=False, strict_decode=False):
"""Initialize the IDNA 2008 encoder/decoder."""
super(IDNA2008Codec, self).__init__()
self.uts_46 = uts_46
self.transitional = transitional
self.allow_pure_ascii = allow_pure_ascii
self.strict_decode = strict_decode
def is_all_ascii(self, label):
for c in label:
if ord(c) > 0x7f:
return False
return True
def encode(self, label):
if label == '':
return b''
if self.allow_pure_ascii and self.is_all_ascii(label):
return label.encode('ascii')
if not have_idna_2008:
raise NoIDNA2008
try:
if self.uts_46:
label = idna.uts46_remap(label, False, self.transitional)
return idna.alabel(label)
except idna.IDNAError as e:
raise IDNAException(idna_exception=e)
def decode(self, label):
if not self.strict_decode:
return super(IDNA2008Codec, self).decode(label)
if label == b'':
return u''
if not have_idna_2008:
raise NoIDNA2008
try:
if self.uts_46:
label = idna.uts46_remap(label, False, False)
return _escapify(idna.ulabel(label), True)
except idna.IDNAError as e:
raise IDNAException(idna_exception=e)
_escaped = bytearray(b'"().;\\@$')
IDNA_2003_Practical = IDNA2003Codec(False)
IDNA_2003_Strict = IDNA2003Codec(True)
IDNA_2003 = IDNA_2003_Practical
IDNA_2008_Practical = IDNA2008Codec(True, False, True, False)
IDNA_2008_UTS_46 = IDNA2008Codec(True, False, False, False)
IDNA_2008_Strict = IDNA2008Codec(False, False, False, True)
IDNA_2008_Transitional = IDNA2008Codec(True, True, False, False)
IDNA_2008 = IDNA_2008_Practical
def _escapify(label, unicode_mode=False):
"""Escape the characters in label which need it.
@param unicode_mode: escapify only special and whitespace (<= 0x20)
characters
@returns: the escaped string
@rtype: string"""
if not unicode_mode:
text = ''
if isinstance(label, text_type):
label = label.encode()
for c in bytearray(label):
if c in _escaped:
text += '\\' + chr(c)
elif c > 0x20 and c < 0x7F:
text += chr(c)
else:
text += '\\%03d' % c
return text.encode()
text = u''
if isinstance(label, binary_type):
label = label.decode()
for c in label:
if c > u'\x20' and c < u'\x7f':
text += c
else:
if c >= u'\x7f':
text += c
else:
text += u'\\%03d' % ord(c)
return text
def _validate_labels(labels):
"""Check for empty labels in the middle of a label sequence,
labels that are too long, and for too many labels.
Raises ``dns.name.NameTooLong`` if the name as a whole is too long.
Raises ``dns.name.EmptyLabel`` if a label is empty (i.e. the root
label) and appears in a position other than the end of the label
sequence
"""
l = len(labels)
total = 0
i = -1
j = 0
for label in labels:
ll = len(label)
total += ll + 1
if ll > 63:
raise LabelTooLong
if i < 0 and label == b'':
i = j
j += 1
if total > 255:
raise NameTooLong
if i >= 0 and i != l - 1:
raise EmptyLabel
def _maybe_convert_to_binary(label):
"""If label is ``text``, convert it to ``binary``. If it is already
``binary`` just return it.
"""
if isinstance(label, binary_type):
return label
if isinstance(label, text_type):
return label.encode()
raise ValueError
class Name(object):
"""A DNS name.
The dns.name.Name class represents a DNS name as a tuple of
labels. Each label is a `binary` in DNS wire format. Instances
of the class are immutable.
"""
__slots__ = ['labels']
def __init__(self, labels):
"""*labels* is any iterable whose values are ``text`` or ``binary``.
"""
labels = [_maybe_convert_to_binary(x) for x in labels]
super(Name, self).__setattr__('labels', tuple(labels))
_validate_labels(self.labels)
def __setattr__(self, name, value):
# Names are immutable
raise TypeError("object doesn't support attribute assignment")
def __copy__(self):
return Name(self.labels)
def __deepcopy__(self, memo):
return Name(copy.deepcopy(self.labels, memo))
def __getstate__(self):
# Names can be pickled
return {'labels': self.labels}
def __setstate__(self, state):
super(Name, self).__setattr__('labels', state['labels'])
_validate_labels(self.labels)
def is_absolute(self):
"""Is the most significant label of this name the root label?
Returns a ``bool``.
"""
return len(self.labels) > 0 and self.labels[-1] == b''
def is_wild(self):
"""Is this name wild? (I.e. Is the least significant label '*'?)
Returns a ``bool``.
"""
return len(self.labels) > 0 and self.labels[0] == b'*'
def __hash__(self):
"""Return a case-insensitive hash of the name.
Returns an ``int``.
"""
h = long(0)
for label in self.labels:
for c in bytearray(label.lower()):
h += (h << 3) + c
return int(h % maxint)
def fullcompare(self, other):
"""Compare two names, returning a 3-tuple
``(relation, order, nlabels)``.
*relation* describes the relationship between the names,
and is one of: ``dns.name.NAMERELN_NONE``,
``dns.name.NAMERELN_SUPERDOMAIN``, ``dns.name.NAMERELN_SUBDOMAIN``,
``dns.name.NAMERELN_EQUAL``, or ``dns.name.NAMERELN_COMMONANCESTOR``.
*order* is < 0 if *self* < *other*, > 0 if *self* > *other*, and ==
0 if *self* == *other*. A relative name is always less than an
absolute name. If both names have the same relativity, then
the DNSSEC order relation is used to order them.
*nlabels* is the number of significant labels that the two names
have in common.
Here are some examples. Names ending in "." are absolute names,
those not ending in "." are relative names.
============= ============= =========== ===== =======
self other relation order nlabels
============= ============= =========== ===== =======
www.example. www.example. equal 0 3
www.example. example. subdomain > 0 2
example. www.example. superdomain < 0 2
example1.com. example2.com. common anc. < 0 2
example1 example2. none < 0 0
example1. example2 none > 0 0
============= ============= =========== ===== =======
"""
sabs = self.is_absolute()
oabs = other.is_absolute()
if sabs != oabs:
if sabs:
return (NAMERELN_NONE, 1, 0)
else:
return (NAMERELN_NONE, -1, 0)
l1 = len(self.labels)
l2 = len(other.labels)
ldiff = l1 - l2
if ldiff < 0:
l = l1
else:
l = l2
order = 0
nlabels = 0
namereln = NAMERELN_NONE
while l > 0:
l -= 1
l1 -= 1
l2 -= 1
label1 = self.labels[l1].lower()
label2 = other.labels[l2].lower()
if label1 < label2:
order = -1
if nlabels > 0:
namereln = NAMERELN_COMMONANCESTOR
return (namereln, order, nlabels)
elif label1 > label2:
order = 1
if nlabels > 0:
namereln = NAMERELN_COMMONANCESTOR
return (namereln, order, nlabels)
nlabels += 1
order = ldiff
if ldiff < 0:
namereln = NAMERELN_SUPERDOMAIN
elif ldiff > 0:
namereln = NAMERELN_SUBDOMAIN
else:
namereln = NAMERELN_EQUAL
return (namereln, order, nlabels)
def is_subdomain(self, other):
"""Is self a subdomain of other?
Note that the notion of subdomain includes equality, e.g.
"dnpython.org" is a subdomain of itself.
Returns a ``bool``.
"""
(nr, o, nl) = self.fullcompare(other)
if nr == NAMERELN_SUBDOMAIN or nr == NAMERELN_EQUAL:
return True
return False
def is_superdomain(self, other):
"""Is self a superdomain of other?
Note that the notion of superdomain includes equality, e.g.
"dnpython.org" is a superdomain of itself.
Returns a ``bool``.
"""
(nr, o, nl) = self.fullcompare(other)
if nr == NAMERELN_SUPERDOMAIN or nr == NAMERELN_EQUAL:
return True
return False
def canonicalize(self):
"""Return a name which is equal to the current name, but is in
DNSSEC canonical form.
"""
return Name([x.lower() for x in self.labels])
def __eq__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] == 0
else:
return False
def __ne__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] != 0
else:
return True
def __lt__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] < 0
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] <= 0
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] >= 0
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] > 0
else:
return NotImplemented
def __repr__(self):
return '<DNS name ' + self.__str__() + '>'
def __str__(self):
return self.to_text(False)
def to_text(self, omit_final_dot=False):
"""Convert name to DNS text format.
*omit_final_dot* is a ``bool``. If True, don't emit the final
dot (denoting the root label) for absolute names. The default
is False.
Returns a ``text``.
"""
if len(self.labels) == 0:
return maybe_decode(b'@')
if len(self.labels) == 1 and self.labels[0] == b'':
return maybe_decode(b'.')
if omit_final_dot and self.is_absolute():
l = self.labels[:-1]
else:
l = self.labels
s = b'.'.join(map(_escapify, l))
return maybe_decode(s)
def to_unicode(self, omit_final_dot=False, idna_codec=None):
"""Convert name to Unicode text format.
IDN ACE labels are converted to Unicode.
*omit_final_dot* is a ``bool``. If True, don't emit the final
dot (denoting the root label) for absolute names. The default
is False.
*idna_codec* specifies the IDNA encoder/decoder. If None, the
dns.name.IDNA_2003_Practical encoder/decoder is used.
The IDNA_2003_Practical decoder does
not impose any policy, it just decodes punycode, so if you
don't want checking for compliance, you can use this decoder
for IDNA2008 as well.
Returns a ``text``.
"""
if len(self.labels) == 0:
return u'@'
if len(self.labels) == 1 and self.labels[0] == b'':
return u'.'
if omit_final_dot and self.is_absolute():
l = self.labels[:-1]
else:
l = self.labels
if idna_codec is None:
idna_codec = IDNA_2003_Practical
return u'.'.join([idna_codec.decode(x) for x in l])
def to_digestable(self, origin=None):
"""Convert name to a format suitable for digesting in hashes.
The name is canonicalized and converted to uncompressed wire
format. All names in wire format are absolute. If the name
is a relative name, then an origin must be supplied.
*origin* is a ``dns.name.Name`` or ``None``. If the name is
relative and origin is not ``None``, then origin will be appended
to the name.
Raises ``dns.name.NeedAbsoluteNameOrOrigin`` if the name is
relative and no origin was provided.
Returns a ``binary``.
"""
if not self.is_absolute():
if origin is None or not origin.is_absolute():
raise NeedAbsoluteNameOrOrigin
labels = list(self.labels)
labels.extend(list(origin.labels))
else:
labels = self.labels
dlabels = [struct.pack('!B%ds' % len(x), len(x), x.lower())
for x in labels]
return b''.join(dlabels)
def to_wire(self, file=None, compress=None, origin=None):
"""Convert name to wire format, possibly compressing it.
*file* is the file where the name is emitted (typically a
BytesIO file). If ``None`` (the default), a ``binary``
containing the wire name will be returned.
*compress*, a ``dict``, is the compression table to use. If
``None`` (the default), names will not be compressed.
*origin* is a ``dns.name.Name`` or ``None``. If the name is
relative and origin is not ``None``, then *origin* will be appended
to it.
Raises ``dns.name.NeedAbsoluteNameOrOrigin`` if the name is
relative and no origin was provided.
Returns a ``binary`` or ``None``.
"""
if file is None:
file = BytesIO()
want_return = True
else:
want_return = False
if not self.is_absolute():
if origin is None or not origin.is_absolute():
raise NeedAbsoluteNameOrOrigin
labels = list(self.labels)
labels.extend(list(origin.labels))
else:
labels = self.labels
i = 0
for label in labels:
n = Name(labels[i:])
i += 1
if compress is not None:
pos = compress.get(n)
else:
pos = None
if pos is not None:
value = 0xc000 + pos
s = struct.pack('!H', value)
file.write(s)
break
else:
if compress is not None and len(n) > 1:
pos = file.tell()
if pos <= 0x3fff:
compress[n] = pos
l = len(label)
file.write(struct.pack('!B', l))
if l > 0:
file.write(label)
if want_return:
return file.getvalue()
def __len__(self):
"""The length of the name (in labels).
Returns an ``int``.
"""
return len(self.labels)
def __getitem__(self, index):
return self.labels[index]
def __add__(self, other):
return self.concatenate(other)
def __sub__(self, other):
return self.relativize(other)
def split(self, depth):
"""Split a name into a prefix and suffix names at the specified depth.
*depth* is an ``int`` specifying the number of labels in the suffix
Raises ``ValueError`` if *depth* was not >= 0 and <= the length of the
name.
Returns the tuple ``(prefix, suffix)``.
"""
l = len(self.labels)
if depth == 0:
return (self, dns.name.empty)
elif depth == l:
return (dns.name.empty, self)
elif depth < 0 or depth > l:
raise ValueError(
'depth must be >= 0 and <= the length of the name')
return (Name(self[: -depth]), Name(self[-depth:]))
def concatenate(self, other):
"""Return a new name which is the concatenation of self and other.
Raises ``dns.name.AbsoluteConcatenation`` if the name is
absolute and *other* is not the empty name.
Returns a ``dns.name.Name``.
"""
if self.is_absolute() and len(other) > 0:
raise AbsoluteConcatenation
labels = list(self.labels)
labels.extend(list(other.labels))
return Name(labels)
def relativize(self, origin):
"""If the name is a subdomain of *origin*, return a new name which is
the name relative to origin. Otherwise return the name.
For example, relativizing ``www.dnspython.org.`` to origin
``dnspython.org.`` returns the name ``www``. Relativizing ``example.``
to origin ``dnspython.org.`` returns ``example.``.
Returns a ``dns.name.Name``.
"""
if origin is not None and self.is_subdomain(origin):
return Name(self[: -len(origin)])
else:
return self
def derelativize(self, origin):
"""If the name is a relative name, return a new name which is the
concatenation of the name and origin. Otherwise return the name.
For example, derelativizing ``www`` to origin ``dnspython.org.``
returns the name ``www.dnspython.org.``. Derelativizing ``example.``
to origin ``dnspython.org.`` returns ``example.``.
Returns a ``dns.name.Name``.
"""
if not self.is_absolute():
return self.concatenate(origin)
else:
return self
def choose_relativity(self, origin=None, relativize=True):
"""Return a name with the relativity desired by the caller.
If *origin* is ``None``, then the name is returned.
Otherwise, if *relativize* is ``True`` the name is
relativized, and if *relativize* is ``False`` the name is
derelativized.
Returns a ``dns.name.Name``.
"""
if origin:
if relativize:
return self.relativize(origin)
else:
return self.derelativize(origin)
else:
return self
def parent(self):
"""Return the parent of the name.
For example, the parent of ``www.dnspython.org.`` is ``dnspython.org.``.
Raises ``dns.name.NoParent`` if the name is either the root name or the
empty name, and thus has no parent.
Returns a ``dns.name.Name``.
"""
if self == root or self == empty:
raise NoParent
return Name(self.labels[1:])
#: The root name, '.'
root = Name([b''])
#: The empty name.
empty = Name([])
def from_unicode(text, origin=root, idna_codec=None):
"""Convert unicode text into a Name object.
Labels are encoded in IDN ACE form according to rules specified by
the IDNA codec.
*text*, a ``text``, is the text to convert into a name.
*origin*, a ``dns.name.Name``, specifies the origin to
append to non-absolute names. The default is the root name.
*idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
is used.
Returns a ``dns.name.Name``.
"""
if not isinstance(text, text_type):
raise ValueError("input to from_unicode() must be a unicode string")
if not (origin is None or isinstance(origin, Name)):
raise ValueError("origin must be a Name or None")
labels = []
label = u''
escaping = False
edigits = 0
total = 0
if idna_codec is None:
idna_codec = IDNA_2003
if text == u'@':
text = u''
if text:
if text == u'.':
return Name([b'']) # no Unicode "u" on this constant!
for c in text:
if escaping:
if edigits == 0:
if c.isdigit():
total = int(c)
edigits += 1
else:
label += c
escaping = False
else:
if not c.isdigit():
raise BadEscape
total *= 10
total += int(c)
edigits += 1
if edigits == 3:
escaping = False
label += unichr(total)
elif c in [u'.', u'\u3002', u'\uff0e', u'\uff61']:
if len(label) == 0:
raise EmptyLabel
labels.append(idna_codec.encode(label))
label = u''
elif c == u'\\':
escaping = True
edigits = 0
total = 0
else:
label += c
if escaping:
raise BadEscape
if len(label) > 0:
labels.append(idna_codec.encode(label))
else:
labels.append(b'')
if (len(labels) == 0 or labels[-1] != b'') and origin is not None:
labels.extend(list(origin.labels))
return Name(labels)
def from_text(text, origin=root, idna_codec=None):
"""Convert text into a Name object.
*text*, a ``text``, is the text to convert into a name.
*origin*, a ``dns.name.Name``, specifies the origin to
append to non-absolute names. The default is the root name.
*idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
is used.
Returns a ``dns.name.Name``.
"""
if isinstance(text, text_type):
return from_unicode(text, origin, idna_codec)
if not isinstance(text, binary_type):
raise ValueError("input to from_text() must be a string")
if not (origin is None or isinstance(origin, Name)):
raise ValueError("origin must be a Name or None")
labels = []
label = b''
escaping = False
edigits = 0
total = 0
if text == b'@':
text = b''
if text:
if text == b'.':
return Name([b''])
for c in bytearray(text):
byte_ = struct.pack('!B', c)
if escaping:
if edigits == 0:
if byte_.isdigit():
total = int(byte_)
edigits += 1
else:
label += byte_
escaping = False
else:
if not byte_.isdigit():
raise BadEscape
total *= 10
total += int(byte_)
edigits += 1
if edigits == 3:
escaping = False
label += struct.pack('!B', total)
elif byte_ == b'.':
if len(label) == 0:
raise EmptyLabel
labels.append(label)
label = b''
elif byte_ == b'\\':
escaping = True
edigits = 0
total = 0
else:
label += byte_
if escaping:
raise BadEscape
if len(label) > 0:
labels.append(label)
else:
labels.append(b'')
if (len(labels) == 0 or labels[-1] != b'') and origin is not None:
labels.extend(list(origin.labels))
return Name(labels)
def from_wire(message, current):
"""Convert possibly compressed wire format into a Name.
*message* is a ``binary`` containing an entire DNS message in DNS
wire form.
*current*, an ``int``, is the offset of the beginning of the name
from the start of the message
Raises ``dns.name.BadPointer`` if a compression pointer did not
point backwards in the message.
Raises ``dns.name.BadLabelType`` if an invalid label type was encountered.
Returns a ``(dns.name.Name, int)`` tuple consisting of the name
that was read and the number of bytes of the wire format message
which were consumed reading it.
"""
if not isinstance(message, binary_type):
raise ValueError("input to from_wire() must be a byte string")
message = dns.wiredata.maybe_wrap(message)
labels = []
biggest_pointer = current
hops = 0
count = message[current]
current += 1
cused = 1
while count != 0:
if count < 64:
labels.append(message[current: current + count].unwrap())
current += count
if hops == 0:
cused += count
elif count >= 192:
current = (count & 0x3f) * 256 + message[current]
if hops == 0:
cused += 1
if current >= biggest_pointer:
raise BadPointer
biggest_pointer = current
hops += 1
else:
raise BadLabelType
count = message[current]
current += 1
if hops == 0:
cused += 1
labels.append('')
return (Name(labels), cused)
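A minimal sketch of the Name API above (the dnspython.org names are illustrative):

import dns.name

www = dns.name.from_text('www.dnspython.org')   # origin defaults to the root name
origin = dns.name.from_text('dnspython.org')
assert www.is_subdomain(origin)
assert www.relativize(origin).to_text() == 'www'
assert origin.parent().to_text() == 'org.'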

35
lib/dns/name.pyi Normal file

@@ -0,0 +1,35 @@
from typing import Optional, Union, Tuple, Iterable, List
class Name:
def is_subdomain(self, o : Name) -> bool: ...
def is_superdomain(self, o : Name) -> bool: ...
def __init__(self, labels : Iterable[Union[bytes,str]]) -> None:
self.labels : List[bytes]
def is_absolute(self) -> bool: ...
def is_wild(self) -> bool: ...
def fullcompare(self, other) -> Tuple[int,int,int]: ...
def canonicalize(self) -> Name: ...
def __lt__(self, other : Name): ...
def __le__(self, other : Name): ...
def __ge__(self, other : Name): ...
def __gt__(self, other : Name): ...
def to_text(self, omit_final_dot=False) -> str: ...
def to_unicode(self, omit_final_dot=False, idna_codec=None) -> str: ...
def to_digestable(self, origin=None) -> bytes: ...
def to_wire(self, file=None, compress=None, origin=None) -> Optional[bytes]: ...
def __add__(self, other : Name): ...
def __sub__(self, other : Name): ...
def split(self, depth) -> Tuple[Name, Name]: ...
def concatenate(self, other : Name) -> Name: ...
def relativize(self, origin): ...
def derelativize(self, origin): ...
def choose_relativity(self, origin : Optional[Name] = None, relativize=True): ...
def parent(self) -> Name: ...
class IDNACodec:
pass
def from_text(text, origin : Optional[Name] = Name('.'), idna_codec : Optional[IDNACodec] = None) -> Name:
...
empty : Name

108
lib/dns/namedict.py Normal file

@@ -0,0 +1,108 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2017 Nominum, Inc.
# Copyright (C) 2016 Coresec Systems AB
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND CORESEC SYSTEMS AB DISCLAIMS ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL CORESEC
# SYSTEMS AB BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS name dictionary"""
import dns.name
from ._compat import xrange
# MutableMapping moved to collections.abc in Python 3.3; keep a Python 2 fallback
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
class NameDict(MutableMapping):
"""A dictionary whose keys are dns.name.Name objects.
In addition to being like a regular Python dictionary, this
dictionary can also get the deepest match for a given key.
"""
__slots__ = ["max_depth", "max_depth_items", "__store"]
def __init__(self, *args, **kwargs):
super(NameDict, self).__init__()
self.__store = dict()
#: the maximum depth of the keys that have ever been added
self.max_depth = 0
#: the number of items of maximum depth
self.max_depth_items = 0
self.update(dict(*args, **kwargs))
def __update_max_depth(self, key):
if len(key) == self.max_depth:
self.max_depth_items = self.max_depth_items + 1
elif len(key) > self.max_depth:
self.max_depth = len(key)
self.max_depth_items = 1
def __getitem__(self, key):
return self.__store[key]
def __setitem__(self, key, value):
if not isinstance(key, dns.name.Name):
raise ValueError('NameDict key must be a name')
self.__store[key] = value
self.__update_max_depth(key)
def __delitem__(self, key):
value = self.__store.pop(key)
if len(value) == self.max_depth:
self.max_depth_items = self.max_depth_items - 1
if self.max_depth_items == 0:
self.max_depth = 0
for k in self.__store:
self.__update_max_depth(k)
def __iter__(self):
return iter(self.__store)
def __len__(self):
return len(self.__store)
def has_key(self, key):
return key in self.__store
def get_deepest_match(self, name):
"""Find the deepest match to *fname* in the dictionary.
The deepest match is the longest name in the dictionary which is
a superdomain of *name*. Note that *superdomain* includes matching
*name* itself.
*name*, a ``dns.name.Name``, the name to find.
Returns a ``(key, value)`` tuple where *key* is the deepest
``dns.name.Name``, and *value* is the value associated with *key*.
"""
depth = len(name)
if depth > self.max_depth:
depth = self.max_depth
for i in xrange(-depth, 0):
n = dns.name.Name(name[i:])
if n in self:
return (n, self[n])
v = self[dns.name.empty]
return (dns.name.empty, v)
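A quick sketch of how the deepest-match lookup behaves (illustrative only, not part of the diff; assumes the vendored lib/ directory is importable as dns):

import dns.name
from dns.namedict import NameDict

d = NameDict()
d[dns.name.from_text('example.com.')] = 'zone data'
# The longest stored superdomain of www.example.com. is example.com.
key, value = d.get_deepest_match(dns.name.from_text('www.example.com.'))
print(key, value)   # example.com. zone data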

182
lib/dns/node.py Normal file
View File

@@ -0,0 +1,182 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2001-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS nodes. A node is a set of rdatasets."""
from io import StringIO
import dns.rdataset
import dns.rdatatype
import dns.renderer
class Node(object):
"""A Node is a set of rdatasets."""
__slots__ = ['rdatasets']
def __init__(self):
#: the set of rdatasets, represented as a list.
self.rdatasets = []
def to_text(self, name, **kw):
"""Convert a node to text format.
Each rdataset at the node is printed. Any keyword arguments
to this method are passed on to the rdataset's to_text() method.
*name*, a ``dns.name.Name`` or ``text``, the owner name of the rdatasets.
Returns a ``text``.
"""
s = StringIO()
for rds in self.rdatasets:
if len(rds) > 0:
s.write(rds.to_text(name, **kw))
s.write(u'\n')
return s.getvalue()[:-1]
def __repr__(self):
return '<DNS node ' + str(id(self)) + '>'
def __eq__(self, other):
#
# This is inefficient. Good thing we don't need to do it much.
#
for rd in self.rdatasets:
if rd not in other.rdatasets:
return False
for rd in other.rdatasets:
if rd not in self.rdatasets:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self.rdatasets)
def __iter__(self):
return iter(self.rdatasets)
def find_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Find an rdataset matching the specified properties in the
current node.
*rdclass*, an ``int``, the class of the rdataset.
*rdtype*, an ``int``, the type of the rdataset.
*covers*, an ``int``, the covered type. Usually this value is
dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
dns.rdatatype.RRSIG, then the covers value will be the rdata
type the SIG/RRSIG covers. The library treats the SIG and RRSIG
types as if they were a family of
types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
easier to work with than if RRSIGs covering different rdata
types were aggregated into a single RRSIG rdataset.
*create*, a ``bool``. If True, create the rdataset if it is not found.
Raises ``KeyError`` if an rdataset of the desired type and class does
not exist and *create* is not ``True``.
Returns a ``dns.rdataset.Rdataset``.
"""
for rds in self.rdatasets:
if rds.match(rdclass, rdtype, covers):
return rds
if not create:
raise KeyError
rds = dns.rdataset.Rdataset(rdclass, rdtype)
self.rdatasets.append(rds)
return rds
def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Get an rdataset matching the specified properties in the
current node.
None is returned if an rdataset of the specified type and
class does not exist and *create* is not ``True``.
*rdclass*, an ``int``, the class of the rdataset.
*rdtype*, an ``int``, the type of the rdataset.
*covers*, an ``int``, the covered type. Usually this value is
dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
dns.rdatatype.RRSIG, then the covers value will be the rdata
type the SIG/RRSIG covers. The library treats the SIG and RRSIG
types as if they were a family of
types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
easier to work with than if RRSIGs covering different rdata
types were aggregated into a single RRSIG rdataset.
*create*, a ``bool``. If True, create the rdataset if it is not found.
Returns a ``dns.rdataset.Rdataset`` or ``None``.
"""
try:
rds = self.find_rdataset(rdclass, rdtype, covers, create)
except KeyError:
rds = None
return rds
def delete_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
"""Delete the rdataset matching the specified properties in the
current node.
If a matching rdataset does not exist, it is not an error.
*rdclass*, an ``int``, the class of the rdataset.
*rdtype*, an ``int``, the type of the rdataset.
*covers*, an ``int``, the covered type.
"""
rds = self.get_rdataset(rdclass, rdtype, covers)
if rds is not None:
self.rdatasets.remove(rds)
def replace_rdataset(self, replacement):
"""Replace an rdataset.
It is not an error if there is no rdataset matching *replacement*.
Ownership of the *replacement* object is transferred to the node;
in other words, this method does not store a copy of *replacement*
at the node, it stores *replacement* itself.
*replacement*, a ``dns.rdataset.Rdataset``.
Raises ``ValueError`` if *replacement* is not a
``dns.rdataset.Rdataset``.
"""
if not isinstance(replacement, dns.rdataset.Rdataset):
raise ValueError('replacement is not an rdataset')
self.delete_rdataset(replacement.rdclass, replacement.rdtype,
replacement.covers)
self.rdatasets.append(replacement)
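A short sketch of the node API above (illustrative; the owner name and address are placeholders):

import dns.name
import dns.node
import dns.rdataclass
import dns.rdataset
import dns.rdatatype

n = dns.node.Node()
# create=True adds an empty IN/A rdataset to the node if none exists yet
rds = n.find_rdataset(dns.rdataclass.IN, dns.rdatatype.A, create=True)
rds.update(dns.rdataset.from_text('IN', 'A', 300, '192.0.2.1'))
print(n.to_text(dns.name.from_text('www.example.com.')))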

17
lib/dns/node.pyi Normal file
View File

@@ -0,0 +1,17 @@
from typing import List, Optional, Union
from . import rdataset, rdatatype, name
class Node:
def __init__(self):
self.rdatasets : List[rdataset.Rdataset]
def to_text(self, name : Union[str,name.Name], **kw) -> str:
...
def find_rdataset(self, rdclass : int, rdtype : int, covers=rdatatype.NONE,
create=False) -> rdataset.Rdataset:
...
def get_rdataset(self, rdclass : int, rdtype : int, covers=rdatatype.NONE,
create=False) -> Optional[rdataset.Rdataset]:
...
def delete_rdataset(self, rdclass : int, rdtype : int, covers=rdatatype.NONE):
...
def replace_rdataset(self, replacement : rdataset.Rdataset) -> None:
...

119
lib/dns/opcode.py Normal file
View File

@@ -0,0 +1,119 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2001-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Opcodes."""
import dns.exception
#: Query
QUERY = 0
#: Inverse Query (historical)
IQUERY = 1
#: Server Status (unspecified and unimplemented anywhere)
STATUS = 2
#: Notify
NOTIFY = 4
#: Dynamic Update
UPDATE = 5
_by_text = {
'QUERY': QUERY,
'IQUERY': IQUERY,
'STATUS': STATUS,
'NOTIFY': NOTIFY,
'UPDATE': UPDATE
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
_by_value = {y: x for x, y in _by_text.items()}
class UnknownOpcode(dns.exception.DNSException):
"""An DNS opcode is unknown."""
def from_text(text):
"""Convert text into an opcode.
*text*, a ``text``, the textual opcode.
Raises ``dns.opcode.UnknownOpcode`` if the opcode is unknown.
Returns an ``int``.
"""
if text.isdigit():
value = int(text)
if value >= 0 and value <= 15:
return value
value = _by_text.get(text.upper())
if value is None:
raise UnknownOpcode
return value
def from_flags(flags):
"""Extract an opcode from DNS message flags.
*flags*, an ``int``, the DNS flags.
Returns an ``int``.
"""
return (flags & 0x7800) >> 11
def to_flags(value):
"""Convert an opcode to a value suitable for ORing into DNS message
flags.
*value*, an ``int``, the DNS opcode value.
Returns an ``int``.
"""
return (value << 11) & 0x7800
def to_text(value):
"""Convert an opcode to text.
*value*, an ``int``, the opcode value.
Raises ``dns.opcode.UnknownOpcode`` if the opcode is unknown.
Returns a ``text``.
"""
text = _by_value.get(value)
if text is None:
text = str(value)
return text
def is_update(flags):
"""Is the opcode in flags UPDATE?
*flags*, an ``int``, the DNS message flags.
Returns a ``bool``.
"""
return from_flags(flags) == UPDATE
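The flag arithmetic above packs the opcode into bits 11-14 of the DNS header flags word; a round-trip sketch:

import dns.opcode

flags = dns.opcode.to_flags(dns.opcode.UPDATE)   # 5 << 11 == 0x2800
assert dns.opcode.from_flags(flags) == dns.opcode.UPDATE
assert dns.opcode.is_update(flags)
print(dns.opcode.to_text(dns.opcode.from_text('notify')))   # NOTIFY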

0
lib/dns/py.typed Normal file
View File

683
lib/dns/query.py Normal file
View File

@@ -0,0 +1,683 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Talk to a DNS server."""
from __future__ import generators
import errno
import select
import socket
import struct
import sys
import time
import dns.exception
import dns.inet
import dns.name
import dns.message
import dns.rcode
import dns.rdataclass
import dns.rdatatype
# xfr() below uses dns.rrset and dns.tsig; import them explicitly rather
# than relying on dns.message having pulled them in as a side effect.
import dns.rrset
import dns.tsig
from ._compat import long, string_types, PY3
if PY3:
select_error = OSError
else:
select_error = select.error
# Function used to create a socket. Can be overridden if needed in special
# situations.
socket_factory = socket.socket
class UnexpectedSource(dns.exception.DNSException):
"""A DNS query response came from an unexpected address or port."""
class BadResponse(dns.exception.FormError):
"""A DNS query response does not respond to the question asked."""
class TransferError(dns.exception.DNSException):
"""A zone transfer response got a non-zero rcode."""
def __init__(self, rcode):
message = 'Zone transfer error: %s' % dns.rcode.to_text(rcode)
super(TransferError, self).__init__(message)
self.rcode = rcode
def _compute_expiration(timeout):
if timeout is None:
return None
else:
return time.time() + timeout
# This module can use either poll() or select() as the "polling backend".
#
# A backend function takes an fd, bools for readability, writablity, and
# error detection, and a timeout.
def _poll_for(fd, readable, writable, error, timeout):
"""Poll polling backend."""
event_mask = 0
if readable:
event_mask |= select.POLLIN
if writable:
event_mask |= select.POLLOUT
if error:
event_mask |= select.POLLERR
pollable = select.poll()
pollable.register(fd, event_mask)
if timeout:
event_list = pollable.poll(long(timeout * 1000))
else:
event_list = pollable.poll()
return bool(event_list)
def _select_for(fd, readable, writable, error, timeout):
"""Select polling backend."""
rset, wset, xset = [], [], []
if readable:
rset = [fd]
if writable:
wset = [fd]
if error:
xset = [fd]
if timeout is None:
(rcount, wcount, xcount) = select.select(rset, wset, xset)
else:
(rcount, wcount, xcount) = select.select(rset, wset, xset, timeout)
return bool((rcount or wcount or xcount))
def _wait_for(fd, readable, writable, error, expiration):
# Use the selected polling backend to wait for any of the specified
# events. An "expiration" absolute time is converted into a relative
# timeout.
done = False
while not done:
if expiration is None:
timeout = None
else:
timeout = expiration - time.time()
if timeout <= 0.0:
raise dns.exception.Timeout
try:
if not _polling_backend(fd, readable, writable, error, timeout):
raise dns.exception.Timeout
except select_error as e:
if e.args[0] != errno.EINTR:
raise e
done = True
def _set_polling_backend(fn):
# Internal API. Do not use.
global _polling_backend
_polling_backend = fn
if hasattr(select, 'poll'):
# Prefer poll() on platforms that support it because it has no
# limits on the maximum value of a file descriptor (plus it will
# be more efficient for high values).
_polling_backend = _poll_for
else:
_polling_backend = _select_for
def _wait_for_readable(s, expiration):
_wait_for(s, True, False, True, expiration)
def _wait_for_writable(s, expiration):
_wait_for(s, False, True, True, expiration)
def _addresses_equal(af, a1, a2):
# Convert the first value of the tuple, which is a textual format
# address into binary form, so that we are not confused by different
# textual representations of the same address
try:
n1 = dns.inet.inet_pton(af, a1[0])
n2 = dns.inet.inet_pton(af, a2[0])
except dns.exception.SyntaxError:
return False
return n1 == n2 and a1[1:] == a2[1:]
def _destination_and_source(af, where, port, source, source_port):
# Apply defaults and compute destination and source tuples
# suitable for use in connect(), sendto(), or bind().
if af is None:
try:
af = dns.inet.af_for_address(where)
except Exception:
af = dns.inet.AF_INET
if af == dns.inet.AF_INET:
destination = (where, port)
if source is not None or source_port != 0:
if source is None:
source = '0.0.0.0'
source = (source, source_port)
elif af == dns.inet.AF_INET6:
destination = (where, port, 0, 0)
if source is not None or source_port != 0:
if source is None:
source = '::'
source = (source, source_port, 0, 0)
return (af, destination, source)
def send_udp(sock, what, destination, expiration=None):
"""Send a DNS message to the specified UDP socket.
*sock*, a ``socket``.
*what*, a ``binary`` or ``dns.message.Message``, the message to send.
*destination*, a destination tuple appropriate for the address family
of the socket, specifying where to send the query.
*expiration*, a ``float`` or ``None``, the absolute time at which
a timeout exception should be raised. If ``None``, no timeout will
occur.
Returns an ``(int, float)`` tuple of bytes sent and the sent time.
"""
if isinstance(what, dns.message.Message):
what = what.to_wire()
_wait_for_writable(sock, expiration)
sent_time = time.time()
n = sock.sendto(what, destination)
return (n, sent_time)
def receive_udp(sock, destination, expiration=None,
ignore_unexpected=False, one_rr_per_rrset=False,
keyring=None, request_mac=b'', ignore_trailing=False):
"""Read a DNS message from a UDP socket.
*sock*, a ``socket``.
*destination*, a destination tuple appropriate for the address family
of the socket, specifying where the associated query was sent.
*expiration*, a ``float`` or ``None``, the absolute time at which
a timeout exception should be raised. If ``None``, no timeout will
occur.
*ignore_unexpected*, a ``bool``. If ``True``, ignore responses from
unexpected sources.
*one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own
RRset.
*keyring*, a ``dict``, the keyring to use for TSIG.
*request_mac*, a ``binary``, the MAC of the request (for TSIG).
*ignore_trailing*, a ``bool``. If ``True``, ignore trailing
junk at end of the received message.
Raises if the message is malformed, if network errors occur, or if
there is a timeout.
Returns a ``dns.message.Message`` object.
"""
wire = b''
while 1:
_wait_for_readable(sock, expiration)
(wire, from_address) = sock.recvfrom(65535)
if _addresses_equal(sock.family, from_address, destination) or \
(dns.inet.is_multicast(destination[0]) and
from_address[1:] == destination[1:]):
break
if not ignore_unexpected:
raise UnexpectedSource('got a response from '
'%s instead of %s' % (from_address,
destination))
received_time = time.time()
r = dns.message.from_wire(wire, keyring=keyring, request_mac=request_mac,
one_rr_per_rrset=one_rr_per_rrset,
ignore_trailing=ignore_trailing)
return (r, received_time)
def udp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
ignore_unexpected=False, one_rr_per_rrset=False, ignore_trailing=False):
"""Return the response obtained after sending a query via UDP.
*q*, a ``dns.message.Message``, the query to send
*where*, a ``text`` containing an IPv4 or IPv6 address, where
to send the message.
*timeout*, a ``float`` or ``None``, the number of seconds to wait before the
query times out. If ``None``, the default, wait forever.
*port*, an ``int``, the port to send the message to. The default is 53.
*af*, an ``int``, the address family to use. The default is ``None``,
which causes the address family to be inferred from the form of
*where*. If the inference attempt fails, AF_INET is used. This
parameter is historical; you need never set it.
*source*, a ``text`` containing an IPv4 or IPv6 address, specifying
the source address. The default is the wildcard address.
*source_port*, an ``int``, the port from which to send the message.
The default is 0.
*ignore_unexpected*, a ``bool``. If ``True``, ignore responses from
unexpected sources.
*one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own
RRset.
*ignore_trailing*, a ``bool``. If ``True``, ignore trailing
junk at end of the received message.
Returns a ``dns.message.Message``.
"""
wire = q.to_wire()
(af, destination, source) = _destination_and_source(af, where, port,
source, source_port)
s = socket_factory(af, socket.SOCK_DGRAM, 0)
received_time = None
sent_time = None
try:
expiration = _compute_expiration(timeout)
s.setblocking(0)
if source is not None:
s.bind(source)
(_, sent_time) = send_udp(s, wire, destination, expiration)
(r, received_time) = receive_udp(s, destination, expiration,
ignore_unexpected, one_rr_per_rrset,
q.keyring, q.mac, ignore_trailing)
finally:
if sent_time is None or received_time is None:
response_time = 0
else:
response_time = received_time - sent_time
s.close()
r.time = response_time
if not q.is_response(r):
raise BadResponse
return r
def _net_read(sock, count, expiration):
"""Read the specified number of bytes from sock. Keep trying until we
either get the desired amount, or we hit EOF.
A Timeout exception will be raised if the operation is not completed
by the expiration time.
"""
s = b''
while count > 0:
_wait_for_readable(sock, expiration)
n = sock.recv(count)
if n == b'':
raise EOFError
count = count - len(n)
s = s + n
return s
def _net_write(sock, data, expiration):
"""Write the specified data to the socket.
A Timeout exception will be raised if the operation is not completed
by the expiration time.
"""
current = 0
l = len(data)
while current < l:
_wait_for_writable(sock, expiration)
current += sock.send(data[current:])
def send_tcp(sock, what, expiration=None):
"""Send a DNS message to the specified TCP socket.
*sock*, a ``socket``.
*what*, a ``binary`` or ``dns.message.Message``, the message to send.
*expiration*, a ``float`` or ``None``, the absolute time at which
a timeout exception should be raised. If ``None``, no timeout will
occur.
Returns an ``(int, float)`` tuple of bytes sent and the sent time.
"""
if isinstance(what, dns.message.Message):
what = what.to_wire()
l = len(what)
# copying the wire into tcpmsg is inefficient, but lets us
# avoid writev() or doing a short write that would get pushed
# onto the net
tcpmsg = struct.pack("!H", l) + what
_wait_for_writable(sock, expiration)
sent_time = time.time()
_net_write(sock, tcpmsg, expiration)
return (len(tcpmsg), sent_time)
def receive_tcp(sock, expiration=None, one_rr_per_rrset=False,
keyring=None, request_mac=b'', ignore_trailing=False):
"""Read a DNS message from a TCP socket.
*sock*, a ``socket``.
*expiration*, a ``float`` or ``None``, the absolute time at which
a timeout exception should be raised. If ``None``, no timeout will
occur.
*one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own
RRset.
*keyring*, a ``dict``, the keyring to use for TSIG.
*request_mac*, a ``binary``, the MAC of the request (for TSIG).
*ignore_trailing*, a ``bool``. If ``True``, ignore trailing
junk at end of the received message.
Raises if the message is malformed, if network errors occur, or if
there is a timeout.
Returns a ``dns.message.Message`` object.
"""
ldata = _net_read(sock, 2, expiration)
(l,) = struct.unpack("!H", ldata)
wire = _net_read(sock, l, expiration)
received_time = time.time()
r = dns.message.from_wire(wire, keyring=keyring, request_mac=request_mac,
one_rr_per_rrset=one_rr_per_rrset,
ignore_trailing=ignore_trailing)
return (r, received_time)
def _connect(s, address):
try:
s.connect(address)
except socket.error:
(ty, v) = sys.exc_info()[:2]
if hasattr(v, 'errno'):
v_err = v.errno
else:
v_err = v[0]
if v_err not in [errno.EINPROGRESS, errno.EWOULDBLOCK, errno.EALREADY]:
raise v
def tcp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
one_rr_per_rrset=False, ignore_trailing=False):
"""Return the response obtained after sending a query via TCP.
*q*, a ``dns.message.Message``, the query to send
*where*, a ``text`` containing an IPv4 or IPv6 address, where
to send the message.
*timeout*, a ``float`` or ``None``, the number of seconds to wait before the
query times out. If ``None``, the default, wait forever.
*port*, an ``int``, the port to send the message to. The default is 53.
*af*, an ``int``, the address family to use. The default is ``None``,
which causes the address family to be inferred from the form of
*where*. If the inference attempt fails, AF_INET is used. This
parameter is historical; you need never set it.
*source*, a ``text`` containing an IPv4 or IPv6 address, specifying
the source address. The default is the wildcard address.
*source_port*, an ``int``, the port from which to send the message.
The default is 0.
*one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own
RRset.
*ignore_trailing*, a ``bool``. If ``True``, ignore trailing
junk at end of the received message.
Returns a ``dns.message.Message``.
"""
wire = q.to_wire()
(af, destination, source) = _destination_and_source(af, where, port,
source, source_port)
s = socket_factory(af, socket.SOCK_STREAM, 0)
begin_time = None
received_time = None
try:
expiration = _compute_expiration(timeout)
s.setblocking(0)
begin_time = time.time()
if source is not None:
s.bind(source)
_connect(s, destination)
send_tcp(s, wire, expiration)
(r, received_time) = receive_tcp(s, expiration, one_rr_per_rrset,
q.keyring, q.mac, ignore_trailing)
finally:
if begin_time is None or received_time is None:
response_time = 0
else:
response_time = received_time - begin_time
s.close()
r.time = response_time
if not q.is_response(r):
raise BadResponse
return r
def xfr(where, zone, rdtype=dns.rdatatype.AXFR, rdclass=dns.rdataclass.IN,
timeout=None, port=53, keyring=None, keyname=None, relativize=True,
af=None, lifetime=None, source=None, source_port=0, serial=0,
use_udp=False, keyalgorithm=dns.tsig.default_algorithm):
"""Return a generator for the responses to a zone transfer.
*where*, a ``text`` containing an IPv4 or IPv6 address, where
to send the message.
*zone*, a ``dns.name.Name`` or ``text``, the name of the zone to transfer.
*rdtype*, an ``int`` or ``text``, the type of zone transfer. The
default is ``dns.rdatatype.AXFR``. ``dns.rdatatype.IXFR`` can be
used to do an incremental transfer instead.
*rdclass*, an ``int`` or ``text``, the class of the zone transfer.
The default is ``dns.rdataclass.IN``.
*timeout*, a ``float``, the number of seconds to wait for each
response message. If ``None``, the default, wait forever.
*port*, an ``int``, the port to send the message to. The default is 53.
*keyring*, a ``dict``, the keyring to use for TSIG.
*keyname*, a ``dns.name.Name`` or ``text``, the name of the TSIG
key to use.
*relativize*, a ``bool``. If ``True``, all names in the zone will be
relativized to the zone origin. It is essential that the
relativize setting matches the one specified to
``dns.zone.from_xfr()`` if using this generator to make a zone.
*af*, an ``int``, the address family to use. The default is ``None``,
which causes the address family to be inferred from the form of
*where*. If the inference attempt fails, AF_INET is used. This
parameter is historical; you need never set it.
*lifetime*, a ``float``, the total number of seconds to spend
doing the transfer. If ``None``, the default, then there is no
limit on the time the transfer may take.
*source*, a ``text`` containing an IPv4 or IPv6 address, specifying
the source address. The default is the wildcard address.
*source_port*, an ``int``, the port from which to send the message.
The default is 0.
*serial*, an ``int``, the SOA serial number to use as the base for
an IXFR diff sequence (only meaningful if *rdtype* is
``dns.rdatatype.IXFR``).
*use_udp*, a ``bool``. If ``True``, use UDP (only meaningful for IXFR).
*keyalgorithm*, a ``dns.name.Name`` or ``text``, the TSIG algorithm to use.
Raises on errors, and so does the generator.
Returns a generator of ``dns.message.Message`` objects.
"""
if isinstance(zone, string_types):
zone = dns.name.from_text(zone)
if isinstance(rdtype, string_types):
rdtype = dns.rdatatype.from_text(rdtype)
q = dns.message.make_query(zone, rdtype, rdclass)
if rdtype == dns.rdatatype.IXFR:
rrset = dns.rrset.from_text(zone, 0, 'IN', 'SOA',
'. . %u 0 0 0 0' % serial)
q.authority.append(rrset)
if keyring is not None:
q.use_tsig(keyring, keyname, algorithm=keyalgorithm)
wire = q.to_wire()
(af, destination, source) = _destination_and_source(af, where, port,
source, source_port)
if use_udp:
if rdtype != dns.rdatatype.IXFR:
raise ValueError('cannot do a UDP AXFR')
s = socket_factory(af, socket.SOCK_DGRAM, 0)
else:
s = socket_factory(af, socket.SOCK_STREAM, 0)
s.setblocking(0)
if source is not None:
s.bind(source)
expiration = _compute_expiration(lifetime)
_connect(s, destination)
l = len(wire)
if use_udp:
_wait_for_writable(s, expiration)
s.send(wire)
else:
tcpmsg = struct.pack("!H", l) + wire
_net_write(s, tcpmsg, expiration)
done = False
delete_mode = True
expecting_SOA = False
soa_rrset = None
if relativize:
origin = zone
oname = dns.name.empty
else:
origin = None
oname = zone
tsig_ctx = None
first = True
while not done:
mexpiration = _compute_expiration(timeout)
if mexpiration is None or \
   (expiration is not None and mexpiration > expiration):
mexpiration = expiration
if use_udp:
_wait_for_readable(s, expiration)
(wire, from_address) = s.recvfrom(65535)
else:
ldata = _net_read(s, 2, mexpiration)
(l,) = struct.unpack("!H", ldata)
wire = _net_read(s, l, mexpiration)
is_ixfr = (rdtype == dns.rdatatype.IXFR)
r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
xfr=True, origin=origin, tsig_ctx=tsig_ctx,
multi=True, first=first,
one_rr_per_rrset=is_ixfr)
rcode = r.rcode()
if rcode != dns.rcode.NOERROR:
raise TransferError(rcode)
tsig_ctx = r.tsig_ctx
first = False
answer_index = 0
if soa_rrset is None:
if not r.answer or r.answer[0].name != oname:
raise dns.exception.FormError(
"No answer or RRset not for qname")
rrset = r.answer[0]
if rrset.rdtype != dns.rdatatype.SOA:
raise dns.exception.FormError("first RRset is not an SOA")
answer_index = 1
soa_rrset = rrset.copy()
if rdtype == dns.rdatatype.IXFR:
if soa_rrset[0].serial <= serial:
#
# We're already up-to-date.
#
done = True
else:
expecting_SOA = True
#
# Process SOAs in the answer section (other than the initial
# SOA in the first message).
#
for rrset in r.answer[answer_index:]:
if done:
raise dns.exception.FormError("answers after final SOA")
if rrset.rdtype == dns.rdatatype.SOA and rrset.name == oname:
if expecting_SOA:
if rrset[0].serial != serial:
raise dns.exception.FormError(
"IXFR base serial mismatch")
expecting_SOA = False
elif rdtype == dns.rdatatype.IXFR:
delete_mode = not delete_mode
#
# If this SOA RRset is equal to the first we saw then we're
# finished. If this is an IXFR we also check that we're seeing
# the record in the expected part of the response.
#
if rrset == soa_rrset and \
(rdtype == dns.rdatatype.AXFR or
(rdtype == dns.rdatatype.IXFR and delete_mode)):
done = True
elif expecting_SOA:
#
# We made an IXFR request and are expecting another
# SOA RR, but saw something else, so this must be an
# AXFR response.
#
rdtype = dns.rdatatype.AXFR
expecting_SOA = False
if done and q.keyring and not r.had_tsig:
raise dns.exception.FormError("missing TSIG")
yield r
s.close()
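A minimal end-to-end sketch of the UDP path defined above (illustrative; 8.8.8.8 is just an example resolver and network access is assumed):

import dns.message
import dns.query
import dns.rcode

q = dns.message.make_query('www.example.com.', 'A')
r = dns.query.udp(q, '8.8.8.8', timeout=2.0)
print(dns.rcode.to_text(r.rcode()), len(r.answer))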

15
lib/dns/query.pyi Normal file
View File

@@ -0,0 +1,15 @@
from typing import Optional, Union, Dict, Generator, Any
from . import message, tsig, rdatatype, rdataclass, name
def tcp(q : message.Message, where : str, timeout : Optional[float] = None, port=53, af : Optional[int] = None, source : Optional[str] = None, source_port : int = 0,
one_rr_per_rrset=False) -> message.Message:
pass
def xfr(where : str, zone : Union[name.Name,str], rdtype=rdatatype.AXFR, rdclass=rdataclass.IN,
        timeout : Optional[float] = None, port=53, keyring : Optional[Dict[name.Name, bytes]] = None, keyname : Optional[Union[str,name.Name]] = None, relativize=True,
af : Optional[int] =None, lifetime : Optional[float]=None, source : Optional[str] =None, source_port=0, serial=0,
use_udp=False, keyalgorithm=tsig.default_algorithm) -> Generator[Any,Any,message.Message]:
pass
def udp(q : message.Message, where : str, timeout : Optional[float] = None, port=53, af : Optional[int] = None, source : Optional[str] = None, source_port=0,
ignore_unexpected=False, one_rr_per_rrset=False) -> message.Message:
...

144
lib/dns/rcode.py Normal file
View File

@@ -0,0 +1,144 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2001-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Result Codes."""
import dns.exception
from ._compat import long
#: No error
NOERROR = 0
#: Form error
FORMERR = 1
#: Server failure
SERVFAIL = 2
#: Name does not exist ("Name Error" in RFC 1035 terminology).
NXDOMAIN = 3
#: Not implemented
NOTIMP = 4
#: Refused
REFUSED = 5
#: Name exists.
YXDOMAIN = 6
#: RRset exists.
YXRRSET = 7
#: RRset does not exist.
NXRRSET = 8
#: Not authoritative.
NOTAUTH = 9
#: Name not in zone.
NOTZONE = 10
#: Bad EDNS version.
BADVERS = 16
_by_text = {
'NOERROR': NOERROR,
'FORMERR': FORMERR,
'SERVFAIL': SERVFAIL,
'NXDOMAIN': NXDOMAIN,
'NOTIMP': NOTIMP,
'REFUSED': REFUSED,
'YXDOMAIN': YXDOMAIN,
'YXRRSET': YXRRSET,
'NXRRSET': NXRRSET,
'NOTAUTH': NOTAUTH,
'NOTZONE': NOTZONE,
'BADVERS': BADVERS
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
_by_value = {y: x for x, y in _by_text.items()}
class UnknownRcode(dns.exception.DNSException):
"""A DNS rcode is unknown."""
def from_text(text):
"""Convert text into an rcode.
*text*, a ``text``, the textual rcode or an integer in textual form.
Raises ``dns.rcode.UnknownRcode`` if the rcode mnemonic is unknown.
Returns an ``int``.
"""
if text.isdigit():
v = int(text)
if v >= 0 and v <= 4095:
return v
v = _by_text.get(text.upper())
if v is None:
raise UnknownRcode
return v
def from_flags(flags, ednsflags):
"""Return the rcode value encoded by flags and ednsflags.
*flags*, an ``int``, the DNS flags field.
*ednsflags*, an ``int``, the EDNS flags field.
Raises ``ValueError`` if rcode is < 0 or > 4095
Returns an ``int``.
"""
value = (flags & 0x000f) | ((ednsflags >> 20) & 0xff0)
if value < 0 or value > 4095:
raise ValueError('rcode must be >= 0 and <= 4095')
return value
def to_flags(value):
"""Return a (flags, ednsflags) tuple which encodes the rcode.
*value*, an ``int``, the rcode.
Raises ``ValueError`` if rcode is < 0 or > 4095.
Returns an ``(int, int)`` tuple.
"""
if value < 0 or value > 4095:
raise ValueError('rcode must be >= 0 and <= 4095')
v = value & 0xf
ev = long(value & 0xff0) << 20
return (v, ev)
def to_text(value):
"""Convert rcode into text.
*value*, an ``int``, the rcode.
Raises ``ValueError`` if rcode is < 0 or > 4095.
Returns a ``text``.
"""
if value < 0 or value > 4095:
raise ValueError('rcode must be >= 0 and <= 4095')
text = _by_value.get(value)
if text is None:
text = str(value)
return text
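Because the header rcode field is only 4 bits, values above 15 (such as BADVERS) spill their high bits into the EDNS flags, which is what from_flags()/to_flags() encode; a round-trip sketch:

import dns.rcode

(flags, ednsflags) = dns.rcode.to_flags(dns.rcode.BADVERS)
assert dns.rcode.from_flags(flags, ednsflags) == dns.rcode.BADVERS
print(dns.rcode.to_text(dns.rcode.NXDOMAIN))   # NXDOMAIN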

456
lib/dns/rdata.py Normal file
View File

@@ -0,0 +1,456 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2001-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdata."""
from io import BytesIO
import base64
import binascii
import dns.exception
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
import dns.wiredata
from ._compat import xrange, string_types, text_type
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
_hex_chunksize = 32
def _hexify(data, chunksize=_hex_chunksize):
"""Convert a binary string into its hex encoding, broken up into chunks
of chunksize characters separated by a space.
"""
line = binascii.hexlify(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
_base64_chunksize = 32
def _base64ify(data, chunksize=_base64_chunksize):
"""Convert a binary string into its base64 encoding, broken up into chunks
of chunksize characters separated by a space.
"""
line = base64.b64encode(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
__escaped = bytearray(b'"\\')
def _escapify(qstring):
"""Escape the characters in a quoted string which need it."""
if isinstance(qstring, text_type):
qstring = qstring.encode()
if not isinstance(qstring, bytearray):
qstring = bytearray(qstring)
text = ''
for c in qstring:
if c in __escaped:
text += '\\' + chr(c)
elif c >= 0x20 and c < 0x7F:
text += chr(c)
else:
text += '\\%03d' % c
return text
def _truncate_bitmap(what):
"""Determine the index of greatest byte that isn't all zeros, and
return the bitmap that contains all the bytes less than that index.
"""
for i in xrange(len(what) - 1, -1, -1):
if what[i] != 0:
return what[0: i + 1]
return what[0:1]
class Rdata(object):
"""Base class for all DNS rdata types."""
__slots__ = ['rdclass', 'rdtype']
def __init__(self, rdclass, rdtype):
"""Initialize an rdata.
*rdclass*, an ``int``, is the rdataclass of the Rdata.
*rdtype*, an ``int``, is the rdatatype of the Rdata.
"""
self.rdclass = rdclass
self.rdtype = rdtype
def covers(self):
"""Return the type a Rdata covers.
DNS SIG/RRSIG rdatas apply to a specific type; this type is
returned by the covers() function. If the rdata type is not
SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
creating rdatasets, allowing the rdataset to contain only RRSIGs
of a particular type, e.g. RRSIG(NS).
Returns an ``int``.
"""
return dns.rdatatype.NONE
def extended_rdatatype(self):
"""Return a 32-bit type value, the least significant 16 bits of
which are the ordinary DNS type, and the upper 16 bits of which are
the "covered" type, if any.
Returns an ``int``.
"""
return self.covers() << 16 | self.rdtype
def to_text(self, origin=None, relativize=True, **kw):
"""Convert an rdata to text format.
Returns a ``text``.
"""
raise NotImplementedError
def to_wire(self, file, compress=None, origin=None):
"""Convert an rdata to wire format.
Returns a ``binary``.
"""
raise NotImplementedError
def to_digestable(self, origin=None):
"""Convert rdata to a format suitable for digesting in hashes. This
is also the DNSSEC canonical form.
Returns a ``binary``.
"""
f = BytesIO()
self.to_wire(f, None, origin)
return f.getvalue()
def validate(self):
"""Check that the current contents of the rdata's fields are
valid.
If you change an rdata by assigning to its fields,
it is a good idea to call validate() when you are done making
changes.
Raises various exceptions if there are problems.
Returns ``None``.
"""
dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
def __repr__(self):
covers = self.covers()
if covers == dns.rdatatype.NONE:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
str(self) + '>'
def __str__(self):
return self.to_text()
def _cmp(self, other):
"""Compare an rdata with another rdata of the same rdtype and
rdclass.
Return < 0 if self < other in the DNSSEC ordering, 0 if self
== other, and > 0 if self > other.
"""
our = self.to_digestable(dns.name.root)
their = other.to_digestable(dns.name.root)
if our == their:
return 0
elif our > their:
return 1
else:
return -1
def __eq__(self, other):
if not isinstance(other, Rdata):
return False
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Rdata):
return True
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return True
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) > 0
def __hash__(self):
return hash(self.to_digestable(dns.name.root))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
raise NotImplementedError
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
raise NotImplementedError
def choose_relativity(self, origin=None, relativize=True):
"""Convert any domain names in the rdata to the specified
relativization.
"""
class GenericRdata(Rdata):
"""Generic Rdata Class
This class is used for rdata types for which we have no better
implementation. It implements the DNS "unknown RRs" scheme.
"""
__slots__ = ['data']
def __init__(self, rdclass, rdtype, data):
super(GenericRdata, self).__init__(rdclass, rdtype)
self.data = data
def to_text(self, origin=None, relativize=True, **kw):
return r'\# %d ' % len(self.data) + _hexify(self.data)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
token = tok.get()
if not token.is_identifier() or token.value != r'\#':
raise dns.exception.SyntaxError(
r'generic rdata does not start with \#')
length = tok.get_int()
chunks = []
while 1:
token = tok.get()
if token.is_eol_or_eof():
break
chunks.append(token.value.encode())
hex = b''.join(chunks)
data = binascii.unhexlify(hex)
if len(data) != length:
raise dns.exception.SyntaxError(
'generic rdata hex data has wrong length')
return cls(rdclass, rdtype, data)
def to_wire(self, file, compress=None, origin=None):
file.write(self.data)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
return cls(rdclass, rdtype, wire[current: current + rdlen])
_rdata_modules = {}
_module_prefix = 'dns.rdtypes'
_import_lock = _threading.Lock()
def get_rdata_class(rdclass, rdtype):
def import_module(name):
with _import_lock:
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
mod = _rdata_modules.get((rdclass, rdtype))
rdclass_text = dns.rdataclass.to_text(rdclass)
rdtype_text = dns.rdatatype.to_text(rdtype)
rdtype_text = rdtype_text.replace('-', '_')
if not mod:
mod = _rdata_modules.get((dns.rdataclass.ANY, rdtype))
if not mod:
try:
mod = import_module('.'.join([_module_prefix,
rdclass_text, rdtype_text]))
_rdata_modules[(rdclass, rdtype)] = mod
except ImportError:
try:
mod = import_module('.'.join([_module_prefix,
'ANY', rdtype_text]))
_rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
except ImportError:
mod = None
if mod:
cls = getattr(mod, rdtype_text)
else:
cls = GenericRdata
return cls
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
"""Build an rdata object from text format.
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_text() class method is called
with the parameters to this function.
If *tok* is a ``text``, then a tokenizer is created and the string
is used as its input.
*rdclass*, an ``int``, the rdataclass.
*rdtype*, an ``int``, the rdatatype.
*tok*, a ``dns.tokenizer.Tokenizer`` or a ``text``.
*origin*, a ``dns.name.Name`` (or ``None``), the
origin to use for relative names.
*relativize*, a ``bool``. If true, name will be relativized to
the specified origin.
Returns an instance of the chosen Rdata subclass.
"""
if isinstance(tok, string_types):
tok = dns.tokenizer.Tokenizer(tok)
cls = get_rdata_class(rdclass, rdtype)
if cls != GenericRdata:
# peek at first token
token = tok.get()
tok.unget(token)
if token.is_identifier() and \
token.value == r'\#':
#
# Known type using the generic syntax. Extract the
# wire form from the generic syntax, and then run
# from_wire on it.
#
rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
relativize)
return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
origin)
return cls.from_text(rdclass, rdtype, tok, origin, relativize)
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
"""Build an rdata object from wire format
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_wire() class method is called
with the parameters to this function.
*rdclass*, an ``int``, the rdataclass.
*rdtype*, an ``int``, the rdatatype.
*wire*, a ``binary``, the wire-format message.
*current*, an ``int``, the offset in wire of the beginning of
the rdata.
*rdlen*, an ``int``, the length of the wire-format rdata
*origin*, a ``dns.name.Name`` (or ``None``). If not ``None``,
then names will be relativized to this origin.
Returns an instance of the chosen Rdata subclass.
"""
wire = dns.wiredata.maybe_wrap(wire)
cls = get_rdata_class(rdclass, rdtype)
return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
class RdatatypeExists(dns.exception.DNSException):
"""DNS rdatatype already exists."""
supp_kwargs = {'rdclass', 'rdtype'}
fmt = "The rdata type with class {rdclass} and rdtype {rdtype} " + \
"already exists."
def register_type(implementation, rdtype, rdtype_text, is_singleton=False,
rdclass=dns.rdataclass.IN):
"""Dynamically register a module to handle an rdatatype.
*implementation*, a module implementing the type in the usual dnspython
way.
*rdtype*, an ``int``, the rdatatype to register.
*rdtype_text*, a ``text``, the textual form of the rdatatype.
*is_singleton*, a ``bool``, indicating if the type is a singleton (i.e.
RRsets of the type can have only one member.)
*rdclass*, the rdataclass of the type, or ``dns.rdataclass.ANY`` if
it applies to all classes.
"""
existing_cls = get_rdata_class(rdclass, rdtype)
if existing_cls != GenericRdata:
raise RdatatypeExists(rdclass=rdclass, rdtype=rdtype)
_rdata_modules[(rdclass, rdtype)] = implementation
dns.rdatatype.register_type(rdtype, rdtype_text, is_singleton)
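A sketch of the two parsing paths from_text() supports: the type-specific syntax and the RFC 3597 generic ("unknown RR", leading \#) syntax, which decode to equal rdatas:

import dns.rdata
import dns.rdataclass
import dns.rdatatype

a = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.0.2.1')
g = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, r'\# 4 c0000201')
assert a == g   # c0000201 is the wire form of 192.0.2.1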

17
lib/dns/rdata.pyi Normal file
View File

@@ -0,0 +1,17 @@
from typing import Dict, Tuple, Any, Optional
from .name import Name
class Rdata:
def __init__(self):
self.address : str
def to_wire(self, file, compress : Optional[Dict[Name,int]], origin : Optional[Name]) -> bytes:
...
@classmethod
def from_text(cls, rdclass : int, rdtype : int, tok, origin=None, relativize=True):
...
_rdata_modules : Dict[Tuple[Any,Rdata],Any]
def from_text(rdclass : int, rdtype : int, tok : Optional[str], origin : Optional[Name] = None, relativize : bool = True):
...
def from_wire(rdclass : int, rdtype : int, wire : bytes, current : int, rdlen : int, origin : Optional[Name] = None):
...

122
lib/dns/rdataclass.py Normal file
View File

@@ -0,0 +1,122 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2001-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Rdata Classes."""
import re
import dns.exception
RESERVED0 = 0
IN = 1
CH = 3
HS = 4
NONE = 254
ANY = 255
_by_text = {
'RESERVED0': RESERVED0,
'IN': IN,
'CH': CH,
'HS': HS,
'NONE': NONE,
'ANY': ANY
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
_by_value = {y: x for x, y in _by_text.items()}
# Now that we've built the inverse map, we can add class aliases to
# the _by_text mapping.
_by_text.update({
'INTERNET': IN,
'CHAOS': CH,
'HESIOD': HS
})
_metaclasses = {
NONE: True,
ANY: True
}
_unknown_class_pattern = re.compile('CLASS([0-9]+)$', re.I)
class UnknownRdataclass(dns.exception.DNSException):
"""A DNS class is unknown."""
def from_text(text):
"""Convert text into a DNS rdata class value.
The input text can be a defined DNS RR class mnemonic or
instance of the DNS generic class syntax.
For example, "IN" and "CLASS1" will both result in a value of 1.
Raises ``dns.rdataclass.UnknownRdataclass`` if the class is unknown.
Raises ``ValueError`` if the rdata class value is not >= 0 and <= 65535.
Returns an ``int``.
"""
value = _by_text.get(text.upper())
if value is None:
match = _unknown_class_pattern.match(text)
if match is None:
raise UnknownRdataclass
value = int(match.group(1))
if value < 0 or value > 65535:
raise ValueError("class must be between >= 0 and <= 65535")
return value
def to_text(value):
"""Convert a DNS rdata type value to text.
If the value has a known mnemonic, it will be used, otherwise the
DNS generic class syntax will be used.
Raises ``ValueError`` if the rdata class value is not >= 0 and <= 65535.
Returns a ``str``.
"""
if value < 0 or value > 65535:
raise ValueError("class must be between >= 0 and <= 65535")
text = _by_value.get(value)
if text is None:
text = 'CLASS' + repr(value)
return text
def is_metaclass(rdclass):
"""True if the specified class is a metaclass.
The currently defined metaclasses are ANY and NONE.
*rdclass* is an ``int``.
"""
if rdclass in _metaclasses:
return True
return False
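A sketch of the mnemonic and generic-syntax conversions defined above:

import dns.rdataclass

assert dns.rdataclass.from_text('IN') == dns.rdataclass.from_text('CLASS1') == 1
print(dns.rdataclass.to_text(1))    # IN
print(dns.rdataclass.to_text(42))   # CLASS42 (no mnemonic defined)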

347
lib/dns/rdataset.py Normal file
View File

@@ -0,0 +1,347 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2001-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)"""
import random
from io import StringIO
import struct
import dns.exception
import dns.rdatatype
import dns.rdataclass
import dns.rdata
import dns.set
from ._compat import string_types
# define SimpleSet here for backwards compatibility
SimpleSet = dns.set.Set
class DifferingCovers(dns.exception.DNSException):
"""An attempt was made to add a DNS SIG/RRSIG whose covered type
is not the same as that of the other rdatas in the rdataset."""
class IncompatibleTypes(dns.exception.DNSException):
"""An attempt was made to add DNS RR data of an incompatible type."""
class Rdataset(dns.set.Set):
"""A DNS rdataset."""
__slots__ = ['rdclass', 'rdtype', 'covers', 'ttl']
def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE, ttl=0):
"""Create a new rdataset of the specified class and type.
*rdclass*, an ``int``, the rdataclass.
*rdtype*, an ``int``, the rdatatype.
*covers*, an ``int``, the covered rdatatype.
*ttl*, an ``int``, the TTL.
"""
super(Rdataset, self).__init__()
self.rdclass = rdclass
self.rdtype = rdtype
self.covers = covers
self.ttl = ttl
def _clone(self):
obj = super(Rdataset, self)._clone()
obj.rdclass = self.rdclass
obj.rdtype = self.rdtype
obj.covers = self.covers
obj.ttl = self.ttl
return obj
def update_ttl(self, ttl):
"""Perform TTL minimization.
Set the TTL of the rdataset to be the lesser of the set's current
TTL or the specified TTL. If the set contains no rdatas, set the TTL
to the specified TTL.
*ttl*, an ``int``.
"""
if len(self) == 0:
self.ttl = ttl
elif ttl < self.ttl:
self.ttl = ttl
def add(self, rd, ttl=None):
"""Add the specified rdata to the rdataset.
If the optional *ttl* parameter is supplied, then
``self.update_ttl(ttl)`` will be called prior to adding the rdata.
*rd*, a ``dns.rdata.Rdata``, the rdata
*ttl*, an ``int``, the TTL.
Raises ``dns.rdataset.IncompatibleTypes`` if the type and class
do not match the type and class of the rdataset.
Raises ``dns.rdataset.DifferingCovers`` if the type is a signature
type and the covered type does not match that of the rdataset.
"""
#
# If we're adding a signature, do some special handling to
# check that the signature covers the same type as the
# other rdatas in this rdataset. If this is the first rdata
# in the set, initialize the covers field.
#
if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
raise IncompatibleTypes
if ttl is not None:
self.update_ttl(ttl)
if self.rdtype == dns.rdatatype.RRSIG or \
self.rdtype == dns.rdatatype.SIG:
covers = rd.covers()
if len(self) == 0 and self.covers == dns.rdatatype.NONE:
self.covers = covers
elif self.covers != covers:
raise DifferingCovers
if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
self.clear()
super(Rdataset, self).add(rd)
def union_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).union_update(other)
def intersection_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).intersection_update(other)
def update(self, other):
"""Add all rdatas in other to self.
*other*, a ``dns.rdataset.Rdataset``, the rdataset from which
to update.
"""
self.update_ttl(other.ttl)
super(Rdataset, self).update(other)
def __repr__(self):
if self.covers == 0:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>'
def __str__(self):
return self.to_text()
def __eq__(self, other):
if not isinstance(other, Rdataset):
return False
if self.rdclass != other.rdclass or \
self.rdtype != other.rdtype or \
self.covers != other.covers:
return False
return super(Rdataset, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def to_text(self, name=None, origin=None, relativize=True,
override_rdclass=None, **kw):
"""Convert the rdataset into DNS master file format.
See ``dns.name.Name.choose_relativity`` for more information
on how *origin* and *relativize* determine the way names
are emitted.
Any additional keyword arguments are passed on to the rdata
``to_text()`` method.
*name*, a ``dns.name.Name``. If name is not ``None``, emit RRs with
*name* as the owner name.
*origin*, a ``dns.name.Name`` or ``None``, the origin for relative
names.
*relativize*, a ``bool``. If ``True``, names will be relativized
to *origin*.
"""
if name is not None:
name = name.choose_relativity(origin, relativize)
ntext = str(name)
pad = ' '
else:
ntext = ''
pad = ''
s = StringIO()
if override_rdclass is not None:
rdclass = override_rdclass
else:
rdclass = self.rdclass
if len(self) == 0:
#
# Empty rdatasets are used for the question section, and in
# some dynamic updates, so we don't need to print out the TTL
# (which is meaningless anyway).
#
s.write(u'{}{}{} {}\n'.format(ntext, pad,
dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype)))
else:
for rd in self:
s.write(u'%s%s%d %s %s %s\n' %
(ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype),
rd.to_text(origin=origin, relativize=relativize,
**kw)))
#
# We strip off the final \n for the caller's convenience in printing
#
return s.getvalue()[:-1]
def to_wire(self, name, file, compress=None, origin=None,
override_rdclass=None, want_shuffle=True):
"""Convert the rdataset to wire format.
*name*, a ``dns.name.Name`` is the owner name to use.
*file* is the file where the name is emitted (typically a
BytesIO file).
*compress*, a ``dict``, is the compression table to use. If
``None`` (the default), names will not be compressed.
*origin* is a ``dns.name.Name`` or ``None``. If the name is
relative and origin is not ``None``, then *origin* will be appended
to it.
*override_rdclass*, an ``int``, is used as the class instead of the
class of the rdataset. This is useful when rendering rdatasets
associated with dynamic updates.
*want_shuffle*, a ``bool``. If ``True``, then the order of the
Rdatas within the Rdataset will be shuffled before rendering.
Returns an ``int``, the number of records emitted.
"""
if override_rdclass is not None:
rdclass = override_rdclass
want_shuffle = False
else:
rdclass = self.rdclass
file.seek(0, 2)
if len(self) == 0:
name.to_wire(file, compress, origin)
stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)
file.write(stuff)
return 1
else:
if want_shuffle:
l = list(self)
random.shuffle(l)
else:
l = self
for rd in l:
name.to_wire(file, compress, origin)
stuff = struct.pack("!HHIH", self.rdtype, rdclass,
self.ttl, 0)
file.write(stuff)
start = file.tell()
rd.to_wire(file, compress, origin)
end = file.tell()
assert end - start < 65536
file.seek(start - 2)
stuff = struct.pack("!H", end - start)
file.write(stuff)
file.seek(0, 2)
return len(self)
def match(self, rdclass, rdtype, covers):
"""Returns ``True`` if this rdataset matches the specified class,
type, and covers.
"""
if self.rdclass == rdclass and \
self.rdtype == rdtype and \
self.covers == covers:
return True
return False
def from_text_list(rdclass, rdtype, ttl, text_rdatas):
"""Create an rdataset with the specified class, type, and TTL, and with
the specified list of rdatas in text format.
Returns a ``dns.rdataset.Rdataset`` object.
"""
if isinstance(rdclass, string_types):
rdclass = dns.rdataclass.from_text(rdclass)
if isinstance(rdtype, string_types):
rdtype = dns.rdatatype.from_text(rdtype)
r = Rdataset(rdclass, rdtype)
r.update_ttl(ttl)
for t in text_rdatas:
rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
r.add(rd)
return r
def from_text(rdclass, rdtype, ttl, *text_rdatas):
"""Create an rdataset with the specified class, type, and TTL, and with
the specified rdatas in text format.
Returns a ``dns.rdataset.Rdataset`` object.
"""
return from_text_list(rdclass, rdtype, ttl, text_rdatas)
def from_rdata_list(ttl, rdatas):
"""Create an rdataset with the specified TTL, and with
the specified list of rdata objects.
Returns a ``dns.rdataset.Rdataset`` object.
"""
if len(rdatas) == 0:
raise ValueError("rdata list must not be empty")
r = None
for rd in rdatas:
if r is None:
r = Rdataset(rd.rdclass, rd.rdtype)
r.update_ttl(ttl)
r.add(rd)
return r
def from_rdata(ttl, *rdatas):
"""Create an rdataset with the specified TTL, and with
the specified rdata objects.
Returns a ``dns.rdataset.Rdataset`` object.
"""
return from_rdata_list(ttl, rdatas)
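As an illustration only (not in the diff), the module-level constructors above build an rdataset without touching the class directly; the MX data is made up:

    import dns.name
    import dns.rdataset

    rds = dns.rdataset.from_text('IN', 'MX', 3600, '10 mail.example.')
    print(rds.to_text(name=dns.name.from_text('example.')))
    # example. 3600 IN MX 10 mail.example.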

lib/dns/rdataset.pyi Normal file

@@ -0,0 +1,58 @@
from typing import Optional, Dict, List, Union
from io import BytesIO
from . import exception, name, set, rdatatype, rdata, rdataset
class DifferingCovers(exception.DNSException):
"""An attempt was made to add a DNS SIG/RRSIG whose covered type
is not the same as that of the other rdatas in the rdataset."""
class IncompatibleTypes(exception.DNSException):
"""An attempt was made to add DNS RR data of an incompatible type."""
class Rdataset(set.Set):
def __init__(self, rdclass, rdtype, covers=rdatatype.NONE, ttl=0):
self.rdclass : int = rdclass
self.rdtype : int = rdtype
self.covers : int = covers
self.ttl : int = ttl
def update_ttl(self, ttl : int) -> None:
...
def add(self, rd : rdata.Rdata, ttl : Optional[int] =None):
...
def union_update(self, other : Rdataset):
...
def intersection_update(self, other : Rdataset):
...
def update(self, other : Rdataset):
...
def to_text(self, name : Optional[name.Name] =None, origin : Optional[name.Name] =None, relativize=True,
override_rdclass : Optional[int] =None, **kw) -> str:
...
def to_wire(self, name : Optional[name.Name], file : BytesIO, compress : Optional[Dict[name.Name, int]] = None, origin : Optional[name.Name] = None,
override_rdclass : Optional[int] = None, want_shuffle=True) -> int:
...
def match(self, rdclass : int, rdtype : int, covers : int) -> bool:
...
def from_text_list(rdclass : Union[int,str], rdtype : Union[int,str], ttl : int, text_rdatas : List[str]) -> rdataset.Rdataset:
...
def from_text(rdclass : Union[int,str], rdtype : Union[int,str], ttl : int, *text_rdatas : str) -> rdataset.Rdataset:
...
def from_rdata_list(ttl : int, rdatas : List[rdata.Rdata]) -> rdataset.Rdataset:
...
def from_rdata(ttl : int, *rdatas : rdata.Rdata) -> rdataset.Rdataset:
...

lib/dns/rdatatype.py Normal file

@@ -0,0 +1,287 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2001-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Rdata Types."""
import re
import dns.exception
NONE = 0
A = 1
NS = 2
MD = 3
MF = 4
CNAME = 5
SOA = 6
MB = 7
MG = 8
MR = 9
NULL = 10
WKS = 11
PTR = 12
HINFO = 13
MINFO = 14
MX = 15
TXT = 16
RP = 17
AFSDB = 18
X25 = 19
ISDN = 20
RT = 21
NSAP = 22
NSAP_PTR = 23
SIG = 24
KEY = 25
PX = 26
GPOS = 27
AAAA = 28
LOC = 29
NXT = 30
SRV = 33
NAPTR = 35
KX = 36
CERT = 37
A6 = 38
DNAME = 39
OPT = 41
APL = 42
DS = 43
SSHFP = 44
IPSECKEY = 45
RRSIG = 46
NSEC = 47
DNSKEY = 48
DHCID = 49
NSEC3 = 50
NSEC3PARAM = 51
TLSA = 52
HIP = 55
CDS = 59
CDNSKEY = 60
OPENPGPKEY = 61
CSYNC = 62
SPF = 99
UNSPEC = 103
EUI48 = 108
EUI64 = 109
TKEY = 249
TSIG = 250
IXFR = 251
AXFR = 252
MAILB = 253
MAILA = 254
ANY = 255
URI = 256
CAA = 257
AVC = 258
TA = 32768
DLV = 32769
_by_text = {
'NONE': NONE,
'A': A,
'NS': NS,
'MD': MD,
'MF': MF,
'CNAME': CNAME,
'SOA': SOA,
'MB': MB,
'MG': MG,
'MR': MR,
'NULL': NULL,
'WKS': WKS,
'PTR': PTR,
'HINFO': HINFO,
'MINFO': MINFO,
'MX': MX,
'TXT': TXT,
'RP': RP,
'AFSDB': AFSDB,
'X25': X25,
'ISDN': ISDN,
'RT': RT,
'NSAP': NSAP,
'NSAP-PTR': NSAP_PTR,
'SIG': SIG,
'KEY': KEY,
'PX': PX,
'GPOS': GPOS,
'AAAA': AAAA,
'LOC': LOC,
'NXT': NXT,
'SRV': SRV,
'NAPTR': NAPTR,
'KX': KX,
'CERT': CERT,
'A6': A6,
'DNAME': DNAME,
'OPT': OPT,
'APL': APL,
'DS': DS,
'SSHFP': SSHFP,
'IPSECKEY': IPSECKEY,
'RRSIG': RRSIG,
'NSEC': NSEC,
'DNSKEY': DNSKEY,
'DHCID': DHCID,
'NSEC3': NSEC3,
'NSEC3PARAM': NSEC3PARAM,
'TLSA': TLSA,
'HIP': HIP,
'CDS': CDS,
'CDNSKEY': CDNSKEY,
'OPENPGPKEY': OPENPGPKEY,
'CSYNC': CSYNC,
'SPF': SPF,
'UNSPEC': UNSPEC,
'EUI48': EUI48,
'EUI64': EUI64,
'TKEY': TKEY,
'TSIG': TSIG,
'IXFR': IXFR,
'AXFR': AXFR,
'MAILB': MAILB,
'MAILA': MAILA,
'ANY': ANY,
'URI': URI,
'CAA': CAA,
'AVC': AVC,
'TA': TA,
'DLV': DLV,
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
_by_value = {y: x for x, y in _by_text.items()}
_metatypes = {
OPT: True
}
_singletons = {
SOA: True,
NXT: True,
DNAME: True,
NSEC: True,
CNAME: True,
}
_unknown_type_pattern = re.compile('TYPE([0-9]+)$', re.I)
class UnknownRdatatype(dns.exception.DNSException):
"""DNS resource record type is unknown."""
def from_text(text):
"""Convert text into a DNS rdata type value.
The input text can be a defined DNS RR type mnemonic or
instance of the DNS generic type syntax.
For example, "NS" and "TYPE2" will both result in a value of 2.
Raises ``dns.rdatatype.UnknownRdatatype`` if the type is unknown.
Raises ``ValueError`` if the rdata type value is not between 0 and 65535.
Returns an ``int``.
"""
value = _by_text.get(text.upper())
if value is None:
match = _unknown_type_pattern.match(text)
if match is None:
raise UnknownRdatatype
value = int(match.group(1))
if value < 0 or value > 65535:
raise ValueError("type must be between 0 and 65535")
return value
def to_text(value):
"""Convert a DNS rdata type value to text.
If the value has a known mnemonic, it will be used, otherwise the
DNS generic type syntax will be used.
Raises ``ValueError`` if the rdata type value is not between 0 and 65535.
Returns a ``str``.
"""
if value < 0 or value > 65535:
raise ValueError("type must be between 0 and 65535")
text = _by_value.get(value)
if text is None:
text = 'TYPE' + repr(value)
return text
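A round-trip sketch for the two converters above (values follow the mnemonic table in this file; illustrative only):

    import dns.rdatatype

    assert dns.rdatatype.from_text('NS') == 2
    assert dns.rdatatype.from_text('TYPE2') == 2        # generic type syntax
    assert dns.rdatatype.to_text(2) == 'NS'
    assert dns.rdatatype.to_text(64000) == 'TYPE64000'  # no mnemonic defined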
def is_metatype(rdtype):
"""True if the specified type is a metatype.
*rdtype* is an ``int``.
The currently defined metatypes are TKEY, TSIG, IXFR, AXFR, MAILA,
MAILB, ANY, and OPT.
Returns a ``bool``.
"""
if rdtype >= TKEY and rdtype <= ANY or rdtype in _metatypes:
return True
return False
def is_singleton(rdtype):
"""Is the specified type a singleton type?
Singleton types can only have a single rdata in an rdataset, or a single
RR in an RRset.
The currently defined singleton types are CNAME, DNAME, NSEC, NXT, and
SOA.
*rdtype* is an ``int``.
Returns a ``bool``.
"""
if rdtype in _singletons:
return True
return False
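For example (illustrative, using the constants defined above):

    import dns.rdatatype

    assert dns.rdatatype.is_metatype(dns.rdatatype.OPT)    # in _metatypes
    assert dns.rdatatype.is_metatype(dns.rdatatype.AXFR)   # TKEY..ANY range
    assert not dns.rdatatype.is_metatype(dns.rdatatype.A)
    assert dns.rdatatype.is_singleton(dns.rdatatype.CNAME)
    assert not dns.rdatatype.is_singleton(dns.rdatatype.MX)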
def register_type(rdtype, rdtype_text, is_singleton=False): # pylint: disable=redefined-outer-name
"""Dynamically register an rdatatype.
*rdtype*, an ``int``, the rdatatype to register.
*rdtype_text*, a ``text``, the textual form of the rdatatype.
*is_singleton*, a ``bool``, indicating if the type is a singleton (i.e.
RRsets of the type can have only one member.)
"""
_by_text[rdtype_text] = rdtype
_by_value[rdtype] = rdtype_text
if is_singleton:
_singletons[rdtype] = True
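A sketch of dynamic registration; the type value 65280 (start of the private-use range) and the 'MYTYPE' mnemonic are invented for illustration:

    import dns.rdatatype

    dns.rdatatype.register_type(65280, 'MYTYPE', is_singleton=True)
    assert dns.rdatatype.from_text('MYTYPE') == 65280
    assert dns.rdatatype.to_text(65280) == 'MYTYPE'
    assert dns.rdatatype.is_singleton(65280)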


@@ -0,0 +1,55 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.mxbase
class AFSDB(dns.rdtypes.mxbase.UncompressedDowncasingMX):
"""AFSDB record
@ivar subtype: the subtype value
@type subtype: int
@ivar hostname: the hostname
@type hostname: dns.name.Name object"""
# Use the property mechanism to make "subtype" an alias for the
# "preference" attribute, and "hostname" an alias for the "exchange"
# attribute.
#
# This lets us inherit the UncompressedMX implementation but lets
# the caller use appropriate attribute names for the rdata type.
#
# We probably lose some performance vs. a cut-and-paste
# implementation, but this way we don't copy code, and that's
# good.
def get_subtype(self):
return self.preference
def set_subtype(self, subtype):
self.preference = subtype
subtype = property(get_subtype, set_subtype)
def get_hostname(self):
return self.exchange
def set_hostname(self, hostname):
self.exchange = hostname
hostname = property(get_hostname, set_hostname)
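The aliasing can be seen in a small example (the record data is made up, and the vendored dns package is assumed importable):

    import dns.rdata, dns.rdataclass, dns.rdatatype

    rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.AFSDB,
                             '1 afsdb.example.')
    assert rd.subtype == rd.preference == 1
    assert rd.hostname is rd.exchange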


@@ -0,0 +1,25 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2016 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.txtbase
class AVC(dns.rdtypes.txtbase.TXTBase):
"""AVC record
@see: U{http://www.iana.org/assignments/dns-parameters/AVC/avc-completed-template}"""


@@ -0,0 +1,75 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.tokenizer
class CAA(dns.rdata.Rdata):
"""CAA (Certification Authority Authorization) record
@ivar flags: the flags
@type flags: int
@ivar tag: the tag
@type tag: string
@ivar value: the value
@type value: string
@see: RFC 6844"""
__slots__ = ['flags', 'tag', 'value']
def __init__(self, rdclass, rdtype, flags, tag, value):
super(CAA, self).__init__(rdclass, rdtype)
self.flags = flags
self.tag = tag
self.value = value
def to_text(self, origin=None, relativize=True, **kw):
return '%u %s "%s"' % (self.flags,
dns.rdata._escapify(self.tag),
dns.rdata._escapify(self.value))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
flags = tok.get_uint8()
tag = tok.get_string().encode()
if len(tag) > 255:
raise dns.exception.SyntaxError("tag too long")
if not tag.isalnum():
raise dns.exception.SyntaxError("tag is not alphanumeric")
value = tok.get_string().encode()
return cls(rdclass, rdtype, flags, tag, value)
def to_wire(self, file, compress=None, origin=None):
file.write(struct.pack('!B', self.flags))
l = len(self.tag)
assert l < 256
file.write(struct.pack('!B', l))
file.write(self.tag)
file.write(self.value)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
(flags, l) = struct.unpack('!BB', wire[current: current + 2])
current += 2
tag = wire[current: current + l]
value = wire[current + l:current + rdlen - 2]
return cls(rdclass, rdtype, flags, tag, value)
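A minimal parse/print sketch (the CAA value is a sample, not from the diff):

    import dns.rdata, dns.rdataclass, dns.rdatatype

    rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CAA,
                             '0 issue "letsencrypt.org"')
    assert rd.flags == 0 and rd.tag == b'issue'
    print(rd.to_text())   # 0 issue "letsencrypt.org"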


@@ -0,0 +1,27 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.dnskeybase
from dns.rdtypes.dnskeybase import flags_to_text_set, flags_from_text_set
__all__ = ['flags_to_text_set', 'flags_from_text_set']
class CDNSKEY(dns.rdtypes.dnskeybase.DNSKEYBase):
"""CDNSKEY record"""


@@ -0,0 +1,23 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.dsbase
class CDS(dns.rdtypes.dsbase.DSBase):
"""CDS record"""

lib/dns/rdtypes/ANY/CERT.py Normal file

@@ -0,0 +1,123 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import base64
import dns.exception
import dns.dnssec
import dns.rdata
import dns.tokenizer
_ctype_by_value = {
1: 'PKIX',
2: 'SPKI',
3: 'PGP',
253: 'URI',
254: 'OID',
}
_ctype_by_name = {
'PKIX': 1,
'SPKI': 2,
'PGP': 3,
'URI': 253,
'OID': 254,
}
def _ctype_from_text(what):
v = _ctype_by_name.get(what)
if v is not None:
return v
return int(what)
def _ctype_to_text(what):
v = _ctype_by_value.get(what)
if v is not None:
return v
return str(what)
class CERT(dns.rdata.Rdata):
"""CERT record
@ivar certificate_type: certificate type
@type certificate_type: int
@ivar key_tag: key tag
@type key_tag: int
@ivar algorithm: algorithm
@type algorithm: int
@ivar certificate: the certificate or CRL
@type certificate: string
@see: RFC 2538"""
__slots__ = ['certificate_type', 'key_tag', 'algorithm', 'certificate']
def __init__(self, rdclass, rdtype, certificate_type, key_tag, algorithm,
certificate):
super(CERT, self).__init__(rdclass, rdtype)
self.certificate_type = certificate_type
self.key_tag = key_tag
self.algorithm = algorithm
self.certificate = certificate
def to_text(self, origin=None, relativize=True, **kw):
certificate_type = _ctype_to_text(self.certificate_type)
return "%s %d %s %s" % (certificate_type, self.key_tag,
dns.dnssec.algorithm_to_text(self.algorithm),
dns.rdata._base64ify(self.certificate))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
certificate_type = _ctype_from_text(tok.get_string())
key_tag = tok.get_uint16()
algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
if algorithm < 0 or algorithm > 255:
raise dns.exception.SyntaxError("bad algorithm type")
chunks = []
while 1:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
chunks.append(t.value.encode())
b64 = b''.join(chunks)
certificate = base64.b64decode(b64)
return cls(rdclass, rdtype, certificate_type, key_tag,
algorithm, certificate)
def to_wire(self, file, compress=None, origin=None):
prefix = struct.pack("!HHB", self.certificate_type, self.key_tag,
self.algorithm)
file.write(prefix)
file.write(self.certificate)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
prefix = wire[current: current + 5].unwrap()
current += 5
rdlen -= 5
if rdlen < 0:
raise dns.exception.FormError
(certificate_type, key_tag, algorithm) = struct.unpack("!HHB", prefix)
certificate = wire[current: current + rdlen].unwrap()
return cls(rdclass, rdtype, certificate_type, key_tag, algorithm,
certificate)


@@ -0,0 +1,27 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.nsbase
class CNAME(dns.rdtypes.nsbase.NSBase):
"""CNAME record
Note: although CNAME is officially a singleton type, dnspython allows
non-singleton CNAME rdatasets because such sets have been commonly
used by BIND and other nameservers for load balancing."""


@@ -0,0 +1,126 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2004-2007, 2009-2011, 2016 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.rdatatype
import dns.name
from dns._compat import xrange
class CSYNC(dns.rdata.Rdata):
"""CSYNC record
@ivar serial: the SOA serial number
@type serial: int
@ivar flags: the CSYNC flags
@type flags: int
@ivar windows: the windowed bitmap list
@type windows: list of (window number, string) tuples"""
__slots__ = ['serial', 'flags', 'windows']
def __init__(self, rdclass, rdtype, serial, flags, windows):
super(CSYNC, self).__init__(rdclass, rdtype)
self.serial = serial
self.flags = flags
self.windows = windows
def to_text(self, origin=None, relativize=True, **kw):
text = ''
for (window, bitmap) in self.windows:
bits = []
for i in xrange(0, len(bitmap)):
byte = bitmap[i]
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns.rdatatype.to_text(window * 256 +
i * 8 + j))
text += (' ' + ' '.join(bits))
return '%d %d%s' % (self.serial, self.flags, text)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
serial = tok.get_uint32()
flags = tok.get_uint16()
rdtypes = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
nrdtype = dns.rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns.exception.SyntaxError("CSYNC with bit 0")
if nrdtype > 65535:
raise dns.exception.SyntaxError("CSYNC with bit > 65535")
rdtypes.append(nrdtype)
rdtypes.sort()
window = 0
octets = 0
prior_rdtype = 0
bitmap = bytearray(b'\0' * 32)
windows = []
for nrdtype in rdtypes:
if nrdtype == prior_rdtype:
continue
prior_rdtype = nrdtype
new_window = nrdtype // 256
if new_window != window:
windows.append((window, bitmap[0:octets]))
bitmap = bytearray(b'\0' * 32)
window = new_window
offset = nrdtype % 256
byte = offset // 8
bit = offset % 8
octets = byte + 1
bitmap[byte] = bitmap[byte] | (0x80 >> bit)
windows.append((window, bitmap[0:octets]))
return cls(rdclass, rdtype, serial, flags, windows)
def to_wire(self, file, compress=None, origin=None):
file.write(struct.pack('!IH', self.serial, self.flags))
for (window, bitmap) in self.windows:
file.write(struct.pack('!BB', window, len(bitmap)))
file.write(bitmap)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
if rdlen < 6:
raise dns.exception.FormError("CSYNC too short")
(serial, flags) = struct.unpack("!IH", wire[current: current + 6])
current += 6
rdlen -= 6
windows = []
while rdlen > 0:
if rdlen < 3:
raise dns.exception.FormError("CSYNC too short")
window = wire[current]
octets = wire[current + 1]
if octets == 0 or octets > 32:
raise dns.exception.FormError("bad CSYNC octets")
current += 2
rdlen -= 2
if rdlen < octets:
raise dns.exception.FormError("bad CSYNC bitmap length")
bitmap = bytearray(wire[current: current + octets].unwrap())
current += octets
rdlen -= octets
windows.append((window, bitmap))
return cls(rdclass, rdtype, serial, flags, windows)
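A round-trip sketch for the windowed bitmap logic above (serial and flags are arbitrary):

    import dns.rdata, dns.rdataclass, dns.rdatatype

    rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CSYNC,
                             '66 3 A NS AAAA')
    assert rd.serial == 66 and rd.flags == 3
    assert rd.to_text() == '66 3 A NS AAAA'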


@@ -0,0 +1,23 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.dsbase
class DLV(dns.rdtypes.dsbase.DSBase):
"""DLV record"""


@@ -0,0 +1,26 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.nsbase
class DNAME(dns.rdtypes.nsbase.UncompressedNS):
"""DNAME record"""
def to_digestable(self, origin=None):
return self.target.to_digestable(origin)


@@ -0,0 +1,27 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.dnskeybase
from dns.rdtypes.dnskeybase import flags_to_text_set, flags_from_text_set
__all__ = ['flags_to_text_set', 'flags_from_text_set']
class DNSKEY(dns.rdtypes.dnskeybase.DNSKEYBase):
"""DNSKEY record"""

lib/dns/rdtypes/ANY/DS.py Normal file

@@ -0,0 +1,23 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.dsbase
class DS(dns.rdtypes.dsbase.DSBase):
"""DS record"""


@@ -0,0 +1,29 @@
# Copyright (C) 2015 Red Hat, Inc.
# Author: Petr Spacek <pspacek@redhat.com>
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.euibase
class EUI48(dns.rdtypes.euibase.EUIBase):
"""EUI48 record
@ivar fingerprint: 48-bit Extended Unique Identifier (EUI-48)
@type fingerprint: string
@see: RFC 7043"""
byte_len = 6 # 0123456789ab (in hex)
text_len = byte_len * 3 - 1 # 01-23-45-67-89-ab
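For illustration, the expected text form is dash-separated hex pairs (the identifier below is a placeholder):

    import dns.rdata, dns.rdataclass, dns.rdatatype

    rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.EUI48,
                             '01-23-45-67-89-ab')
    assert rd.to_text() == '01-23-45-67-89-ab'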


@@ -0,0 +1,29 @@
# Copyright (C) 2015 Red Hat, Inc.
# Author: Petr Spacek <pspacek@redhat.com>
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.euibase
class EUI64(dns.rdtypes.euibase.EUIBase):
"""EUI64 record
@ivar fingerprint: 64-bit Extended Unique Identifier (EUI-64)
@type fingerprint: string
@see: RFC 7043"""
byte_len = 8 # 0123456789abcdef (in hex)
text_len = byte_len * 3 - 1 # 01-23-45-67-89-ab-cd-ef

lib/dns/rdtypes/ANY/GPOS.py Normal file

@@ -0,0 +1,162 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.tokenizer
from dns._compat import long, text_type
def _validate_float_string(what):
if what[0] == b'-'[0] or what[0] == b'+'[0]:
what = what[1:]
if what.isdigit():
return
(left, right) = what.split(b'.')
if left == b'' and right == b'':
raise dns.exception.FormError
if not left == b'' and not left.decode().isdigit():
raise dns.exception.FormError
if not right == b'' and not right.decode().isdigit():
raise dns.exception.FormError
def _sanitize(value):
if isinstance(value, text_type):
return value.encode()
return value
class GPOS(dns.rdata.Rdata):
"""GPOS record
@ivar latitude: latitude
@type latitude: string
@ivar longitude: longitude
@type longitude: string
@ivar altitude: altitude
@type altitude: string
@see: RFC 1712"""
__slots__ = ['latitude', 'longitude', 'altitude']
def __init__(self, rdclass, rdtype, latitude, longitude, altitude):
super(GPOS, self).__init__(rdclass, rdtype)
if isinstance(latitude, float) or \
isinstance(latitude, int) or \
isinstance(latitude, long):
latitude = str(latitude)
if isinstance(longitude, float) or \
isinstance(longitude, int) or \
isinstance(longitude, long):
longitude = str(longitude)
if isinstance(altitude, float) or \
isinstance(altitude, int) or \
isinstance(altitude, long):
altitude = str(altitude)
latitude = _sanitize(latitude)
longitude = _sanitize(longitude)
altitude = _sanitize(altitude)
_validate_float_string(latitude)
_validate_float_string(longitude)
_validate_float_string(altitude)
self.latitude = latitude
self.longitude = longitude
self.altitude = altitude
def to_text(self, origin=None, relativize=True, **kw):
return '{} {} {}'.format(self.latitude.decode(),
self.longitude.decode(),
self.altitude.decode())
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
latitude = tok.get_string()
longitude = tok.get_string()
altitude = tok.get_string()
tok.get_eol()
return cls(rdclass, rdtype, latitude, longitude, altitude)
def to_wire(self, file, compress=None, origin=None):
l = len(self.latitude)
assert l < 256
file.write(struct.pack('!B', l))
file.write(self.latitude)
l = len(self.longitude)
assert l < 256
file.write(struct.pack('!B', l))
file.write(self.longitude)
l = len(self.altitude)
assert l < 256
file.write(struct.pack('!B', l))
file.write(self.altitude)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
l = wire[current]
current += 1
rdlen -= 1
if l > rdlen:
raise dns.exception.FormError
latitude = wire[current: current + l].unwrap()
current += l
rdlen -= l
l = wire[current]
current += 1
rdlen -= 1
if l > rdlen:
raise dns.exception.FormError
longitude = wire[current: current + l].unwrap()
current += l
rdlen -= l
l = wire[current]
current += 1
rdlen -= 1
if l != rdlen:
raise dns.exception.FormError
altitude = wire[current: current + l].unwrap()
return cls(rdclass, rdtype, latitude, longitude, altitude)
def _get_float_latitude(self):
return float(self.latitude)
def _set_float_latitude(self, value):
self.latitude = str(value)
float_latitude = property(_get_float_latitude, _set_float_latitude,
doc="latitude as a floating point value")
def _get_float_longitude(self):
return float(self.longitude)
def _set_float_longitude(self, value):
self.longitude = str(value)
float_longitude = property(_get_float_longitude, _set_float_longitude,
doc="longitude as a floating point value")
def _get_float_altitude(self):
return float(self.altitude)
def _set_float_altitude(self, value):
self.altitude = str(value)
float_altitude = property(_get_float_altitude, _set_float_altitude,
doc="altitude as a floating point value")


@@ -0,0 +1,86 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.tokenizer
from dns._compat import text_type
class HINFO(dns.rdata.Rdata):
"""HINFO record
@ivar cpu: the CPU type
@type cpu: string
@ivar os: the OS type
@type os: string
@see: RFC 1035"""
__slots__ = ['cpu', 'os']
def __init__(self, rdclass, rdtype, cpu, os):
super(HINFO, self).__init__(rdclass, rdtype)
if isinstance(cpu, text_type):
self.cpu = cpu.encode()
else:
self.cpu = cpu
if isinstance(os, text_type):
self.os = os.encode()
else:
self.os = os
def to_text(self, origin=None, relativize=True, **kw):
return '"{}" "{}"'.format(dns.rdata._escapify(self.cpu),
dns.rdata._escapify(self.os))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
cpu = tok.get_string()
os = tok.get_string()
tok.get_eol()
return cls(rdclass, rdtype, cpu, os)
def to_wire(self, file, compress=None, origin=None):
l = len(self.cpu)
assert l < 256
file.write(struct.pack('!B', l))
file.write(self.cpu)
l = len(self.os)
assert l < 256
file.write(struct.pack('!B', l))
file.write(self.os)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
l = wire[current]
current += 1
rdlen -= 1
if l > rdlen:
raise dns.exception.FormError
cpu = wire[current:current + l].unwrap()
current += l
rdlen -= l
l = wire[current]
current += 1
rdlen -= 1
if l != rdlen:
raise dns.exception.FormError
os = wire[current: current + l].unwrap()
return cls(rdclass, rdtype, cpu, os)
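Illustration only; the CPU and OS strings are placeholders:

    import dns.rdata, dns.rdataclass, dns.rdatatype

    rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.HINFO,
                             '"M6800" "UNIX"')
    assert rd.to_text() == '"M6800" "UNIX"'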

lib/dns/rdtypes/ANY/HIP.py Normal file

@@ -0,0 +1,115 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2010, 2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import base64
import binascii
import dns.exception
import dns.rdata
import dns.rdatatype
class HIP(dns.rdata.Rdata):
"""HIP record
@ivar hit: the host identity tag
@type hit: string
@ivar algorithm: the public key cryptographic algorithm
@type algorithm: int
@ivar key: the public key
@type key: string
@ivar servers: the rendezvous servers
@type servers: list of dns.name.Name objects
@see: RFC 5205"""
__slots__ = ['hit', 'algorithm', 'key', 'servers']
def __init__(self, rdclass, rdtype, hit, algorithm, key, servers):
super(HIP, self).__init__(rdclass, rdtype)
self.hit = hit
self.algorithm = algorithm
self.key = key
self.servers = servers
def to_text(self, origin=None, relativize=True, **kw):
hit = binascii.hexlify(self.hit).decode()
key = base64.b64encode(self.key).replace(b'\n', b'').decode()
text = u''
servers = []
for server in self.servers:
servers.append(server.choose_relativity(origin, relativize))
if len(servers) > 0:
text += (u' ' + u' '.join((x.to_unicode() for x in servers)))
return u'%u %s %s%s' % (self.algorithm, hit, key, text)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
algorithm = tok.get_uint8()
hit = binascii.unhexlify(tok.get_string().encode())
if len(hit) > 255:
raise dns.exception.SyntaxError("HIT too long")
key = base64.b64decode(tok.get_string().encode())
servers = []
while 1:
token = tok.get()
if token.is_eol_or_eof():
break
server = dns.name.from_text(token.value, origin)
server.choose_relativity(origin, relativize)
servers.append(server)
return cls(rdclass, rdtype, hit, algorithm, key, servers)
def to_wire(self, file, compress=None, origin=None):
lh = len(self.hit)
lk = len(self.key)
file.write(struct.pack("!BBH", lh, self.algorithm, lk))
file.write(self.hit)
file.write(self.key)
for server in self.servers:
server.to_wire(file, None, origin)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
(lh, algorithm, lk) = struct.unpack('!BBH',
wire[current: current + 4])
current += 4
rdlen -= 4
hit = wire[current: current + lh].unwrap()
current += lh
rdlen -= lh
key = wire[current: current + lk].unwrap()
current += lk
rdlen -= lk
servers = []
while rdlen > 0:
(server, cused) = dns.name.from_wire(wire[: current + rdlen],
current)
current += cused
rdlen -= cused
if origin is not None:
server = server.relativize(origin)
servers.append(server)
return cls(rdclass, rdtype, hit, algorithm, key, servers)
def choose_relativity(self, origin=None, relativize=True):
servers = []
for server in self.servers:
server = server.choose_relativity(origin, relativize)
servers.append(server)
self.servers = servers
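A parsing sketch; the HIT (hex) and key (base64) below are placeholder values, not a real host identity:

    import dns.rdata, dns.rdataclass, dns.rdatatype

    rd = dns.rdata.from_text(
        dns.rdataclass.IN, dns.rdatatype.HIP,
        '2 200100107b1a74df365639cc39f1d578 dGVzdC1rZXk= rvs.example.')
    assert rd.algorithm == 2
    assert [s.to_text() for s in rd.servers] == ['rvs.example.']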


@@ -0,0 +1,99 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.tokenizer
from dns._compat import text_type
class ISDN(dns.rdata.Rdata):
"""ISDN record
@ivar address: the ISDN address
@type address: string
@ivar subaddress: the ISDN subaddress (or '' if not present)
@type subaddress: string
@see: RFC 1183"""
__slots__ = ['address', 'subaddress']
def __init__(self, rdclass, rdtype, address, subaddress):
super(ISDN, self).__init__(rdclass, rdtype)
if isinstance(address, text_type):
self.address = address.encode()
else:
self.address = address
if isinstance(subaddress, text_type):
self.subaddress = subaddress.encode()
else:
self.subaddress = subaddress
def to_text(self, origin=None, relativize=True, **kw):
if self.subaddress:
return '"{}" "{}"'.format(dns.rdata._escapify(self.address),
dns.rdata._escapify(self.subaddress))
else:
return '"%s"' % dns.rdata._escapify(self.address)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
address = tok.get_string()
t = tok.get()
if not t.is_eol_or_eof():
tok.unget(t)
subaddress = tok.get_string()
else:
tok.unget(t)
subaddress = ''
tok.get_eol()
return cls(rdclass, rdtype, address, subaddress)
def to_wire(self, file, compress=None, origin=None):
l = len(self.address)
assert l < 256
file.write(struct.pack('!B', l))
file.write(self.address)
l = len(self.subaddress)
if l > 0:
assert l < 256
file.write(struct.pack('!B', l))
file.write(self.subaddress)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
l = wire[current]
current += 1
rdlen -= 1
if l > rdlen:
raise dns.exception.FormError
address = wire[current: current + l].unwrap()
current += l
rdlen -= l
if rdlen > 0:
l = wire[current]
current += 1
rdlen -= 1
if l != rdlen:
raise dns.exception.FormError
subaddress = wire[current: current + l].unwrap()
else:
subaddress = ''
return cls(rdclass, rdtype, address, subaddress)
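A sketch using the RFC 1183 sample address and subaddress:

    import dns.rdata, dns.rdataclass, dns.rdatatype

    rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.ISDN,
                             '"150862028003217" "004"')
    assert rd.to_text() == '"150862028003217" "004"'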

Some files were not shown because too many files have changed in this diff.